# wat.py — wat.tv extractor (youtube-dl style InfoExtractor)
  1. # coding: utf-8
  2. import json
  3. import re
  4. from .common import InfoExtractor
  5. from ..utils import (
  6. compat_urllib_parse,
  7. unified_strdate,
  8. )
  9. class WatIE(InfoExtractor):
  10. _VALID_URL=r'http://www.wat.tv/.*-(?P<shortID>.*?)_.*?.html'
  11. IE_NAME = 'wat.tv'
  12. _TEST = {
  13. u'url': u'http://www.wat.tv/video/world-war-philadelphia-vost-6bv55_2fjr7_.html',
  14. u'file': u'10631273.mp4',
  15. u'md5': u'0a4fe7870f31eaeabb5e25fd8da8414a',
  16. u'info_dict': {
  17. u'title': u'World War Z - Philadelphia VOST',
  18. u'description': u'La menace est partout. Que se passe-t-il à Philadelphia ?\r\nWORLD WAR Z, avec Brad Pitt, au cinéma le 3 juillet.\r\nhttp://www.worldwarz.fr',
  19. }
  20. }
  21. def download_video_info(self, real_id):
  22. # 'contentv4' is used in the website, but it also returns the related
  23. # videos, we don't need them
  24. info = self._download_webpage('http://www.wat.tv/interface/contentv3/' + real_id, real_id, 'Downloading video info')
  25. info = json.loads(info)
  26. return info['media']
  27. def _real_extract(self, url):
  28. def real_id_for_chapter(chapter):
  29. return chapter['tc_start'].split('-')[0]
  30. mobj = re.match(self._VALID_URL, url)
  31. short_id = mobj.group('shortID')
  32. webpage = self._download_webpage(url, short_id)
  33. real_id = self._search_regex(r'xtpage = ".*-(.*?)";', webpage, 'real id')
  34. video_info = self.download_video_info(real_id)
  35. chapters = video_info['chapters']
  36. first_chapter = chapters[0]
  37. if real_id_for_chapter(first_chapter) != real_id:
  38. self.to_screen('Multipart video detected')
  39. chapter_urls = []
  40. for chapter in chapters:
  41. chapter_id = real_id_for_chapter(chapter)
  42. # Yes, when we this chapter is processed by WatIE,
  43. # it will download the info again
  44. chapter_info = self.download_video_info(chapter_id)
  45. chapter_urls.append(chapter_info['url'])
  46. entries = [self.url_result(chapter_url) for chapter_url in chapter_urls]
  47. return self.playlist_result(entries, real_id, video_info['title'])
  48. # Otherwise we can continue and extract just one part, we have to use
  49. # the short id for getting the video url
  50. player_data = compat_urllib_parse.urlencode({'shortVideoId': short_id,
  51. 'html5': '1'})
  52. player_info = self._download_webpage('http://www.wat.tv/player?' + player_data,
  53. real_id, u'Downloading player info')
  54. player = json.loads(player_info)['player']
  55. html5_player = self._html_search_regex(r'iframe src="(.*?)"', player,
  56. 'html5 player')
  57. player_webpage = self._download_webpage(html5_player, real_id,
  58. u'Downloading player webpage')
  59. video_url = self._search_regex(r'urlhtml5 : "(.*?)"', player_webpage,
  60. 'video url')
  61. info = {'id': real_id,
  62. 'url': video_url,
  63. 'ext': 'mp4',
  64. 'title': first_chapter['title'],
  65. 'thumbnail': first_chapter['preview'],
  66. 'description': first_chapter['description'],
  67. 'view_count': video_info['views'],
  68. }
  69. if 'date_diffusion' in first_chapter:
  70. info['upload_date'] = unified_strdate(first_chapter['date_diffusion'])
  71. return info