import datetime
import json
import os
import re
import socket

from .common import InfoExtractor
from ..utils import (
    compat_http_client,
    compat_parse_qs,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse_urlparse,
    compat_urllib_request,
    ExtractorError,
    unescapeHTML,
)

class BlipTVIE(InfoExtractor):
    """Information extractor for blip.tv"""

    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
    _URL_EXT = r'^.*\.([a-z0-9]+)$'
    IE_NAME = u'blip.tv'
    _TEST = {
        u'url': u'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
        u'file': u'5779306.m4v',
        u'md5': u'80baf1ec5c3d2019037c1c707d676b9f',
        u'info_dict': {
            u"upload_date": u"20111205",
            u"description": u"md5:9bc31f227219cde65e47eeec8d2dc596",
            u"uploader": u"Comic Book Resources - CBR TV",
            u"title": u"CBR EXCLUSIVE: \"Gotham City Imposters\" Bats VS Jokerz Short 3"
        }
    }

    def report_direct_download(self, title):
        """Report information extraction."""
        self.to_screen(u'%s: Direct download detected' % title)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # See https://github.com/rg3/youtube-dl/issues/857
        api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
        if api_mobj is not None:
            url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')

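        # Embed-player URLs ('/play/...') redirect to a page whose URL fragment
        # carries the media path; pull the file id out of that fragment and
        # restart extraction with the equivalent 'http://blip.tv/a/a-<id>' URL.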
        urlp = compat_urllib_parse_urlparse(url)
        if urlp.path.startswith('/play/'):
            response = self._request_webpage(url, None, False)
            redirecturl = response.geturl()
            rurlp = compat_urllib_parse_urlparse(redirecturl)
            file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
            url = 'http://blip.tv/a/a-' + file_id
            return self._real_extract(url)

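        # Ask blip.tv for JSON metadata by appending skin=json&version=2&no_wrap=1.
        # The iTunes User-Agent is deliberate: blip.tv appears to hand iTunes
        # clients plain, directly downloadable media URLs, which is also why
        # 'user_agent' is propagated in the info dict further down.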
        if '?' in url:
            cchar = '&'
        else:
            cchar = '?'
        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
        request = compat_urllib_request.Request(json_url)
        request.add_header('User-Agent', 'iTunes/10.6.1')
        self.report_extraction(mobj.group(1))

        info = None
        urlh = self._request_webpage(request, None, False,
                                     u'unable to download video info webpage')

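        # If the response is already a media file (Content-Type 'video/...'),
        # there is no JSON to parse: derive the title and extension straight
        # from the URL's basename and hand the open handle to the downloader.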
        if urlh.headers.get('Content-Type', '').startswith('video/'):  # Direct download
            basename = url.split('/')[-1]
            title, ext = os.path.splitext(basename)
            title = title.decode('UTF-8')
            ext = ext.replace('.', '')
            self.report_direct_download(title)
            info = {
                'id': title,
                'url': url,
                'uploader': None,
                'upload_date': None,
                'title': title,
                'ext': ext,
                'urlhandle': urlh
            }
        if info is None:  # Regular URL
            try:
                json_code_bytes = urlh.read()
                json_code = json_code_bytes.decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))

            try:
                json_data = json.loads(json_code)
                if 'Post' in json_data:
                    data = json_data['Post']
                else:
                    data = json_data
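                # 'datestamp' comes back in a form like '12-05-11 08:30AM';
                # normalize it to the YYYYMMDD string youtube-dl expects.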
                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
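                # If alternative encodings are listed, assume the one with the
                # greatest media_height is the best quality and prefer it.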
                if 'additionalMedia' in data:
                    formats = sorted(data['additionalMedia'], key=lambda f: int(f['media_height']))
                    best_format = formats[-1]
                    video_url = best_format['url']
                else:
                    video_url = data['media']['url']
                umobj = re.match(self._URL_EXT, video_url)
                if umobj is None:
                    raise ValueError('Can not determine filename extension')
                ext = umobj.group(1)

                info = {
                    'id': compat_str(data['item_id']),
                    'url': video_url,
                    'uploader': data['display_name'],
                    'upload_date': upload_date,
                    'title': data['title'],
                    'ext': ext,
                    'format': data['media']['mimeType'],
                    'thumbnail': data['thumbnailUrl'],
                    'description': data['description'],
                    'player_url': data['embedUrl'],
                    'user_agent': 'iTunes/10.6.1',
                }
            except (ValueError, KeyError) as err:
                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))

        return [info]

class BlipTVUserIE(InfoExtractor):
    """Information Extractor for blip.tv users."""

    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
    _PAGE_SIZE = 12
    IE_NAME = u'blip.tv:user'

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'

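        # The episode-list endpoint needs blip.tv's numeric users_id, which is
        # not part of the URL; scrape it from the user's HTML page instead,
        # where it is exposed as a data-users-id attribute.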
        page = self._download_webpage(url, username, u'Downloading user page')
        mobj = re.search(r'data-users-id="([^"]+)"', page)
        page_base = page_base % mobj.group(1)

        # Download video ids using BlipTV Ajax calls. Result size per query
        # is limited (currently to 12 videos), so we query page by page until
        # a page comes back without any new video ids, which means we have
        # them all.
        video_ids = []
        pagenum = 1

        while True:
            url = page_base + "&page=" + str(pagenum)
            page = self._download_webpage(url, username,
                                          u'Downloading video ids from page %d' % pagenum)

            # Extract video identifiers
            ids_in_page = []

            for mobj in re.finditer(r'href="/([^"]+)"', page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(unescapeHTML(mobj.group(1)))

            video_ids.extend(ids_in_page)

            # A small optimization: if the current page is not "full", i.e. it
            # contains fewer than _PAGE_SIZE video ids, it must be the last
            # one, so there is no need to query any further pages.
            if len(ids_in_page) < self._PAGE_SIZE:
                break

            pagenum += 1

        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
        url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
        return [self.playlist_result(url_entries, playlist_title=username)]
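

# Rough usage sketch (not part of the extractor; shown only for orientation).
# It assumes this module lives inside youtube-dl's extractor package and that
# a downloader has been attached via set_downloader(); in normal operation
# youtube-dl selects the extractor itself by calling suitable() on each IE.
#
#     ie = BlipTVIE()
#     url = u'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352'
#     if ie.suitable(url):
#         results = ie.extract(url)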