generic.py 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452
  1. # encoding: utf-8
  2. from __future__ import unicode_literals
  3. import os
  4. import re
  5. import xml.etree.ElementTree
  6. from .common import InfoExtractor
  7. from .youtube import YoutubeIE
  8. from ..utils import (
  9. compat_urllib_error,
  10. compat_urllib_parse,
  11. compat_urllib_request,
  12. compat_urlparse,
  13. compat_xml_parse_error,
  14. ExtractorError,
  15. HEADRequest,
  16. smuggle_url,
  17. unescapeHTML,
  18. unified_strdate,
  19. url_basename,
  20. )
  21. from .brightcove import BrightcoveIE
  22. from .ooyala import OoyalaIE
class GenericIE(InfoExtractor):
    """Last-resort extractor: accepts any URL (``_VALID_URL`` matches
    everything) and sniffs the page for known embedded players or
    direct media links."""
    IE_DESC = 'Generic downloader that works on some sites'
    # Matches every URL; see IE_DESC — this is the generic fallback.
    _VALID_URL = r'.*'
    IE_NAME = 'generic'
    # Self-test fixtures consumed by the project's test harness.
    _TESTS = [
        {
            'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
            'file': '13601338388002.mp4',
            'md5': '6e15c93721d7ec9e9ca3fdbf07982cfd',
            'info_dict': {
                'uploader': 'www.hodiho.fr',
                'title': 'R\u00e9gis plante sa Jeep',
            }
        },
        # bandcamp page with custom domain
        {
            'add_ie': ['Bandcamp'],
            'url': 'http://bronyrock.com/track/the-pony-mash',
            'file': '3235767654.mp3',
            'info_dict': {
                'title': 'The Pony Mash',
                'uploader': 'M_Pallante',
            },
            'skip': 'There is a limit of 200 free downloads / month for the test song',
        },
        # embedded brightcove video
        # it also tests brightcove videos that need to set the 'Referer' in the
        # http requests
        {
            'add_ie': ['Brightcove'],
            'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
            'info_dict': {
                'id': '2765128793001',
                'ext': 'mp4',
                'title': 'Le cours de bourse : l’analyse technique',
                'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
                'uploader': 'BFM BUSINESS',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            # https://github.com/rg3/youtube-dl/issues/2253
            'url': 'http://bcove.me/i6nfkrc3',
            'file': '3101154703001.mp4',
            'md5': '0ba9446db037002366bab3b3eb30c88c',
            'info_dict': {
                'title': 'Still no power',
                'uploader': 'thestar.com',
                'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
            },
            'add_ie': ['Brightcove'],
        },
        # Direct link to a video
        {
            'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
            'file': 'trailer.mp4',
            'md5': '67d406c2bcb6af27fa886f31aa934bbe',
            'info_dict': {
                'id': 'trailer',
                'title': 'trailer',
                'upload_date': '20100513',
            }
        },
        # ooyala video
        {
            'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
            'file': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ.mp4',
            'md5': '5644c6ca5d5782c1d0d350dad9bd840c',
            'info_dict': {
                'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
                'ext': 'mp4',
                'title': '2cc213299525360.mov',  # that's what we get
            },
        },
        # embed.ly video
        {
            'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
            'info_dict': {
                'id': '9ODmcdjQcHQ',
                'ext': 'mp4',
            },
            # No need to test YoutubeIE here
            'params': {
                'skip_download': True,
            },
        },
    ]
  112. def report_download_webpage(self, video_id):
  113. """Report webpage download."""
  114. if not self._downloader.params.get('test', False):
  115. self._downloader.report_warning('Falling back on generic information extractor.')
  116. super(GenericIE, self).report_download_webpage(video_id)
  117. def report_following_redirect(self, new_url):
  118. """Report information extraction."""
  119. self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
def _send_head(self, url):
    """Check if it is a redirect, like url shorteners, in case return the new url."""

    class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
        """
        Subclass the HTTPRedirectHandler to make it use our
        HEADRequest also on the redirected URL
        """
        def redirect_request(self, req, fp, code, msg, headers, newurl):
            if code in (301, 302, 303, 307):
                # Escape spaces so the redirect target is a valid URL.
                newurl = newurl.replace(' ', '%20')
                # Drop body-describing headers; a HEAD request has no body.
                newheaders = dict((k,v) for k,v in req.headers.items()
                                  if k.lower() not in ("content-length", "content-type"))
                return HEADRequest(newurl,
                                   headers=newheaders,
                                   origin_req_host=req.get_origin_req_host(),
                                   unverifiable=True)
            else:
                raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)

    class HTTPMethodFallback(compat_urllib_request.BaseHandler):
        """
        Fallback to GET if HEAD is not allowed (405 HTTP error)
        """
        def http_error_405(self, req, fp, code, msg, headers):
            # Drain and close the failed response before retrying.
            fp.read()
            fp.close()
            newheaders = dict((k,v) for k,v in req.headers.items()
                              if k.lower() not in ("content-length", "content-type"))
            # Re-issue the same URL as a plain Request (defaults to GET).
            return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
                                                                  headers=newheaders,
                                                                  origin_req_host=req.get_origin_req_host(),
                                                                  unverifiable=True))

    # Build our opener
    # Standard HTTP(S) machinery plus the two custom handlers above.
    opener = compat_urllib_request.OpenerDirector()
    for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
                    HTTPMethodFallback, HEADRedirectHandler,
                    compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
        opener.add_handler(handler())

    response = opener.open(HEADRequest(url))
    if response is None:
        # OpenerDirector.open returns None when no handler accepted the
        # request (e.g. an unsupported URL scheme).
        raise ExtractorError('Invalid URL protocol')
    return response
  161. def _extract_rss(self, url, video_id, doc):
  162. playlist_title = doc.find('./channel/title').text
  163. playlist_desc_el = doc.find('./channel/description')
  164. playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
  165. entries = [{
  166. '_type': 'url',
  167. 'url': e.find('link').text,
  168. 'title': e.find('title').text,
  169. } for e in doc.findall('./channel/item')]
  170. return {
  171. '_type': 'playlist',
  172. 'id': url,
  173. 'title': playlist_title,
  174. 'description': playlist_desc,
  175. 'entries': entries,
  176. }
def _real_extract(self, url):
    """Extract from an arbitrary URL: fix up scheme-less input, follow
    redirects, return direct media links, and otherwise download the page
    and sniff it for RSS feeds and known embedded players (tried in a
    fixed order; first match wins)."""
    parsed_url = compat_urlparse.urlparse(url)
    if not parsed_url.scheme:
        # No protocol given: honour --default-search, defaulting to 'auto'.
        default_search = self._downloader.params.get('default_search')
        if default_search is None:
            default_search = 'auto'

        if default_search == 'auto':
            if '/' in url:
                # A slash suggests a hostname/path: retry over http.
                self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
                return self.url_result('http://' + url)
            else:
                # Bare words are treated as a YouTube search query.
                return self.url_result('ytsearch:' + url)
        else:
            # default_search is expected to be an extractor prefix
            # such as 'ytsearch:'.
            assert ':' in default_search
            return self.url_result(default_search + url)

    video_id = os.path.splitext(url.split('/')[-1])[0]

    self.to_screen('%s: Requesting header' % video_id)

    try:
        response = self._send_head(url)

        # Check for redirect
        new_url = response.geturl()
        if url != new_url:
            self.report_following_redirect(new_url)
            return self.url_result(new_url)

        # Check for direct link to a video
        content_type = response.headers.get('Content-Type', '')
        m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
        if m:
            upload_date = response.headers.get('Last-Modified')
            if upload_date:
                upload_date = unified_strdate(upload_date)
            return {
                'id': video_id,
                'title': os.path.splitext(url_basename(url))[0],
                'formats': [{
                    'format_id': m.group('format_id'),
                    'url': url,
                    # Audio-only content carries no video codec.
                    'vcodec': 'none' if m.group('type') == 'audio' else None
                }],
                'upload_date': upload_date,
            }
    except compat_urllib_error.HTTPError:
        # This may be a stupid server that doesn't like HEAD, our UA, or so
        pass

    try:
        webpage = self._download_webpage(url, video_id)
    except ValueError:
        # since this is the last-resort InfoExtractor, if
        # this error is thrown, it'll be thrown here
        raise ExtractorError('Failed to download URL: %s' % url)

    self.report_extraction(video_id)

    # Is it an RSS feed?
    try:
        doc = xml.etree.ElementTree.fromstring(webpage.encode('utf-8'))
        if doc.tag == 'rss':
            return self._extract_rss(url, video_id, doc)
    except compat_xml_parse_error:
        pass

    # it's tempting to parse this further, but you would
    # have to take into account all the variations like
    #   Video Title - Site Name
    #   Site Name | Video Title
    #   Video Title - Tagline | Site Name
    # and so on and so forth; it's just not practical
    video_title = self._html_search_regex(
        r'(?s)<title>(.*?)</title>', webpage, 'video title',
        default='video')

    # video uploader is domain name
    video_uploader = self._search_regex(
        r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')

    # Look for BrightCove:
    bc_urls = BrightcoveIE._extract_brightcove_urls(webpage)
    if bc_urls:
        self.to_screen('Brightcove video detected.')
        # Smuggle the page URL so Brightcove can send it as Referer.
        entries = [{
            '_type': 'url',
            'url': smuggle_url(bc_url, {'Referer': url}),
            'ie_key': 'Brightcove'
        } for bc_url in bc_urls]
        return {
            '_type': 'playlist',
            'title': video_title,
            'id': video_id,
            'entries': entries,
        }

    # Look for embedded (iframe) Vimeo player
    mobj = re.search(
        r'<iframe[^>]+?src="((?:https?:)?//player\.vimeo\.com/video/.+?)"', webpage)
    if mobj:
        player_url = unescapeHTML(mobj.group(1))
        surl = smuggle_url(player_url, {'Referer': url})
        return self.url_result(surl, 'Vimeo')

    # Look for embedded (swf embed) Vimeo player
    mobj = re.search(
        r'<embed[^>]+?src="(https?://(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
    if mobj:
        return self.url_result(mobj.group(1), 'Vimeo')

    # Look for embedded YouTube player
    # (?x) verbose regex: whitespace inside the pattern is insignificant.
    matches = re.findall(r'''(?x)
(?:<iframe[^>]+?src=|embedSWF\(\s*)
(["\'])(?P<url>(?:https?:)?//(?:www\.)?youtube\.com/
(?:embed|v)/.+?)
\1''', webpage)
    if matches:
        # findall returns (quote, url) tuples; tuppl[1] is the URL.
        urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Youtube')
                 for tuppl in matches]
        return self.playlist_result(
            urlrs, playlist_id=video_id, playlist_title=video_title)

    # Look for embedded Dailymotion player
    matches = re.findall(
        r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/embed/video/.+?)\1', webpage)
    if matches:
        urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Dailymotion')
                 for tuppl in matches]
        return self.playlist_result(
            urlrs, playlist_id=video_id, playlist_title=video_title)

    # Look for embedded Wistia player
    match = re.search(
        r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
    if match:
        return {
            '_type': 'url_transparent',
            'url': unescapeHTML(match.group('url')),
            'ie_key': 'Wistia',
            'uploader': video_uploader,
            'title': video_title,
            'id': video_id,
        }

    # Look for embedded blip.tv player
    mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
    if mobj:
        return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV')
    mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9]+)', webpage)
    if mobj:
        return self.url_result(mobj.group(1), 'BlipTV')

    # Look for Bandcamp pages with custom domain
    mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
    if mobj is not None:
        burl = unescapeHTML(mobj.group(1))
        # Don't set the extractor because it can be a track url or an album
        return self.url_result(burl)

    # Look for embedded Vevo player
    mobj = re.search(
        r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
    if mobj is not None:
        return self.url_result(mobj.group('url'))

    # Look for Ooyala videos
    mobj = re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=([^"&]+)', webpage)
    if mobj is not None:
        return OoyalaIE._build_url_result(mobj.group(1))

    # Look for Aparat videos
    mobj = re.search(r'<iframe src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
    if mobj is not None:
        return self.url_result(mobj.group(1), 'Aparat')

    # Look for MPORA videos
    mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
    if mobj is not None:
        return self.url_result(mobj.group(1), 'Mpora')

    # Look for embedded Novamov player
    mobj = re.search(
        r'<iframe[^>]+?src=(["\'])(?P<url>http://(?:(?:embed|www)\.)?novamov\.com/embed\.php.+?)\1', webpage)
    if mobj is not None:
        return self.url_result(mobj.group('url'), 'Novamov')

    # Look for embedded Facebook player
    mobj = re.search(
        r'<iframe[^>]+?src=(["\'])(?P<url>https://www\.facebook\.com/video/embed.+?)\1', webpage)
    if mobj is not None:
        return self.url_result(mobj.group('url'), 'Facebook')

    # Look for embedded Huffington Post player
    mobj = re.search(
        r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
    if mobj is not None:
        return self.url_result(mobj.group('url'), 'HuffPost')

    # Look for embed.ly
    mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
    if mobj is not None:
        return self.url_result(mobj.group('url'))
    mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
    if mobj is not None:
        # The target URL is percent-encoded inside the src attribute.
        return self.url_result(compat_urllib_parse.unquote(mobj.group('url')))

    # Start with something easy: JW Player in SWFObject
    mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
    if mobj is None:
        # Look for gorilla-vid style embedding
        mobj = re.search(r'(?s)(?:jw_plugins|JWPlayerOptions).*?file\s*:\s*["\'](.*?)["\']', webpage)
    if mobj is None:
        # Broaden the search a little bit
        mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
    if mobj is None:
        # Broaden the search a little bit: JWPlayer JS loader
        mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
    if mobj is None:
        # Try to find twitter cards info
        mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
    if mobj is None:
        # We look for Open Graph info:
        # We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
        m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
        # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
        if m_video_type is not None:
            mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
    if mobj is None:
        # HTML5 video
        mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)
    if mobj is None:
        raise ExtractorError('Unsupported URL: %s' % url)

    # It's possible that one of the regexes
    # matched, but returned an empty group:
    if mobj.group(1) is None:
        raise ExtractorError('Did not find a valid video URL at %s' % url)

    video_url = mobj.group(1)
    # Resolve relative media URLs against the page URL.
    video_url = compat_urlparse.urljoin(url, video_url)
    video_id = compat_urllib_parse.unquote(os.path.basename(video_url))

    # Sometimes, jwplayer extraction will result in a YouTube URL
    if YoutubeIE.suitable(video_url):
        return self.url_result(video_url, 'Youtube')

    # here's a fun little line of code for you:
    video_id = os.path.splitext(video_id)[0]

    return {
        'id': video_id,
        'url': video_url,
        'uploader': video_uploader,
        'title': video_title,
    }