from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    fix_xml_ampersands,
    parse_duration,
    qualities,
    strip_jsonp,
    unified_strdate,
)


class NPOBaseIE(InfoExtractor):
    def _get_token(self, video_id):
        token_page = self._download_webpage(
            'http://ida.omroep.nl/npoplayer/i.js',
            video_id, note='Downloading token')
        token = self._search_regex(
            r'npoplayer\.token = "(.+?)"', token_page, 'token')
        # Decryption algorithm extracted from http://npoplayer.omroep.nl/csjs/npoplayer-min.js
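        # The token is descrambled by swapping the first two digit characters
        # found between positions 5 and len - 5; if fewer than two digits are
        # present, positions 12 and 13 are swapped instead.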
        token_l = list(token)
        first = second = None
        for i in range(5, len(token_l) - 4):
            if token_l[i].isdigit():
                if first is None:
                    first = i
                elif second is None:
                    second = i
        if first is None or second is None:
            first = 12
            second = 13

        token_l[first], token_l[second] = token_l[second], token_l[first]

        return ''.join(token_l)


class NPOIE(NPOBaseIE):
    IE_NAME = 'npo'
    IE_DESC = 'npo.nl and ntr.nl'
    _VALID_URL = r'''(?x)
                    (?:
                        npo:|
                        https?://
                            (?:www\.)?
                            (?:
                                npo\.nl/(?!live|radio)(?:[^/]+/){2}|
                                ntr\.nl/(?:[^/]+/){2,}|
                                omroepwnl\.nl/video/fragment/[^/]+__
                            )
                        )
                        (?P<id>[^/?#]+)
                '''

    _TESTS = [
        {
            'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
            'md5': '4b3f9c429157ec4775f2c9cb7b911016',
            'info_dict': {
                'id': 'VPWON_1220719',
                'ext': 'm4v',
                'title': 'Nieuwsuur',
                'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
                'upload_date': '20140622',
            },
        },
        {
            'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
            'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
            'info_dict': {
                'id': 'VARA_101191800',
                'ext': 'm4v',
                'title': 'De Mega Mike & Mega Thomas show',
                'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
                'upload_date': '20090227',
                'duration': 2400,
            },
        },
        {
            'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
            'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
            'info_dict': {
                'id': 'VPWON_1169289',
                'ext': 'm4v',
                'title': 'Tegenlicht',
                'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
                'upload_date': '20130225',
                'duration': 3000,
            },
        },
        {
            'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
            'info_dict': {
                'id': 'WO_VPRO_043706',
                'ext': 'wmv',
                'title': 'De nieuwe mens - Deel 1',
                'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
                'duration': 4680,
            },
            'params': {
                # mplayer mms download
                'skip_download': True,
            }
        },
        # non asf in streams
        {
            'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
            'md5': 'b3da13de374cbe2d5332a7e910bef97f',
            'info_dict': {
                'id': 'WO_NOS_762771',
                'ext': 'mp4',
                'title': 'Hoe gaat Europa verder na Parijs?',
            },
        },
        {
            'url': 'http://www.ntr.nl/Aap-Poot-Pies/27/detail/Aap-poot-pies/VPWON_1233944#content',
            'md5': '01c6a2841675995da1f0cf776f03a9c3',
            'info_dict': {
                'id': 'VPWON_1233944',
                'ext': 'm4v',
                'title': 'Aap, poot, pies',
                'description': 'md5:c9c8005d1869ae65b858e82c01a91fde',
                'upload_date': '20150508',
                'duration': 599,
            },
        },
        {
            'url': 'http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698',
            'md5': 'd30cd8417b8b9bca1fdff27428860d08',
            'info_dict': {
                'id': 'POW_00996502',
                'ext': 'm4v',
                'title': '''"Dit is wel een 'landslide'..."''',
                'description': 'md5:f8d66d537dfb641380226e31ca57b8e8',
                'upload_date': '20150508',
                'duration': 462,
            },
        }
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self._get_info(video_id)

    def _get_info(self, video_id):
        metadata = self._download_json(
            'http://e.omroep.nl/metadata/%s' % video_id,
            video_id,
            # We have to remove the javascript callback
            transform_source=strip_jsonp,
        )

        # For some videos actual video id (prid) is different (e.g. for
        # http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698
        # video id is POMS_WNL_853698 but prid is POW_00996502)
        video_id = metadata.get('prid') or video_id

        token = self._get_token(video_id)

        formats = []

        pubopties = metadata.get('pubopties')
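        # 'pubopties' lists the publication options (format ids) available for
        # this video; qualities() ranks them by list position, so format ids
        # later in the list below get a higher quality score.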
        if pubopties:
            quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
            for format_id in pubopties:
                format_info = self._download_json(
                    'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
                    % (video_id, format_id, token),
                    video_id, 'Downloading %s JSON' % format_id)
                if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
                    continue
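                # The odi response either points at the final media URL
                # directly or carries a 'streams' list whose first entry must
                # be fetched again (with &type=json) to obtain that URL.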
                streams = format_info.get('streams')
                if streams:
                    video_info = self._download_json(
                        streams[0] + '&type=json',
                        video_id, 'Downloading %s stream JSON' % format_id)
                else:
                    video_info = format_info
                video_url = video_info.get('url')
                if not video_url:
                    continue
                if format_id == 'adaptive':
                    formats.extend(self._extract_m3u8_formats(video_url, video_id))
                else:
                    formats.append({
                        'url': video_url,
                        'format_id': format_id,
                        'quality': quality(format_id),
                    })
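
        # The metadata 'streams' entries are legacy (mostly Windows Media)
        # URLs: ASX playlists are resolved to their first Ref entry, anything
        # else is added as-is with the reported 'kwaliteit' (quality) value.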
        streams = metadata.get('streams')
        if streams:
            for i, stream in enumerate(streams):
                stream_url = stream.get('url')
                if not stream_url:
                    continue
                if '.asf' not in stream_url:
                    formats.append({
                        'url': stream_url,
                        'quality': stream.get('kwaliteit'),
                    })
                    continue
                asx = self._download_xml(
                    stream_url, video_id,
                    'Downloading stream %d ASX playlist' % i,
                    transform_source=fix_xml_ampersands)
                ref = asx.find('./ENTRY/Ref')
                if ref is None:
                    continue
                video_url = ref.get('href')
                if not video_url:
                    continue
                formats.append({
                    'url': video_url,
                    'ext': stream.get('formaat', 'asf'),
                    'quality': stream.get('kwaliteit'),
                })

        self._sort_formats(formats)

        subtitles = {}
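        # 'tt888' refers to Teletekst page 888 (the Dutch closed-caption page);
        # a value of 'ja' ("yes") means subtitles are available for this video.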
        if metadata.get('tt888') == 'ja':
            subtitles['nl'] = [{
                'ext': 'vtt',
                'url': 'http://e.omroep.nl/tt888/%s' % video_id,
            }]

        return {
            'id': video_id,
            # prefer aflevering_titel if any since titel may be too generic, e.g.
            # http://tegenlicht.vpro.nl/afleveringen/2014-2015/access-to-africa.html
            'title': metadata.get('aflevering_titel') or metadata['titel'],
            'description': metadata.get('info'),
            'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
            'upload_date': unified_strdate(metadata.get('gidsdatum')),
            'duration': parse_duration(metadata.get('tijdsduur')),
            'formats': formats,
            'subtitles': subtitles,
        }


class NPOLiveIE(NPOBaseIE):
    IE_NAME = 'npo.nl:live'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)'

    _TEST = {
        'url': 'http://www.npo.nl/live/npo-1',
        'info_dict': {
            'id': 'LI_NEDERLAND1_136692',
            'display_id': 'npo-1',
            'ext': 'mp4',
            'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'Livestream',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        live_id = self._search_regex(
            r'data-prid="([^"]+)"', webpage, 'live id')

        metadata = self._download_json(
            'http://e.omroep.nl/metadata/%s' % live_id,
            display_id, transform_source=strip_jsonp)

        token = self._get_token(display_id)

        formats = []

        streams = metadata.get('streams')
        if streams:
            for stream in streams:
                stream_type = stream.get('type').lower()
                # smooth streaming is not supported
                if stream_type in ['ss', 'ms']:
                    continue
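                # Resolving a live stream is a two-step process: the aapi
                # endpoint returns JSON with a 'stream' URL, and that URL in
                # turn returns a JSONP-wrapped string with the actual manifest.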
                stream_info = self._download_json(
                    'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
                    % (stream.get('url'), token),
                    display_id, 'Downloading %s JSON' % stream_type)
                if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
                    continue
                stream_url = self._download_json(
                    stream_info['stream'], display_id,
                    'Downloading %s URL' % stream_type,
                    'Unable to download %s URL' % stream_type,
                    transform_source=strip_jsonp, fatal=False)
                if not stream_url:
                    continue
                if stream_type == 'hds':
                    f4m_formats = self._extract_f4m_formats(stream_url, display_id)
                    # the f4m downloader downloads only a piece of the live stream
                    for f4m_format in f4m_formats:
                        f4m_format['preference'] = -1
                    formats.extend(f4m_formats)
                elif stream_type == 'hls':
                    formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
                else:
                    formats.append({
                        'url': stream_url,
                        'preference': -10,
                    })

        self._sort_formats(formats)

        return {
            'id': live_id,
            'display_id': display_id,
            'title': self._live_title(metadata['titel']),
            'description': metadata['info'],
            'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
            'formats': formats,
            'is_live': True,
        }


class NPORadioIE(InfoExtractor):
    IE_NAME = 'npo.nl:radio'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$'

    _TEST = {
        'url': 'http://www.npo.nl/radio/radio-1',
        'info_dict': {
            'id': 'radio-1',
            'ext': 'mp3',
            'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }
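
    # The live radio page exposes the channel name and a small JSON object
    # with the stream URL and codec as single-quoted HTML attributes
    # (data-channel and data-streams), hence the attribute regex helper below.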
    @staticmethod
    def _html_get_attribute_regex(attribute):
        return r'{0}\s*=\s*\'([^\']+)\''.format(attribute)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            self._html_get_attribute_regex('data-channel'), webpage, 'title')

        stream = self._parse_json(
            self._html_search_regex(self._html_get_attribute_regex('data-streams'), webpage, 'data-streams'),
            video_id)

        codec = stream.get('codec')

        return {
            'id': video_id,
            'url': stream['url'],
            'title': self._live_title(title),
            'acodec': codec,
            'ext': codec,
            'is_live': True,
        }


class NPORadioFragmentIE(InfoExtractor):
    IE_NAME = 'npo.nl:radio:fragment'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
        'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
        'info_dict': {
            'id': '174356',
            'ext': 'mp3',
            'title': 'Jubileumconcert Willeke Alberti',
        },
    }

    def _real_extract(self, url):
        audio_id = self._match_id(url)

        webpage = self._download_webpage(url, audio_id)

        title = self._html_search_regex(
            r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % audio_id,
            webpage, 'title')

        audio_url = self._search_regex(
            r"data-streams='([^']+)'", webpage, 'audio url')

        return {
            'id': audio_id,
            'url': audio_url,
            'title': title,
        }


class VPROIE(NPOIE):
    _VALID_URL = r'https?://(?:www\.)?(?:tegenlicht\.)?vpro\.nl/(?:[^/]+/){2,}(?P<id>[^/]+)\.html'

    _TESTS = [
        {
            'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
            'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
            'info_dict': {
                'id': 'VPWON_1169289',
                'ext': 'm4v',
                'title': 'De toekomst komt uit Afrika',
                'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
                'upload_date': '20130225',
            },
        },
        {
            'url': 'http://www.vpro.nl/programmas/2doc/2015/sergio-herman.html',
            'info_dict': {
                'id': 'sergio-herman',
                'title': 'Sergio Herman: Fucking perfect',
            },
            'playlist_count': 2,
        },
        {
            # playlist with youtube embed
            'url': 'http://www.vpro.nl/programmas/2doc/2015/education-education.html',
            'info_dict': {
                'id': 'education-education',
                'title': '2Doc',
            },
            'playlist_count': 2,
        }
    ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)
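
        # Each data-media-id attribute holds either a bare NPO media id (turned
        # into an npo: URL and handled by NPOIE) or a full URL, e.g. a YouTube
        # embed, which is passed through unchanged.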
        entries = [
            self.url_result('npo:%s' % video_id if not video_id.startswith('http') else video_id)
            for video_id in re.findall(r'data-media-id="([^"]+)"', webpage)
        ]

        playlist_title = self._search_regex(
            r'<title>\s*([^>]+?)\s*-\s*Teledoc\s*-\s*VPRO\s*</title>',
            webpage, 'playlist title', default=None) or self._og_search_title(webpage)

        return self.playlist_result(entries, playlist_id, playlist_title)


class WNLIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?omroepwnl\.nl/video/detail/(?P<id>[^/]+)__\d+'

    _TEST = {
        'url': 'http://www.omroepwnl.nl/video/detail/vandaag-de-dag-6-mei__060515',
        'info_dict': {
            'id': 'vandaag-de-dag-6-mei',
            'title': 'Vandaag de Dag 6 mei',
        },
        'playlist_count': 4,
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)
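
        # Episodes are split into parts, each linked by an anchor with class
        # js-mid and text 'Deel <n>'; the captured href value is prefixed with
        # 'npo:' and delegated to NPOIE.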
        entries = [
            self.url_result('npo:%s' % video_id, 'NPO')
            for video_id, part in re.findall(
                r'<a[^>]+href="([^"]+)"[^>]+class="js-mid"[^>]*>(Deel \d+)', webpage)
        ]

        playlist_title = self._html_search_regex(
            r'(?s)<h1[^>]+class="subject"[^>]*>(.+?)</h1>',
            webpage, 'playlist title')

        return self.playlist_result(entries, playlist_id, playlist_title)