# coding: utf-8
from __future__ import unicode_literals

import datetime
import itertools
import json
import re

from .common import InfoExtractor, SearchInfoExtractor
from ..postprocessor.ffmpeg import FFmpegPostProcessor
from ..compat import (
    compat_parse_qs,
    compat_str,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    ExtractorError,
    dict_get,
    float_or_none,
    int_or_none,
    OnDemandPagedList,
    parse_duration,
    parse_iso8601,
    PostProcessingError,
    remove_start,
    str_or_none,
    try_get,
    unified_timestamp,
    urlencode_postdata,
    xpath_text,
)

class NiconicoIE(InfoExtractor):
    IE_NAME = 'niconico'
    IE_DESC = 'ニコニコ動画'

    _TESTS = [{
        'url': 'http://www.nicovideo.jp/watch/sm22312215',
        'md5': 'a5bad06f1347452102953f323c69da34s',
        'info_dict': {
            'id': 'sm22312215',
            'ext': 'mp4',
            'title': 'Big Buck Bunny',
            'thumbnail': r're:https?://.*',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': int,  # timestamp is unstable
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
            'duration': 33,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # File downloaded with and without credentials are different, so omit
        # the md5 field
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
        'info_dict': {
            'id': 'nm14296458',
            'ext': 'swf',
            'title': '【鏡音リン】Dance on media【オリジナル】take2!',
            'description': 'md5:689f066d74610b3b22e0f1739add0f58',
            'thumbnail': r're:https?://.*',
            'uploader': 'りょうた',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
            'duration': 209,
        },
        'skip': 'Requires an account',
    }, {
        # video exists but is marked as "deleted"
        # md5 is unstable
        'url': 'http://www.nicovideo.jp/watch/sm10000',
        'info_dict': {
            'id': 'sm10000',
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'thumbnail': r're:https?://.*',
            'upload_date': '20071224',
            'timestamp': int,  # timestamp field has different value if logged in
            'duration': 304,
            'view_count': int,
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://www.nicovideo.jp/watch/so22543406',
        'info_dict': {
            'id': '1388129933',
            'ext': 'mp4',
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'thumbnail': r're:https?://.*',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',
            'uploader_id': '312',
        },
        'skip': 'The viewing period of the video you were searching for has expired.',
    }, {
        # video not available via `getflv`; "old" HTML5 video
        'url': 'http://www.nicovideo.jp/watch/sm1151009',
        'md5': '8fa81c364eb619d4085354eab075598a',
        'info_dict': {
            'id': 'sm1151009',
            'ext': 'mp4',
            'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
            'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7',
            'thumbnail': r're:https?://.*',
            'duration': 184,
            'timestamp': 1190868283,
            'upload_date': '20070927',
            'uploader': 'denden2',
            'uploader_id': '1392194',
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # "New" HTML5 video
        # md5 is unstable
        'url': 'http://www.nicovideo.jp/watch/sm31464864',
        'info_dict': {
            'id': 'sm31464864',
            'ext': 'mp4',
            'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
            'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
            'timestamp': 1498514060,
            'upload_date': '20170626',
            'uploader': 'ゲスト',
            'uploader_id': '40826363',
            'thumbnail': r're:https?://.*',
            'duration': 198,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        # Video without owner
        'url': 'http://www.nicovideo.jp/watch/sm18238488',
        'md5': 'd265680a1f92bdcbbd2a507fc9e78a9e',
        'info_dict': {
            'id': 'sm18238488',
            'ext': 'mp4',
            'title': '【実写版】ミュータントタートルズ',
            'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
            'timestamp': 1341160408,
            'upload_date': '20120701',
            'uploader': None,
            'uploader_id': None,
            'thumbnail': r're:https?://.*',
            'duration': 5271,
            'view_count': int,
            'comment_count': int,
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
        'only_matching': True,
    }]

    _VALID_URL = r'https?://(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'

    _API_HEADERS = {
        'X-Frontend-ID': '6',
        'X-Frontend-Version': '0'
    }

    def _real_initialize(self):
        self._login()

    def _login(self):
        username, password = self._get_login_info()
        # No authentication to be performed
        if not username:
            return True

        # Log in
        login_ok = True
        login_form_strs = {
            'mail_tel': username,
            'password': password,
        }
        urlh = self._request_webpage(
            'https://account.nicovideo.jp/api/v1/login', None,
            note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata(login_form_strs))
        if urlh is False:
            login_ok = False
        else:
            parts = compat_urllib_parse_urlparse(urlh.geturl())
            if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login':
                login_ok = False
        if not login_ok:
            self._downloader.report_warning('unable to log in: bad username or password')
        return login_ok

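    # The DMC session negotiated below expires unless it is refreshed: the downloader
    # must re-send the session data to the session API with a PUT "heartbeat" request
    # every `interval` seconds. This method returns both the rewritten info_dict
    # (with the real content_uri) and the heartbeat request description.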
    def _get_heartbeat_info(self, info_dict):
        video_id, video_src_id, audio_src_id = info_dict['url'].split(':')[1].split('/')

        api_data = (
            info_dict.get('_api_data')
            or self._parse_json(
                self._html_search_regex(
                    'data-api-data="([^"]+)"',
                    self._download_webpage('http://www.nicovideo.jp/watch/' + video_id, video_id),
                    'API data', default='{}'),
                video_id))

        session_api_data = try_get(api_data, lambda x: x['media']['delivery']['movie']['session'])
        session_api_endpoint = try_get(session_api_data, lambda x: x['urls'][0])

        def ping():
            status = try_get(
                self._download_json(
                    'https://nvapi.nicovideo.jp/v1/2ab0cbaa/watch', video_id,
                    query={'t': try_get(api_data, lambda x: x['media']['delivery']['trackingId'])},
                    note='Acquiring permission for downloading video',
                    headers=self._API_HEADERS),
                lambda x: x['meta']['status'])
            if status != 200:
                self.report_warning('Failed to acquire permission for playing video. The video may not download.')

        yesno = lambda x: 'yes' if x else 'no'

        # m3u8 (encryption)
        if try_get(api_data, lambda x: x['media']['delivery']['encryption']) is not None:
            protocol = 'm3u8'
            encryption = self._parse_json(session_api_data['token'], video_id)['hls_encryption']
            session_api_http_parameters = {
                'parameters': {
                    'hls_parameters': {
                        'encryption': {
                            encryption: {
                                'encrypted_key': try_get(api_data, lambda x: x['media']['delivery']['encryption']['encryptedKey']),
                                'key_uri': try_get(api_data, lambda x: x['media']['delivery']['encryption']['keyUri'])
                            }
                        },
                        'transfer_preset': '',
                        'use_ssl': yesno(session_api_endpoint['isSsl']),
                        'use_well_known_port': yesno(session_api_endpoint['isWellKnownPort']),
                        'segment_duration': 6000,
                    }
                }
            }
        # http
        else:
            protocol = 'http'
            session_api_http_parameters = {
                'parameters': {
                    'http_output_download_parameters': {
                        'use_ssl': yesno(session_api_endpoint['isSsl']),
                        'use_well_known_port': yesno(session_api_endpoint['isWellKnownPort']),
                    }
                }
            }

        session_response = self._download_json(
            session_api_endpoint['url'], video_id,
            query={'_format': 'json'},
            headers={'Content-Type': 'application/json'},
            note='Downloading JSON metadata for %s' % info_dict['format_id'],
            data=json.dumps({
                'session': {
                    'client_info': {
                        'player_id': session_api_data.get('playerId'),
                    },
                    'content_auth': {
                        'auth_type': try_get(session_api_data, lambda x: x['authTypes'][session_api_data['protocols'][0]]),
                        'content_key_timeout': session_api_data.get('contentKeyTimeout'),
                        'service_id': 'nicovideo',
                        'service_user_id': session_api_data.get('serviceUserId')
                    },
                    'content_id': session_api_data.get('contentId'),
                    'content_src_id_sets': [{
                        'content_src_ids': [{
                            'src_id_to_mux': {
                                'audio_src_ids': [audio_src_id],
                                'video_src_ids': [video_src_id],
                            }
                        }]
                    }],
                    'content_type': 'movie',
                    'content_uri': '',
                    'keep_method': {
                        'heartbeat': {
                            'lifetime': session_api_data.get('heartbeatLifetime')
                        }
                    },
                    'priority': session_api_data.get('priority'),
                    'protocol': {
                        'name': 'http',
                        'parameters': {
                            'http_parameters': session_api_http_parameters
                        }
                    },
                    'recipe_id': session_api_data.get('recipeId'),
                    'session_operation_auth': {
                        'session_operation_auth_by_signature': {
                            'signature': session_api_data.get('signature'),
                            'token': session_api_data.get('token'),
                        }
                    },
                    'timing_constraint': 'unlimited'
                }
            }).encode())

        info_dict['url'] = session_response['data']['session']['content_uri']
        info_dict['protocol'] = protocol

        # get heartbeat info
        heartbeat_info_dict = {
            'url': session_api_endpoint['url'] + '/' + session_response['data']['session']['id'] + '?_format=json&_method=PUT',
            'data': json.dumps(session_response['data']),
            # interval, convert milliseconds to seconds, then halve to make a buffer.
            'interval': float_or_none(session_api_data.get('heartbeatLifetime'), scale=3000),
            'ping': ping
        }

        return info_dict, heartbeat_info_dict

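    # DMC formats carry a synthetic URL of the form
    # "niconico_dmc:<video_id>/<video_src_id>/<audio_src_id>"; _get_heartbeat_info()
    # above splits it back apart when the actual session is negotiated.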
    def _extract_format_for_quality(self, api_data, video_id, audio_quality, video_quality):
        def parse_format_id(id_code):
            mobj = re.match(r'''(?x)
                    (?:archive_)?
                    (?:(?P<codec>[^_]+)_)?
                    (?:(?P<br>[\d]+)kbps_)?
                    (?:(?P<res>[\d+]+)p_)?
                ''', '%s_' % id_code)
            return mobj.groupdict() if mobj else {}

        protocol = 'niconico_dmc'
        format_id = '-'.join(map(lambda s: remove_start(s['id'], 'archive_'), [video_quality, audio_quality]))
        vdict = parse_format_id(video_quality['id'])
        adict = parse_format_id(audio_quality['id'])
        resolution = try_get(video_quality, lambda x: x['metadata']['resolution'], dict) or {'height': vdict.get('res')}
        vbr = try_get(video_quality, lambda x: x['metadata']['bitrate'], float)

        return {
            'url': '%s:%s/%s/%s' % (protocol, video_id, video_quality['id'], audio_quality['id']),
            'format_id': format_id,
            'format_note': 'DMC %s' % try_get(video_quality, lambda x: x['metadata']['label'], compat_str),
            'ext': 'mp4',  # Session API are used in HTML5, which always serves mp4
            'vcodec': vdict.get('codec'),
            'acodec': adict.get('codec'),
            'vbr': float_or_none(vbr, 1000) or float_or_none(vdict.get('br')),
            'abr': float_or_none(audio_quality.get('bitrate'), 1000) or float_or_none(adict.get('br')),
            'height': int_or_none(resolution.get('height', vdict.get('res'))),
            'width': int_or_none(resolution.get('width')),
            'quality': -2 if 'low' in format_id else -1,  # Default quality value is -1
            'protocol': protocol,
            'http_headers': {
                'Origin': 'https://www.nicovideo.jp',
                'Referer': 'https://www.nicovideo.jp/watch/' + video_id,
            }
        }

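    # _real_extract gathers two families of formats: the DMC/HTML5 formats built by
    # _extract_format_for_quality() above and, when the page still exposes a legacy
    # "smile" URL, a single flv/swf/mp4 format whose codecs and resolution are
    # probed with ffprobe.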
    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Get video webpage for API data.
        webpage, handle = self._download_webpage_handle(
            'http://www.nicovideo.jp/watch/' + video_id, video_id)
        if video_id.startswith('so'):
            video_id = self._match_id(handle.geturl())

        api_data = self._parse_json(self._html_search_regex(
            'data-api-data="([^"]+)"', webpage,
            'API data', default='{}'), video_id)

        def get_video_info_web(items):
            return dict_get(api_data['video'], items)

        # Get video info
        video_info_xml = self._download_xml(
            'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id,
            video_id, note='Downloading video info page')

        def get_video_info_xml(items):
            if not isinstance(items, list):
                items = [items]
            for item in items:
                ret = xpath_text(video_info_xml, './/' + item)
                if ret:
                    return ret

        if get_video_info_xml('error'):
            error_code = get_video_info_xml('code')

            if error_code == 'DELETED':
                raise ExtractorError('The video has been deleted.',
                                     expected=True)
            elif error_code == 'NOT_FOUND':
                raise ExtractorError('The video is not found.',
                                     expected=True)
            elif error_code == 'COMMUNITY':
                self.to_screen('%s: The video is community members only.' % video_id)
            else:
                raise ExtractorError('%s reports error: %s' % (self.IE_NAME, error_code))

        # Start extracting video formats
        formats = []

        # Get HTML5 videos info
        quality_info = try_get(api_data, lambda x: x['media']['delivery']['movie'])
        if not quality_info:
            raise ExtractorError('The video can\'t be downloaded', expected=True)

        for audio_quality in quality_info.get('audios') or {}:
            for video_quality in quality_info.get('videos') or {}:
                if not audio_quality.get('isAvailable') or not video_quality.get('isAvailable'):
                    continue
                formats.append(self._extract_format_for_quality(
                    api_data, video_id, audio_quality, video_quality))

        # Get flv/swf info
        timestamp = None
        video_real_url = try_get(api_data, lambda x: x['video']['smileInfo']['url'])
        if video_real_url:
            is_economy = video_real_url.endswith('low')

            if is_economy:
                self.report_warning('Site is currently in economy mode! You will only have access to lower quality streams')

            # Invoking ffprobe to determine resolution
            pp = FFmpegPostProcessor(self._downloader)
            cookies = self._get_cookies('https://nicovideo.jp').output(header='', sep='; path=/; domain=nicovideo.jp;\n')

            self.to_screen('%s: %s' % (video_id, 'Checking smile format with ffprobe'))

            try:
                metadata = pp.get_metadata_object(video_real_url, ['-cookies', cookies])
            except PostProcessingError as err:
                raise ExtractorError(err.msg, expected=True)

            v_stream = a_stream = {}

            # Some complex swf files doesn't have video stream (e.g. nm4809023)
            for stream in metadata['streams']:
                if stream['codec_type'] == 'video':
                    v_stream = stream
                elif stream['codec_type'] == 'audio':
                    a_stream = stream

            # Community restricted videos seem to have issues with the thumb API not returning anything at all
            filesize = int(
                (get_video_info_xml('size_high') if not is_economy else get_video_info_xml('size_low'))
                or metadata['format']['size']
            )
            extension = (
                get_video_info_xml('movie_type')
                or 'mp4' if 'mp4' in metadata['format']['format_name'] else metadata['format']['format_name']
            )

            # 'creation_time' tag on video stream of re-encoded SMILEVIDEO mp4 files are '1970-01-01T00:00:00.000000Z'.
            timestamp = (
                parse_iso8601(get_video_info_web('first_retrieve'))
                or unified_timestamp(get_video_info_web('postedDateTime'))
            )
            metadata_timestamp = (
                parse_iso8601(try_get(v_stream, lambda x: x['tags']['creation_time']))
                or timestamp if extension != 'mp4' else 0
            )

            # According to compconf, smile videos from pre-2017 are always better quality than their DMC counterparts
            smile_threshold_timestamp = parse_iso8601('2016-12-08T00:00:00+09:00')

            is_source = timestamp < smile_threshold_timestamp or metadata_timestamp > 0

            # If movie file size is unstable, old server movie is not source movie.
            if filesize > 1:
                formats.append({
                    'url': video_real_url,
                    'format_id': 'smile' if not is_economy else 'smile_low',
                    'format_note': 'SMILEVIDEO source' if not is_economy else 'SMILEVIDEO low quality',
                    'ext': extension,
                    'container': extension,
                    'vcodec': v_stream.get('codec_name'),
                    'acodec': a_stream.get('codec_name'),
                    # Some complex swf files doesn't have total bit rate metadata (e.g. nm6049209)
                    'tbr': int_or_none(metadata['format'].get('bit_rate'), scale=1000),
                    'vbr': int_or_none(v_stream.get('bit_rate'), scale=1000),
                    'abr': int_or_none(a_stream.get('bit_rate'), scale=1000),
                    'height': int_or_none(v_stream.get('height')),
                    'width': int_or_none(v_stream.get('width')),
                    'source_preference': 5 if not is_economy else -2,
                    'quality': 5 if is_source and not is_economy else None,
                    'filesize': filesize
                })

        self._sort_formats(formats)

        # Start extracting information
        title = (
            get_video_info_xml('title')  # prefer to get the untranslated original title
            or get_video_info_web(['originalTitle', 'title'])
            or self._og_search_title(webpage, default=None)
            or self._html_search_regex(
                r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
                webpage, 'video title'))

        watch_api_data_string = self._html_search_regex(
            r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
            webpage, 'watch api data', default=None)
        watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
        video_detail = watch_api_data.get('videoDetail', {})

        thumbnail = (
            self._html_search_regex(r'<meta property="og:image" content="([^"]+)">', webpage, 'thumbnail data', default=None)
            or dict_get(  # choose highest from 720p to 240p
                get_video_info_web('thumbnail'),
                ['ogp', 'player', 'largeUrl', 'middleUrl', 'url'])
            or self._html_search_meta('image', webpage, 'thumbnail', default=None)
            or video_detail.get('thumbnail'))

        description = get_video_info_web('description')

        if not timestamp:
            match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
            if match:
                timestamp = parse_iso8601(match.replace('+', ':00+'))
        if not timestamp and video_detail.get('postedAt'):
            timestamp = parse_iso8601(
                video_detail['postedAt'].replace('/', '-'),
                delimiter=' ', timezone=datetime.timedelta(hours=9))
        timestamp = timestamp or try_get(api_data, lambda x: parse_iso8601(x['video']['registeredAt']))

        view_count = int_or_none(get_video_info_web(['view_counter', 'viewCount']))
        if not view_count:
            match = self._html_search_regex(
                r'>Views: <strong[^>]*>([^<]+)</strong>',
                webpage, 'view count', default=None)
            if match:
                view_count = int_or_none(match.replace(',', ''))
        view_count = (
            view_count
            or video_detail.get('viewCount')
            or try_get(api_data, lambda x: x['video']['count']['view']))

        comment_count = (
            int_or_none(get_video_info_web('comment_num'))
            or video_detail.get('commentCount')
            or try_get(api_data, lambda x: x['video']['count']['comment']))
        if not comment_count:
            match = self._html_search_regex(
                r'>Comments: <strong[^>]*>([^<]+)</strong>',
                webpage, 'comment count', default=None)
            if match:
                comment_count = int_or_none(match.replace(',', ''))

        duration = (parse_duration(
            get_video_info_web('length')
            or self._html_search_meta(
                'video:duration', webpage, 'video duration', default=None))
            or video_detail.get('length')
            or get_video_info_web('duration'))

        webpage_url = get_video_info_web('watch_url') or url

        # for channel movie and community movie
        channel_id = try_get(
            api_data,
            (lambda x: x['channel']['globalId'],
             lambda x: x['community']['globalId']))
        channel = try_get(
            api_data,
            (lambda x: x['channel']['name'],
             lambda x: x['community']['name']))

        # Note: cannot use api_data.get('owner', {}) because owner may be set to "null"
        # in the JSON, which will cause None to be returned instead of {}.
        owner = try_get(api_data, lambda x: x.get('owner'), dict) or {}
        uploader_id = str_or_none(
            get_video_info_web(['ch_id', 'user_id'])
            or owner.get('id')
            or channel_id
        )
        uploader = (
            get_video_info_web(['ch_name', 'user_nickname'])
            or owner.get('nickname')
            or channel
        )

        return {
            'id': video_id,
            '_api_data': api_data,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'description': description,
            'uploader': uploader,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'channel': channel,
            'channel_id': channel_id,
            'view_count': view_count,
            'comment_count': comment_count,
            'duration': duration,
            'webpage_url': webpage_url,
        }


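# Mylists are read through the nvapi v2 endpoint. The first request fetches a single
# item just to obtain the playlist metadata; OnDemandPagedList then pulls pages of 25
# entries lazily as the playlist is consumed.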
class NiconicoPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/(?:user/\d+/|my/)?mylist/(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://www.nicovideo.jp/mylist/27411728',
        'info_dict': {
            'id': '27411728',
            'title': 'AKB48のオールナイトニッポン',
            'description': 'md5:d89694c5ded4b6c693dea2db6e41aa08',
            'uploader': 'のっく',
            'uploader_id': '805442',
        },
        'playlist_mincount': 225,
    }, {
        'url': 'https://www.nicovideo.jp/user/805442/mylist/27411728',
        'only_matching': True,
    }]

    _API_HEADERS = {
        'X-Frontend-ID': '6',
        'X-Frontend-Version': '0'
    }

    def _real_extract(self, url):
        list_id = self._match_id(url)

        def get_page_data(pagenum, pagesize):
            return self._download_json(
                'http://nvapi.nicovideo.jp/v2/mylists/' + list_id, list_id,
                query={'page': 1 + pagenum, 'pageSize': pagesize},
                headers=self._API_HEADERS).get('data').get('mylist')

        data = get_page_data(0, 1)
        title = data.get('name')
        description = data.get('description')
        uploader = data.get('owner').get('name')
        uploader_id = data.get('owner').get('id')

        def pagefunc(pagenum):
            data = get_page_data(pagenum, 25)
            return ({
                '_type': 'url',
                'url': 'http://www.nicovideo.jp/watch/' + item.get('watchId'),
            } for item in data.get('items'))

        return {
            '_type': 'playlist',
            'id': list_id,
            'title': title,
            'description': description,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'entries': OnDemandPagedList(pagefunc, 25),
        }


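# The search extractors scrape the HTML search results rather than an API: _entries()
# walks page=1, 2, ... and collects the data-video-id attributes until a page comes
# back empty.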
class NicovideoSearchBaseIE(InfoExtractor):
    _MAX_RESULTS = float('inf')

    def _entries(self, url, item_id, query=None, note='Downloading page %(page)s'):
        query = query or {}
        pages = [query['page']] if 'page' in query else itertools.count(1)
        for page_num in pages:
            query['page'] = str(page_num)
            webpage = self._download_webpage(url, item_id, query=query, note=note % {'page': page_num})
            results = re.findall(r'(?<=data-video-id=)["\']?(?P<videoid>.+?)(?=["\'])', webpage)
            for item in results:
                yield self.url_result('http://www.nicovideo.jp/watch/%s' % item, 'Niconico', item)
            if not results:
                break

    def _get_n_results(self, query, n):
        entries = self._entries(self._proto_relative_url('//www.nicovideo.jp/search/%s' % query), query)
        if n < self._MAX_RESULTS:
            entries = itertools.islice(entries, 0, n)
        return self.playlist_result(entries, query, query)


class NicovideoSearchIE(NicovideoSearchBaseIE, SearchInfoExtractor):
    IE_DESC = 'Nico video search'
    IE_NAME = 'nicovideo:search'
    _SEARCH_KEY = 'nicosearch'

    def _search_results(self, query):
        return self._entries(
            self._proto_relative_url('//www.nicovideo.jp/search/%s' % query), query)


class NicovideoSearchURLIE(NicovideoSearchBaseIE):
    IE_NAME = '%s_url' % NicovideoSearchIE.IE_NAME
    IE_DESC = 'Nico video search URLs'
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/search/(?P<id>[^?#&]+)?'
    _TESTS = [{
        'url': 'http://www.nicovideo.jp/search/sm9',
        'info_dict': {
            'id': 'sm9',
            'title': 'sm9'
        },
        'playlist_mincount': 40,
    }, {
        'url': 'https://www.nicovideo.jp/search/sm9?sort=h&order=d&end=2020-12-31&start=2020-01-01',
        'info_dict': {
            'id': 'sm9',
            'title': 'sm9'
        },
        'playlist_count': 31,
    }]

    def _real_extract(self, url):
        query = self._match_id(url)
        return self.playlist_result(self._entries(url, query), query, query)


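# Date-ordered search: the site only serves a limited number of result pages per
# query, so the date range is bisected recursively whenever page _MAX_PAGES of a
# query still comes back full (_RESULTS_PER_PAGE entries).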
class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor):
    IE_DESC = 'Nico video search, newest first'
    IE_NAME = '%s:date' % NicovideoSearchIE.IE_NAME
    _SEARCH_KEY = 'nicosearchdate'
    _TESTS = [{
        'url': 'nicosearchdateall:a',
        'info_dict': {
            'id': 'a',
            'title': 'a'
        },
        'playlist_mincount': 1610,
    }]

    _START_DATE = datetime.date(2007, 1, 1)
    _RESULTS_PER_PAGE = 32
    _MAX_PAGES = 50

    def _entries(self, url, item_id, start_date=None, end_date=None):
        start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date()

        # If the last page has a full page of videos, we need to break down the query interval further
        last_page_len = len(list(self._get_entries_for_date(
            url, item_id, start_date, end_date, self._MAX_PAGES,
            note='Checking number of videos from {0} to {1}'.format(start_date, end_date))))
        if (last_page_len == self._RESULTS_PER_PAGE and start_date != end_date):
            midpoint = start_date + ((end_date - start_date) // 2)
            for entry in itertools.chain(
                    iter(self._entries(url, item_id, midpoint, end_date)),
                    iter(self._entries(url, item_id, start_date, midpoint))):
                yield entry
        else:
            self.to_screen('{0}: Downloading results from {1} to {2}'.format(item_id, start_date, end_date))
            for entry in iter(self._get_entries_for_date(
                    url, item_id, start_date, end_date, note=' Downloading page %(page)s')):
                yield entry

    def _get_entries_for_date(self, url, item_id, start_date, end_date=None, page_num=None, note=None):
        query = {
            'start': compat_str(start_date),
            'end': compat_str(end_date or start_date),
            'sort': 'f',
            'order': 'd',
        }
        if page_num:
            query['page'] = compat_str(page_num)

        for entry in iter(super(NicovideoSearchDateIE, self)._entries(url, item_id, query=query, note=note)):
            yield entry


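# A user's uploads are listed through the nvapi v1 endpoint, _PAGE_SIZE entries per
# request, until totalCount entries have been yielded.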
class NiconicoUserIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/user/(?P<id>\d+)/?(?:$|[#?])'
    _TEST = {
        'url': 'https://www.nicovideo.jp/user/419948',
        'info_dict': {
            'id': '419948',
        },
        'playlist_mincount': 101,
    }

    _API_URL = "https://nvapi.nicovideo.jp/v1/users/%s/videos?sortKey=registeredAt&sortOrder=desc&pageSize=%s&page=%s"
    _PAGE_SIZE = 100

    _API_HEADERS = {
        'X-Frontend-ID': '6',
        'X-Frontend-Version': '0'
    }

    def _entries(self, list_id):
        total_count = 1
        count = page_num = 0
        while count < total_count:
            json_parsed = self._download_json(
                self._API_URL % (list_id, self._PAGE_SIZE, page_num + 1), list_id,
                headers=self._API_HEADERS,
                note='Downloading JSON metadata%s' % (' page %d' % page_num if page_num else ''))
            if not page_num:
                total_count = int_or_none(json_parsed['data'].get('totalCount'))
            for entry in json_parsed["data"]["items"]:
                count += 1
                yield self.url_result('https://www.nicovideo.jp/watch/%s' % entry['id'])
            page_num += 1

    def _real_extract(self, url):
        list_id = self._match_id(url)
        return self.playlist_result(self._entries(list_id), list_id)