reddit.py

from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    float_or_none,
)


class RedditIE(InfoExtractor):
    _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)'
    _TEST = {
        # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
        'url': 'https://v.redd.it/zv89llsvexdz',
        'md5': '655d06ace653ea3b87bccfb1b27ec99d',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'zv89llsvexdz',
        },
        'params': {
            'format': 'bestvideo',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # v.redd.it serves both an HLS and a DASH manifest at predictable
        # paths; collect formats from both, tolerating failure of either one
        formats = self._extract_m3u8_formats(
            'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id,
            'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)

        formats.extend(self._extract_mpd_formats(
            'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id,
            mpd_id='dash', fatal=False))

        return {
            'id': video_id,
            'title': video_id,
            'formats': formats,
        }
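
    # Illustrative shape of the dict returned above. The entries in 'formats'
    # come from _extract_m3u8_formats/_extract_mpd_formats; the keys shown are
    # typical format-dict keys, not guaranteed values:
    #
    #   {'id': 'zv89llsvexdz',
    #    'title': 'zv89llsvexdz',
    #    'formats': [
    #        {'format_id': 'hls-...', 'ext': 'mp4',
    #         'protocol': 'm3u8_native', 'url': ...},
    #        {'format_id': 'dash-...', 'ext': 'mp4', 'url': ...},
    #    ]}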


class RedditRIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }]

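    # For reference, a sketch of the slice of reddit's listing JSON that
    # _real_extract below consumes. The field names are exactly the ones the
    # code accesses; the values shown are illustrative, not fetched:
    #
    #   [{'data': {'children': [{'data': {
    #       'url': 'https://v.redd.it/zv89llsvexdz',
    #       'title': 'That small heart attack.',
    #       'thumbnail': 'https://...jpg',
    #       'created_utc': 1501941939.0,
    #       'author': 'Antw87',
    #       'ups': 0, 'downs': 0, 'num_comments': 0,
    #       'over_18': False,
    #   }}]}}, ...]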
    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Appending '.json' to a reddit comments URL returns the post data as JSON
        data = self._download_json(
            url + '.json', video_id)[0]['data']['children'][0]['data']

        video_url = data['url']

        # Avoid recursing into the same reddit URL
        if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
            raise ExtractorError('No media found', expected=True)

        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        # Delegate to whichever extractor handles video_url (v.redd.it, imgur,
        # streamable, youtube, ...) while keeping the metadata gathered here
        return {
            '_type': 'url_transparent',
            'url': video_url,
            'title': data.get('title'),
            'thumbnail': data.get('thumbnail'),
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }
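

# A minimal usage sketch (an assumption, not part of the extractor itself):
# with youtube-dl installed and this file in youtube_dl/extractor/, YoutubeDL
# picks whichever extractor's _VALID_URL matches the given URL:
#
#   import youtube_dl
#   with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
#       info = ydl.extract_info(
#           'https://www.reddit.com/r/videos/comments/6rrwyj/'
#           'that_small_heart_attack/', download=False)
#       print(info['id'], info['title'])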