# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    parse_iso8601,
    qualities,
)
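

# SRGSSRIE resolves srgssr:<bu>:<type>:<id> URNs (also reachable through
# tp.srgssr.ch embed URLs carrying a urn= parameter) against the SRG SSR
# integration layer and builds the format list itself.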
class SRGSSRIE(InfoExtractor):
    _VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)'

    _ERRORS = {
        'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. and 6 a.m.',
        'AGERATING18': 'To protect children under the age of 18, this video is only available between 11 p.m. and 5 a.m.',
        # 'ENDDATE': 'For legal reasons, this video was only available for a specified period of time.',
        'GEOBLOCK': 'For legal reasons, this video is only available in Switzerland.',
        'LEGAL': 'The video cannot be transmitted for legal reasons.',
        'STARTDATE': 'This video is not yet available. Please try again later.',
    }
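
    # Fetch the play JSON from the integration layer; known block reasons
    # (geo restriction, age rating, ...) are mapped to the human-readable
    # messages above and reported as expected errors.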
    def get_media_data(self, bu, media_type, media_id):
        media_data = self._download_json(
            'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id),
            media_id)[media_type.capitalize()]

        if media_data.get('block') and media_data['block'] in self._ERRORS:
            raise ExtractorError('%s said: %s' % (
                self.IE_NAME, self._ERRORS[media_data['block']]), expected=True)

        return media_data
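
    # RTS media is handed off to the dedicated RTS extractor; everything else
    # is extracted directly from the integration-layer metadata.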
    def _real_extract(self, url):
        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()

        if bu == 'rts':
            return self.url_result('rts:%s' % media_id, 'RTS')

        media_data = self.get_media_data(bu, media_type, media_id)

        metadata = media_data['AssetMetadatas']['AssetMetadata'][0]
        title = metadata['title']
        description = metadata.get('description')
        created_date = media_data.get('createdDate') or metadata.get('createdDate')
        timestamp = parse_iso8601(created_date)

        thumbnails = [{
            'id': image.get('id'),
            'url': image['url'],
        } for image in media_data.get('Image', {}).get('ImageRepresentations', {}).get('ImageRepresentation', [])]
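
        # Quality labels used by the API, from lowest to highest; qualities()
        # assigns later entries a higher preference.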
        preference = qualities(['LQ', 'MQ', 'SD', 'HQ', 'HD'])
        formats = []
        for source in media_data.get('Playlists', {}).get('Playlist', []) + media_data.get('Downloads', {}).get('Download', []):
            protocol = source.get('@protocol')
            if protocol in ('HTTP-HDS', 'HTTP-HLS'):
                assets = {}
                for quality in source['url']:
                    assets[quality['@quality']] = quality['text']
                asset_url = assets.get('HD') or assets.get('HQ') or assets.get('SD') or assets.get('MQ') or assets.get('LQ')
                if '.f4m' in asset_url:
                    formats.extend(self._extract_f4m_formats(
                        asset_url + '?hdcore=3.4.0', media_id,
                        f4m_id='hds', fatal=False))
                elif '.m3u8' in asset_url:
                    formats.extend(self._extract_m3u8_formats(
                        asset_url, media_id, m3u8_id='hls', fatal=False))
            else:
                for asset in source['url']:
                    asset_url = asset['text']
                    ext = None
                    if asset_url.startswith('rtmp'):
                        ext = self._search_regex(r'([a-z0-9]+):[^/]+', asset_url, 'ext')
                    formats.append({
                        'format_id': asset['@quality'],
                        'url': asset_url,
                        'preference': preference(asset['@quality']),
                        'ext': ext,
                    })
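        # Let the common helper order the collected formats.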
        self._sort_formats(formats)

        return {
            'id': media_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'thumbnails': thumbnails,
            'formats': formats,
        }
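

# SRGSSRPlayIE matches the public play portal URLs (srf.ch/play, rts.ch/play,
# ...) and delegates extraction to SRGSSRIE via an srgssr: URN.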
class SRGSSRPlayIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www|play)\.)?(?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/[^/]+/(?P<type>video|audio)/[^?]+\?id=(?P<id>[0-9a-f\-]{36}|\d+)'

    _TESTS = [{
        'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
        'md5': '4cd93523723beff51bb4bee974ee238d',
        'info_dict': {
            'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
            'ext': 'm4v',
            'upload_date': '20130701',
            'title': 'Snowden beantragt Asyl in Russland',
            'timestamp': 1372713995,
        }
    }, {
        # No Speichern (Save) button
        'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
        'md5': '0a274ce38fda48c53c01890651985bc6',
        'info_dict': {
            'id': '677f5829-e473-4823-ac83-a1087fe97faa',
            'ext': 'flv',
            'upload_date': '20130710',
            'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
            'description': 'md5:88604432b60d5a38787f152dec89cd56',
            'timestamp': 1373493600,
        },
    }, {
        'url': 'http://www.rtr.ch/play/radio/actualitad/audio/saira-tujetsch-tuttina-cuntinuar-cun-sedrun-muster-turissem?id=63cb0778-27f8-49af-9284-8c7a8c6d15fc',
        'info_dict': {
            'id': '63cb0778-27f8-49af-9284-8c7a8c6d15fc',
            'ext': 'mp3',
            'upload_date': '20151013',
            'title': 'Saira: Tujetsch - tuttina cuntinuar cun Sedrun Mustér Turissem',
            'timestamp': 1444750398,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
        'md5': '67a2a9ae4e8e62a68d0e9820cc9782df',
        'info_dict': {
            'id': '6348260',
            'display_id': '6348260',
            'ext': 'mp4',
            'duration': 1796,
            'title': 'Le 19h30',
            'description': '',
            'uploader': '19h30',
            'upload_date': '20141201',
            'timestamp': 1417458600,
            'thumbnail': 're:^https?://.*\.image',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }]
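
    # The portal hostnames use the full business-unit name (e.g. swissinfo),
    # while the URN scheme expects the three-letter code, hence bu[:3].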
    def _real_extract(self, url):
        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
        # other info can be extracted from url + '&layout=json'
        return self.url_result('srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR')