# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    parse_iso8601,
    xpath_text,
)

  10. class SrfIE(InfoExtractor):
  11. _VALID_URL = r'http://www\.srf\.ch/play(?:er)?/tv/[^/]+/video/(?P<display_id>[^?]+)\?id=(?P<id>[0-9a-f\-]{36})'
  12. _TESTS = [{
  13. 'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
  14. 'md5': '4cd93523723beff51bb4bee974ee238d',
  15. 'info_dict': {
  16. 'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
  17. 'display_id': 'snowden-beantragt-asyl-in-russland',
  18. 'ext': 'm4v',
  19. 'upload_date': '20130701',
  20. 'title': 'Snowden beantragt Asyl in Russland',
  21. 'timestamp': 1372713995,
  22. }
  23. }, {
  24. # No Speichern (Save) button
  25. 'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
  26. 'md5': 'd97e236e80d1d24729e5d0953d276a4f',
  27. 'info_dict': {
  28. 'id': '677f5829-e473-4823-ac83-a1087fe97faa',
  29. 'display_id': 'jaguar-xk120-shadow-und-tornado-dampflokomotive',
  30. 'ext': 'flv',
  31. 'upload_date': '20130710',
  32. 'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
  33. 'timestamp': 1373493600,
  34. },
  35. }, {
  36. 'url': 'http://www.srf.ch/player/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
  37. 'only_matching': True,
  38. }]
  39. def _real_extract(self, url):
  40. video_id = self._match_id(url)
  41. video_data = self._download_xml(
  42. 'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/video/play/%s.xml' % video_id,
  43. video_id)
  44. display_id = re.match(self._VALID_URL, url).group('display_id')
  45. title = xpath_text(
  46. video_data, './AssetMetadatas/AssetMetadata/title', fatal=True)
  47. thumbnails = [{
  48. 'url': s.text
  49. } for s in video_data.findall('.//ImageRepresentation/url')]
  50. timestamp = parse_iso8601(xpath_text(video_data, './createdDate'))
  51. # The <duration> field in XML is different from the exact duration, skipping
  52. formats = []
  53. for item in video_data.findall('./Playlists/Playlist') + video_data.findall('./Downloads/Download'):
  54. for url_node in item.findall('url'):
  55. quality = url_node.attrib['quality']
  56. full_url = url_node.text
  57. original_ext = determine_ext(full_url)
  58. format_id = '%s-%s' % (quality, item.attrib['protocol'])
  59. if original_ext == 'f4m':
  60. formats.extend(self._extract_f4m_formats(
  61. full_url + '?hdcore=3.4.0', video_id, f4m_id=format_id))
  62. elif original_ext == 'm3u8':
  63. formats.extend(self._extract_m3u8_formats(
  64. full_url, video_id, 'mp4', m3u8_id=format_id))
  65. else:
  66. formats.append({
  67. 'url': full_url,
  68. 'ext': original_ext,
  69. 'format_id': format_id,
  70. 'quality': 0 if 'HD' in quality else -1,
  71. 'preference': 1,
  72. })
  73. self._sort_formats(formats)
  74. subtitles = {}
  75. subtitles_data = video_data.find('Subtitles')
  76. if subtitles_data is not None:
  77. subtitles_list = [{
  78. 'url': sub.text,
  79. 'ext': determine_ext(sub.text),
  80. } for sub in subtitles_data]
  81. if subtitles_list:
  82. subtitles['de'] = subtitles_list
  83. return {
  84. 'id': video_id,
  85. 'display_id': display_id,
  86. 'formats': formats,
  87. 'title': title,
  88. 'thumbnails': thumbnails,
  89. 'timestamp': timestamp,
  90. 'subtitles': subtitles,
  91. }