# coding: utf-8
# facebook.py -- youtube-dl extractor for facebook.com videos
  1. from __future__ import unicode_literals
  2. import json
  3. import re
  4. import socket
  5. from .common import InfoExtractor
  6. from ..compat import (
  7. compat_etree_fromstring,
  8. compat_http_client,
  9. compat_urllib_error,
  10. compat_urllib_parse_unquote,
  11. compat_urllib_parse_unquote_plus,
  12. )
  13. from ..utils import (
  14. error_to_compat_str,
  15. ExtractorError,
  16. limit_length,
  17. sanitized_Request,
  18. urlencode_postdata,
  19. get_element_by_id,
  20. clean_html,
  21. )
class FacebookIE(InfoExtractor):
    """Extractor for videos hosted on facebook.com.

    Handles direct video pages (video.php, photo.php, video/embed,
    story.php), user ``/videos/`` and ``/posts/`` URLs, and an internal
    ``facebook:<id>`` pseudo-URL scheme used to re-dispatch individual
    videos discovered inside a post.
    """

    _VALID_URL = r'''(?x)
        (?:
            https?://
                (?:\w+\.)?facebook\.com/
                (?:[^#]*?\#!/)?
                (?:
                    (?:
                        video/video\.php|
                        photo\.php|
                        video\.php|
                        video/embed|
                        story\.php
                    )\?(?:.*?)(?:v|video_id|story_fbid)=|
                    [^/]+/videos/(?:[^/]+/)?|
                    [^/]+/posts/
                )|
            facebook:
        )
        (?P<id>[0-9]+)
        '''
    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = 'facebook'
    # Desktop Chrome user agent -- presumably Facebook serves the page
    # variant these regexes expect only to known browsers; TODO confirm.
    _CHROME_USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36'
    # Canonical single-video page, used for 'facebook:' IDs and as the
    # fallback target when a page yields no video data directly.
    _VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'
    _TESTS = [{
        'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
        'md5': '6a40d33c0eccbb1af76cf0485a052659',
        'info_dict': {
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
            'uploader': 'Tennis on Facebook',
        }
    }, {
        'note': 'Video without discernible title',
        'url': 'https://www.facebook.com/video.php?v=274175099429670',
        'info_dict': {
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
            'uploader': 'Asif Nawab Butt',
        },
        'expected_warnings': [
            'title'
        ]
    }, {
        'note': 'Video with DASH manifest',
        'url': 'https://www.facebook.com/video.php?v=957955867617029',
        'md5': '54706e4db4f5ad58fbad82dde1f1213f',
        'info_dict': {
            'id': '957955867617029',
            'ext': 'mp4',
            'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
            'uploader': 'Demy de Zeeuw',
        },
    }, {
        'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
        'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
        'info_dict': {
            'id': '544765982287235',
            'ext': 'mp4',
            'title': '"What are you doing running in the snow?"',
            'uploader': 'FailArmy',
        }
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
        'only_matching': True,
    }, {
        'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
        'only_matching': True,
    }, {
        'url': 'facebook:544765982287235',
        'only_matching': True,
    }, {
        'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
        'only_matching': True,
    }]

    def _login(self):
        """Log in with the configured credentials (options or .netrc).

        Best effort: on network failure a warning is reported and
        extraction proceeds anonymously.  Also answers Facebook's
        "remember this browser" checkpoint, if one appears, with
        'dont_save' so the freshly created session is usable.
        """
        (useremail, password) = self._get_login_info()
        if useremail is None:
            # No credentials configured: nothing to do.
            return

        login_page_req = sanitized_Request(self._LOGIN_URL)
        # Force English pages so the HTML regexes below can match.
        self._set_cookie('facebook.com', 'locale', 'en_US')
        login_page = self._download_webpage(login_page_req, None,
                                            note='Downloading login page',
                                            errnote='Unable to download login page')
        # Hidden anti-CSRF tokens that must be echoed back with the form.
        lsd = self._search_regex(
            r'<input type="hidden" name="lsd" value="([^"]*)"',
            login_page, 'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

        login_form = {
            'email': useremail,
            'pass': password,
            'lsd': lsd,
            'lgnrnd': lgnrnd,
            'next': 'http://facebook.com/home.php',
            'default_persistent': '0',
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
            login_results = self._download_webpage(request, None,
                                                   note='Logging in', errnote='unable to fetch login page')
            # If the response still contains the login form, the
            # credentials were rejected (or we were rate limited).
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                error = self._html_search_regex(
                    r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
                    login_results, 'login error', default=None, group='error')
                if error:
                    raise ExtractorError('Unable to login: %s' % error, expected=True)
                self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return

            # Facebook may interpose a security checkpoint asking whether
            # to remember this browser; fb_dtsg and h are that form's
            # hidden tokens.
            fb_dtsg = self._search_regex(
                r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
            h = self._search_regex(
                r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)

            if not fb_dtsg or not h:
                # No checkpoint form present: login already complete.
                return

            check_form = {
                'fb_dtsg': fb_dtsg,
                'h': h,
                'name_action_selected': 'dont_save',
            }
            check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            check_response = self._download_webpage(check_req, None,
                                                    note='Confirming login')
            if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            # Network-level failure: warn and continue unauthenticated.
            self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
            return

    def _real_initialize(self):
        # Runs once before extraction; logging in is optional.
        self._login()

    def _extract_from_url(self, url, video_id, fatal_if_no_video=True):
        """Download *url* and try to extract video formats from it.

        Returns a ``(webpage, info_dict)`` tuple.  When no video data is
        found and *fatal_if_no_video* is False, ``info_dict`` is False so
        the caller can retry treating the page as a multi-video post.
        """
        req = sanitized_Request(url)
        req.add_header('User-Agent', self._CHROME_USER_AGENT)
        webpage = self._download_webpage(req, video_id)

        video_data = None

        # First try the legacy Flash embed: video parameters are inlined
        # as JSON between these two exact swf-setup JS snippets.
        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if m:
            data = dict(json.loads(m.group(1)))
            params_raw = compat_urllib_parse_unquote(data['params'])
            video_data = json.loads(params_raw)['video_data']

        def video_data_list2dict(video_data):
            # Group VideoConfig entries by stream_type so the result has
            # the same shape as the legacy 'video_data' dict above.
            ret = {}
            for item in video_data:
                format_id = item['stream_type']
                ret.setdefault(format_id, []).append(item)
            return ret

        if not video_data:
            # Fall back to the newer handleServerJS() bootstrap payload
            # and look for a VideoConfig instance inside it.
            server_js_data = self._parse_json(self._search_regex(
                r'handleServerJS\(({.+})\);', webpage, 'server js data'), video_id)
            for item in server_js_data.get('instances', []):
                if item[1][0] == 'VideoConfig':
                    video_data = video_data_list2dict(item[2][0]['videoData'])
                    break

        if not video_data:
            if not fatal_if_no_video:
                return webpage, False
            # Surface Facebook's own unavailability message when present.
            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
            if m_msg is not None:
                raise ExtractorError(
                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
                    expected=True)
            else:
                raise ExtractorError('Cannot parse data')

        formats = []
        for format_id, f in video_data.items():
            if not f or not isinstance(f, list):
                continue
            # Each quality may expose a plain and a no-ratelimit source.
            for quality in ('sd', 'hd'):
                for src_type in ('src', 'src_no_ratelimit'):
                    src = f[0].get('%s_%s' % (quality, src_type))
                    if src:
                        # Deprioritize 'progressive' streams; bump HD.
                        preference = -10 if format_id == 'progressive' else 0
                        if quality == 'hd':
                            preference += 5
                        formats.append({
                            'format_id': '%s_%s_%s' % (format_id, quality, src_type),
                            'url': src,
                            'preference': preference,
                        })
            # DASH manifest arrives as URL-encoded XML inside the JSON.
            dash_manifest = f[0].get('dash_manifest')
            if dash_manifest:
                formats.extend(self._parse_mpd_formats(
                    compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
        if not formats:
            raise ExtractorError('Cannot find video formats')

        self._sort_formats(formats)

        # Title fallback chain: page header -> photo caption (truncated
        # to 80 chars) -> generic "Facebook video #<id>" placeholder.
        video_title = self._html_search_regex(
            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
            default=None)
        if not video_title:
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', default=None)
            video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id
        uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))

        info_dict = {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'uploader': uploader,
        }

        return webpage, info_dict

    def _real_extract(self, url):
        """Extract a single video, or a playlist for ``/posts/`` URLs."""
        video_id = self._match_id(url)
        # 'facebook:<id>' pseudo-URLs map to the canonical video page.
        real_url = self._VIDEO_PAGE_TEMPLATE % video_id if url.startswith('facebook:') else url
        webpage, info_dict = self._extract_from_url(real_url, video_id, fatal_if_no_video=False)

        if info_dict:
            return info_dict

        if '/posts/' in url:
            # A post can embed several videos; emit each one as a
            # 'facebook:<id>' entry handled again by this extractor.
            entries = [
                self.url_result('facebook:%s' % video_id, FacebookIE.ie_key())
                for video_id in self._parse_json(
                    self._search_regex(
                        r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])',
                        webpage, 'video ids', group='ids'),
                    video_id)]

            return self.playlist_result(entries, video_id)
        else:
            # Retry via the canonical video page, this time fatally.
            _, info_dict = self._extract_from_url(
                self._VIDEO_PAGE_TEMPLATE % video_id,
                video_id, fatal_if_no_video=True)
            return info_dict