
Switch codebase to use sanitized_Request instead of
compat_urllib_request.Request
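
For context: sanitized_Request is a small helper in youtube_dl/utils.py that wraps compat_urllib_request.Request and cleans the URL before the request object is built, so every call site below keeps its existing positional arguments and add_header() calls. A minimal sketch of the idea (illustrative only, assuming a simple sanitize_url fix-up; the exact rules in utils.py may differ):

    # Sketch only -- the real helper lives in youtube_dl/utils.py; the exact
    # clean-up rules there may differ from this assumed example.
    from youtube_dl.compat import compat_urllib_request


    def sanitize_url(url):
        # Assumed example fix-up: give protocol-relative URLs an explicit scheme.
        return 'http:%s' % url if url.startswith('//') else url


    def sanitized_Request(url, *args, **kwargs):
        # Same signature as compat_urllib_request.Request, so it is a drop-in
        # replacement at every call site touched by this commit.
        return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)

With that wrapper in place, the change in every file below is mechanical: drop compat_urllib_request from the ..compat import, add sanitized_Request to the ..utils import, and swap the constructor at each call site.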

[downloader/dash] Use sanitized_Request

[downloader/http] Use sanitized_Request

[atresplayer] Use sanitized_Request

[bambuser] Use sanitized_Request

[bliptv] Use sanitized_Request

[brightcove] Use sanitized_Request

[cbs] Use sanitized_Request

[ceskatelevize] Use sanitized_Request

[collegerama] Use sanitized_Request

[extractor/common] Use sanitized_Request

[crunchyroll] Use sanitized_Request

[dailymotion] Use sanitized_Request

[dcn] Use sanitized_Request

[dramafever] Use sanitized_Request

[dumpert] Use sanitized_Request

[eitb] Use sanitized_Request

[escapist] Use sanitized_Request

[everyonesmixtape] Use sanitized_Request

[extremetube] Use sanitized_Request

[facebook] Use sanitized_Request

[fc2] Use sanitized_Request

[flickr] Use sanitized_Request

[4tube] Use sanitized_Request

[gdcvault] Use sanitized_Request

[extractor/generic] Use sanitized_Request

[hearthisat] Use sanitized_Request

[hotnewhiphop] Use sanitized_Request

[hypem] Use sanitized_Request

[iprima] Use sanitized_Request

[ivi] Use sanitized_Request

[keezmovies] Use sanitized_Request

[letv] Use sanitized_Request

[lynda] Use sanitized_Request

[metacafe] Use sanitized_Request

[minhateca] Use sanitized_Request

[miomio] Use sanitized_Request

[moevideo] Use sanitized_Request

[mofosex] Use sanitized_Request

[moniker] Use sanitized_Request

[mooshare] Use sanitized_Request

[movieclips] Use sanitized_Request

[mtv] Use sanitized_Request

[myvideo] Use sanitized_Request

[neteasemusic] Use sanitized_Request

[nfb] Use sanitized_Request

[niconico] Use sanitized_Request

[noco] Use sanitized_Request

[nosvideo] Use sanitized_Request

[novamov] Use sanitized_Request

[nowness] Use sanitized_Request

[nuvid] Use sanitized_Request

[played] Use sanitized_Request

[pluralsight] Use sanitized_Request

[pornhub] Use sanitized_Request

[pornotube] Use sanitized_Request

[primesharetv] Use sanitized_Request

[promptfile] Use sanitized_Request

[qqmusic] Use sanitized_Request

[rtve] Use sanitized_Request

[safari] Use sanitized_Request

[sandia] Use sanitized_Request

[shared] Use sanitized_Request

[sharesix] Use sanitized_Request

[sina] Use sanitized_Request

[smotri] Use sanitized_Request

[sohu] Use sanitized_Request

[spankwire] Use sanitized_Request

[sportdeutschland] Use sanitized_Request

[streamcloud] Use sanitized_Request

[streamcz] Use sanitized_Request

[tapely] Use sanitized_Request

[tube8] Use sanitized_Request

[tubitv] Use sanitized_Request

[twitch] Use sanitized_Request

[twitter] Use sanitized_Request

[udemy] Use sanitized_Request

[vbox7] Use sanitized_Request

[veoh] Use sanitized_Request

[vessel] Use sanitized_Request

[vevo] Use sanitized_Request

[viddler] Use sanitized_Request

[videomega] Use sanitized_Request

[viewster] Use sanitized_Request

[viki] Use sanitized_Request

[vk] Use sanitized_Request

[vodlocker] Use sanitized_Request

[voicerepublic] Use sanitized_Request

[wistia] Use sanitized_Request

[xfileshare] Use sanitized_Request

[xtube] Use sanitized_Request

[xvideos] Use sanitized_Request

[yandexmusic] Use sanitized_Request

[youku] Use sanitized_Request

[youporn] Use sanitized_Request

[youtube] Use sanitized_Request

[patreon] Use sanitized_Request

[extractor/common] Remove unused import

[nfb] PEP 8

Sergey M. 9 years ago
parent
commit 5c2266df4b
97 changed files with 271 additions and 353 deletions
  1. youtube_dl/YoutubeDL.py (+1 -1)
  2. youtube_dl/downloader/dash.py (+2 -2)
  3. youtube_dl/downloader/http.py (+4 -6)
  4. youtube_dl/extractor/atresplayer.py (+3 -3)
  5. youtube_dl/extractor/bambuser.py (+3 -3)
  6. youtube_dl/extractor/bliptv.py (+3 -5)
  7. youtube_dl/extractor/brightcove.py (+3 -3)
  8. youtube_dl/extractor/cbs.py (+5 -3)
  9. youtube_dl/extractor/ceskatelevize.py (+3 -3)
  10. youtube_dl/extractor/collegerama.py (+2 -2)
  11. youtube_dl/extractor/common.py (+2 -2)
  12. youtube_dl/extractor/crunchyroll.py (+5 -4)
  13. youtube_dl/extractor/dailymotion.py (+3 -5)
  14. youtube_dl/extractor/dcn.py (+3 -5)
  15. youtube_dl/extractor/dramafever.py (+2 -2)
  16. youtube_dl/extractor/dumpert.py (+5 -3)
  17. youtube_dl/extractor/eitb.py (+2 -2)
  18. youtube_dl/extractor/escapist.py (+2 -3)
  19. youtube_dl/extractor/everyonesmixtape.py (+3 -5)
  20. youtube_dl/extractor/extremetube.py (+2 -2)
  21. youtube_dl/extractor/facebook.py (+4 -4)
  22. youtube_dl/extractor/fc2.py (+3 -2)
  23. youtube_dl/extractor/flickr.py (+2 -2)
  24. youtube_dl/extractor/fourtube.py (+2 -4)
  25. youtube_dl/extractor/gdcvault.py (+3 -5)
  26. youtube_dl/extractor/generic.py (+3 -3)
  27. youtube_dl/extractor/hearthisat.py (+3 -5)
  28. youtube_dl/extractor/hotnewhiphop.py (+3 -5)
  29. youtube_dl/extractor/hypem.py (+4 -6)
  30. youtube_dl/extractor/iprima.py (+2 -4)
  31. youtube_dl/extractor/ivi.py (+2 -4)
  32. youtube_dl/extractor/keezmovies.py (+3 -5)
  33. youtube_dl/extractor/letv.py (+2 -2)
  34. youtube_dl/extractor/lynda.py (+3 -3)
  35. youtube_dl/extractor/metacafe.py (+3 -3)
  36. youtube_dl/extractor/minhateca.py (+3 -5)
  37. youtube_dl/extractor/miomio.py (+2 -2)
  38. youtube_dl/extractor/moevideo.py (+3 -5)
  39. youtube_dl/extractor/mofosex.py (+2 -2)
  40. youtube_dl/extractor/moniker.py (+4 -6)
  41. youtube_dl/extractor/mooshare.py (+3 -5)
  42. youtube_dl/extractor/movieclips.py (+2 -4)
  43. youtube_dl/extractor/mtv.py (+2 -2)
  44. youtube_dl/extractor/myvideo.py (+2 -2)
  45. youtube_dl/extractor/neteasemusic.py (+2 -2)
  46. youtube_dl/extractor/nfb.py (+5 -6)
  47. youtube_dl/extractor/niconico.py (+3 -3)
  48. youtube_dl/extractor/noco.py (+2 -2)
  49. youtube_dl/extractor/nosvideo.py (+2 -4)
  50. youtube_dl/extractor/novamov.py (+3 -5)
  51. youtube_dl/extractor/nowness.py (+5 -5)
  52. youtube_dl/extractor/nuvid.py (+2 -4)
  53. youtube_dl/extractor/patreon.py (+2 -4)
  54. youtube_dl/extractor/played.py (+3 -5)
  55. youtube_dl/extractor/pluralsight.py (+3 -3)
  56. youtube_dl/extractor/pornhub.py (+2 -2)
  57. youtube_dl/extractor/pornotube.py (+4 -6)
  58. youtube_dl/extractor/primesharetv.py (+5 -5)
  59. youtube_dl/extractor/promptfile.py (+3 -5)
  60. youtube_dl/extractor/qqmusic.py (+2 -2)
  61. youtube_dl/extractor/rtve.py (+2 -2)
  62. youtube_dl/extractor/safari.py (+3 -5)
  63. youtube_dl/extractor/sandia.py (+3 -5)
  64. youtube_dl/extractor/shared.py (+3 -5)
  65. youtube_dl/extractor/sharesix.py (+3 -5)
  66. youtube_dl/extractor/sina.py (+3 -5)
  67. youtube_dl/extractor/smotri.py (+4 -6)
  68. youtube_dl/extractor/sohu.py (+2 -2)
  69. youtube_dl/extractor/spankwire.py (+2 -2)
  70. youtube_dl/extractor/sportdeutschland.py (+2 -4)
  71. youtube_dl/extractor/streamcloud.py (+3 -5)
  72. youtube_dl/extractor/streamcz.py (+2 -4)
  73. youtube_dl/extractor/tapely.py (+2 -4)
  74. youtube_dl/extractor/tube8.py (+3 -5)
  75. youtube_dl/extractor/tubitv.py (+3 -5)
  76. youtube_dl/extractor/twitch.py (+3 -3)
  77. youtube_dl/extractor/twitter.py (+2 -2)
  78. youtube_dl/extractor/udemy.py (+3 -2)
  79. youtube_dl/extractor/vbox7.py (+2 -2)
  80. youtube_dl/extractor/veoh.py (+2 -4)
  81. youtube_dl/extractor/vessel.py (+2 -2)
  82. youtube_dl/extractor/vevo.py (+3 -5)
  83. youtube_dl/extractor/viddler.py (+2 -4)
  84. youtube_dl/extractor/videomega.py (+2 -2)
  85. youtube_dl/extractor/viewster.py (+2 -2)
  86. youtube_dl/extractor/viki.py (+3 -3)
  87. youtube_dl/extractor/vk.py (+2 -2)
  88. youtube_dl/extractor/vodlocker.py (+3 -5)
  89. youtube_dl/extractor/voicerepublic.py (+3 -5)
  90. youtube_dl/extractor/wistia.py (+5 -3)
  91. youtube_dl/extractor/xfileshare.py (+3 -5)
  92. youtube_dl/extractor/xtube.py (+3 -5)
  93. youtube_dl/extractor/xvideos.py (+3 -5)
  94. youtube_dl/extractor/yandexmusic.py (+2 -2)
  95. youtube_dl/extractor/youku.py (+5 -4)
  96. youtube_dl/extractor/youporn.py (+2 -2)
  97. youtube_dl/extractor/youtube.py (+3 -3)

+ 1 - 1
youtube_dl/YoutubeDL.py

@@ -1189,7 +1189,7 @@ class YoutubeDL(object):
         return res
 
     def _calc_cookies(self, info_dict):
-        pr = compat_urllib_request.Request(info_dict['url'])
+        pr = sanitized_Request(info_dict['url'])
        self.cookiejar.add_cookie_header(pr)
         return pr.get_header('Cookie')
 

+ 2 - 2
youtube_dl/downloader/dash.py

@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import FileDownloader
-from ..compat import compat_urllib_request
+from ..utils import sanitized_Request
 
 
 class DashSegmentsFD(FileDownloader):
@@ -22,7 +22,7 @@ class DashSegmentsFD(FileDownloader):
 
         def append_url_to_file(outf, target_url, target_name, remaining_bytes=None):
             self.to_screen('[DashSegments] %s: Downloading %s' % (info_dict['id'], target_name))
-            req = compat_urllib_request.Request(target_url)
+            req = sanitized_Request(target_url)
             if remaining_bytes is not None:
                 req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
 

+ 4 - 6
youtube_dl/downloader/http.py

@@ -7,14 +7,12 @@ import time
 import re
 
 from .common import FileDownloader
-from ..compat import (
-    compat_urllib_request,
-    compat_urllib_error,
-)
+from ..compat import compat_urllib_error
 from ..utils import (
     ContentTooShortError,
     encodeFilename,
     sanitize_open,
+    sanitized_Request,
 )
 
 
@@ -29,8 +27,8 @@ class HttpFD(FileDownloader):
         add_headers = info_dict.get('http_headers')
         if add_headers:
             headers.update(add_headers)
-        basic_request = compat_urllib_request.Request(url, None, headers)
-        request = compat_urllib_request.Request(url, None, headers)
+        basic_request = sanitized_Request(url, None, headers)
+        request = sanitized_Request(url, None, headers)
 
         is_test = self.params.get('test', False)
 

+ 3 - 3
youtube_dl/extractor/atresplayer.py

@@ -7,11 +7,11 @@ from .common import InfoExtractor
 from ..compat import (
     compat_str,
     compat_urllib_parse,
-    compat_urllib_request,
 )
 from ..utils import (
     int_or_none,
     float_or_none,
+    sanitized_Request,
     xpath_text,
     ExtractorError,
 )
@@ -63,7 +63,7 @@ class AtresPlayerIE(InfoExtractor):
             'j_password': password,
         }
 
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         response = self._download_webpage(
@@ -94,7 +94,7 @@ class AtresPlayerIE(InfoExtractor):
 
         formats = []
         for fmt in ['windows', 'android_tablet']:
-            request = compat_urllib_request.Request(
+            request = sanitized_Request(
                 self._URL_VIDEO_TEMPLATE.format(fmt, episode_id, timestamp_shifted, token))
             request.add_header('User-Agent', self._USER_AGENT)
 

+ 3 - 3
youtube_dl/extractor/bambuser.py

@@ -6,13 +6,13 @@ import itertools
 from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse,
-    compat_urllib_request,
     compat_str,
 )
 from ..utils import (
     ExtractorError,
     int_or_none,
     float_or_none,
+    sanitized_Request,
 )
 
 
@@ -57,7 +57,7 @@ class BambuserIE(InfoExtractor):
             'pass': password,
         }
 
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
         request.add_header('Referer', self._LOGIN_URL)
         response = self._download_webpage(
@@ -126,7 +126,7 @@ class BambuserChannelIE(InfoExtractor):
                 '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                 '&method=broadcast&format=json&vid_older_than={last}'
             ).format(user=user, count=self._STEP, last=last_id)
-            req = compat_urllib_request.Request(req_url)
+            req = sanitized_Request(req_url)
             # Without setting this header, we wouldn't get any result
             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
             data = self._download_json(

+ 3 - 5
youtube_dl/extractor/bliptv.py

@@ -4,14 +4,12 @@ import re
 
 from .common import InfoExtractor
 
-from ..compat import (
-    compat_urllib_request,
-    compat_urlparse,
-)
+from ..compat import compat_urlparse
 from ..utils import (
     clean_html,
     int_or_none,
     parse_iso8601,
+    sanitized_Request,
     unescapeHTML,
     xpath_text,
     xpath_with_ns,
@@ -219,7 +217,7 @@ class BlipTVIE(InfoExtractor):
         for lang, url in subtitles_urls.items():
             # For some weird reason, blip.tv serves a video instead of subtitles
             # when we request with a common UA
-            req = compat_urllib_request.Request(url)
+            req = sanitized_Request(url)
             req.add_header('User-Agent', 'youtube-dl')
             subtitles[lang] = [{
                 # The extension is 'srt' but it's actually an 'ass' file

+ 3 - 3
youtube_dl/extractor/brightcove.py

@@ -11,7 +11,6 @@ from ..compat import (
     compat_str,
     compat_urllib_parse,
     compat_urllib_parse_urlparse,
-    compat_urllib_request,
     compat_urlparse,
     compat_xml_parse_error,
 )
@@ -24,6 +23,7 @@ from ..utils import (
     js_to_json,
     int_or_none,
     parse_iso8601,
+    sanitized_Request,
     unescapeHTML,
     unsmuggle_url,
 )
@@ -250,7 +250,7 @@ class BrightcoveLegacyIE(InfoExtractor):
 
     def _get_video_info(self, video_id, query_str, query, referer=None):
         request_url = self._FEDERATED_URL_TEMPLATE % query_str
-        req = compat_urllib_request.Request(request_url)
+        req = sanitized_Request(request_url)
         linkBase = query.get('linkBaseURL')
         if linkBase is not None:
             referer = linkBase[0]
@@ -443,7 +443,7 @@ class BrightcoveNewIE(InfoExtractor):
                 r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
                 webpage, 'policy key', group='pk')
 
-        req = compat_urllib_request.Request(
+        req = sanitized_Request(
             'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s'
             % (account_id, video_id),
             headers={'Accept': 'application/json;pk=%s' % policy_key})

+ 5 - 3
youtube_dl/extractor/cbs.py

@@ -1,8 +1,10 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_request
-from ..utils import smuggle_url
+from ..utils import (
+    sanitized_Request,
+    smuggle_url,
+)
 
 
 class CBSIE(InfoExtractor):
@@ -48,7 +50,7 @@ class CBSIE(InfoExtractor):
 
     def _real_extract(self, url):
         display_id = self._match_id(url)
-        request = compat_urllib_request.Request(url)
+        request = sanitized_Request(url)
         # Android UA is served with higher quality (720p) streams (see
         # https://github.com/rg3/youtube-dl/issues/7490)
         request.add_header('User-Agent', 'Mozilla/5.0 (Linux; Android 4.4; Nexus 5)')

+ 3 - 3
youtube_dl/extractor/ceskatelevize.py

@@ -5,7 +5,6 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_request,
     compat_urllib_parse,
     compat_urllib_parse_unquote,
     compat_urllib_parse_urlparse,
@@ -13,6 +12,7 @@ from ..compat import (
 from ..utils import (
     ExtractorError,
     float_or_none,
+    sanitized_Request,
 )
 
 
@@ -100,7 +100,7 @@ class CeskaTelevizeIE(InfoExtractor):
             'requestSource': 'iVysilani',
         }
 
-        req = compat_urllib_request.Request(
+        req = sanitized_Request(
             'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
             data=compat_urllib_parse.urlencode(data))
 
@@ -115,7 +115,7 @@ class CeskaTelevizeIE(InfoExtractor):
         if playlist_url == 'error_region':
             raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
 
-        req = compat_urllib_request.Request(compat_urllib_parse_unquote(playlist_url))
+        req = sanitized_Request(compat_urllib_parse_unquote(playlist_url))
         req.add_header('Referer', url)
 
         playlist_title = self._og_search_title(webpage)

+ 2 - 2
youtube_dl/extractor/collegerama.py

@@ -3,10 +3,10 @@ from __future__ import unicode_literals
 import json
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_request
 from ..utils import (
     float_or_none,
     int_or_none,
+    sanitized_Request,
 )
 
 
@@ -52,7 +52,7 @@ class CollegeRamaIE(InfoExtractor):
             }
         }
 
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions',
             json.dumps(player_options_request))
         request.add_header('Content-Type', 'application/json')

+ 2 - 2
youtube_dl/extractor/common.py

@@ -19,7 +19,6 @@ from ..compat import (
     compat_urllib_error,
     compat_urllib_parse,
     compat_urllib_parse_urlparse,
-    compat_urllib_request,
     compat_urlparse,
     compat_str,
     compat_etree_fromstring,
@@ -37,6 +36,7 @@ from ..utils import (
     int_or_none,
     RegexNotFoundError,
     sanitize_filename,
+    sanitized_Request,
     unescapeHTML,
     unified_strdate,
     url_basename,
@@ -1285,7 +1285,7 @@ class InfoExtractor(object):
 
     def _get_cookies(self, url):
         """ Return a compat_cookies.SimpleCookie with the cookies for the url """
-        req = compat_urllib_request.Request(url)
+        req = sanitized_Request(url)
         self._downloader.cookiejar.add_cookie_header(req)
         return compat_cookies.SimpleCookie(req.get_header('Cookie'))
 

+ 5 - 4
youtube_dl/extractor/crunchyroll.py

@@ -23,6 +23,7 @@ from ..utils import (
     int_or_none,
     lowercase_escape,
     remove_end,
+    sanitized_Request,
     unified_strdate,
     urlencode_postdata,
     xpath_text,
@@ -46,7 +47,7 @@ class CrunchyrollBaseIE(InfoExtractor):
             'name': username,
             'password': password,
         })
-        login_request = compat_urllib_request.Request(login_url, data)
+        login_request = sanitized_Request(login_url, data)
         login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self._download_webpage(login_request, None, False, 'Wrong login info')
 
@@ -55,7 +56,7 @@ class CrunchyrollBaseIE(InfoExtractor):
 
     def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
         request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
-                   else compat_urllib_request.Request(url_or_request))
+                   else sanitized_Request(url_or_request))
         # Accept-Language must be set explicitly to accept any language to avoid issues
         # similar to https://github.com/rg3/youtube-dl/issues/6797.
         # Along with IP address Crunchyroll uses Accept-Language to guess whether georestriction
@@ -307,7 +308,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             'video_uploader', fatal=False)
 
         playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
-        playerdata_req = compat_urllib_request.Request(playerdata_url)
+        playerdata_req = sanitized_Request(playerdata_url)
         playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
         playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
         playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')
@@ -319,7 +320,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         for fmt in re.findall(r'showmedia\.([0-9]{3,4})p', webpage):
             stream_quality, stream_format = self._FORMAT_IDS[fmt]
             video_format = fmt + 'p'
-            streamdata_req = compat_urllib_request.Request(
+            streamdata_req = sanitized_Request(
                 'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
                 % (stream_id, stream_format, stream_quality),
                 compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8'))

+ 3 - 5
youtube_dl/extractor/dailymotion.py

@@ -7,15 +7,13 @@ import itertools
 
 from .common import InfoExtractor
 
-from ..compat import (
-    compat_str,
-    compat_urllib_request,
-)
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     determine_ext,
     int_or_none,
     parse_iso8601,
+    sanitized_Request,
     str_to_int,
     unescapeHTML,
 )
@@ -25,7 +23,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
     @staticmethod
     def _build_request(url):
         """Build a request with the family filter disabled"""
-        request = compat_urllib_request.Request(url)
+        request = sanitized_Request(url)
         request.add_header('Cookie', 'family_filter=off; ff=off')
         return request
 

+ 3 - 5
youtube_dl/extractor/dcn.py

@@ -2,13 +2,11 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     int_or_none,
     parse_iso8601,
+    sanitized_Request,
 )
 
 
@@ -36,7 +34,7 @@ class DCNIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             'http://admin.mangomolo.com/analytics/index.php/plus/video?id=%s' % video_id,
             headers={'Origin': 'http://www.dcndigital.ae'})
 

+ 2 - 2
youtube_dl/extractor/dramafever.py

@@ -7,7 +7,6 @@ from .common import InfoExtractor
 from ..compat import (
     compat_HTTPError,
     compat_urllib_parse,
-    compat_urllib_request,
     compat_urlparse,
 )
 from ..utils import (
@@ -16,6 +15,7 @@ from ..utils import (
     determine_ext,
     int_or_none,
     parse_iso8601,
+    sanitized_Request,
 )
 
 
@@ -51,7 +51,7 @@ class DramaFeverBaseIE(InfoExtractor):
             'password': password,
         }
 
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)

+ 5 - 3
youtube_dl/extractor/dumpert.py

@@ -5,8 +5,10 @@ import base64
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_request
-from ..utils import qualities
+from ..utils import (
+    qualities,
+    sanitized_Request,
+)
 
 
 class DumpertIE(InfoExtractor):
@@ -32,7 +34,7 @@ class DumpertIE(InfoExtractor):
         protocol = mobj.group('protocol')
 
         url = '%s://www.dumpert.nl/mediabase/%s' % (protocol, video_id)
-        req = compat_urllib_request.Request(url)
+        req = sanitized_Request(url)
         req.add_header('Cookie', 'nsfw=1; cpc=10')
         webpage = self._download_webpage(req, video_id)
 

+ 2 - 2
youtube_dl/extractor/eitb.py

@@ -2,11 +2,11 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_request
 from ..utils import (
     float_or_none,
     int_or_none,
     parse_iso8601,
+    sanitized_Request,
 )
 
 
@@ -57,7 +57,7 @@ class EitbIE(InfoExtractor):
 
         hls_url = media.get('HLS_SURL')
         if hls_url:
-            request = compat_urllib_request.Request(
+            request = sanitized_Request(
                 'http://mam.eitb.eus/mam/REST/ServiceMultiweb/DomainRestrictedSecurity/TokenAuth/',
                 headers={'Referer': url})
             token_data = self._download_json(

+ 2 - 3
youtube_dl/extractor/escapist.py

@@ -3,13 +3,12 @@ from __future__ import unicode_literals
 import json
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_request
-
 from ..utils import (
     determine_ext,
     clean_html,
     int_or_none,
     float_or_none,
+    sanitized_Request,
 )
 
 
@@ -75,7 +74,7 @@ class EscapistIE(InfoExtractor):
         video_id = ims_video['videoID']
         key = ims_video['hash']
 
-        config_req = compat_urllib_request.Request(
+        config_req = sanitized_Request(
             'http://www.escapistmagazine.com/videos/'
             'vidconfig.php?videoID=%s&hash=%s' % (video_id, key))
         config_req.add_header('Referer', url)

+ 3 - 5
youtube_dl/extractor/everyonesmixtape.py

@@ -3,11 +3,9 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -42,7 +40,7 @@ class EveryonesMixtapeIE(InfoExtractor):
         playlist_id = mobj.group('id')
 
         pllist_url = 'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id
-        pllist_req = compat_urllib_request.Request(pllist_url)
+        pllist_req = sanitized_Request(pllist_url)
         pllist_req.add_header('X-Requested-With', 'XMLHttpRequest')
 
         playlist_list = self._download_json(
@@ -55,7 +53,7 @@ class EveryonesMixtapeIE(InfoExtractor):
             raise ExtractorError('Playlist id not found')
 
         pl_url = 'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no
-        pl_req = compat_urllib_request.Request(pl_url)
+        pl_req = sanitized_Request(pl_url)
         pl_req.add_header('X-Requested-With', 'XMLHttpRequest')
         playlist = self._download_json(
             pl_req, playlist_id, note='Downloading playlist info')

+ 2 - 2
youtube_dl/extractor/extremetube.py

@@ -3,9 +3,9 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_request
 from ..utils import (
     int_or_none,
+    sanitized_Request,
     str_to_int,
 )
 
@@ -37,7 +37,7 @@ class ExtremeTubeIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        req = compat_urllib_request.Request(url)
+        req = sanitized_Request(url)
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 

+ 4 - 4
youtube_dl/extractor/facebook.py

@@ -10,11 +10,11 @@ from ..compat import (
     compat_str,
     compat_urllib_error,
     compat_urllib_parse_unquote,
-    compat_urllib_request,
 )
 from ..utils import (
     ExtractorError,
     limit_length,
+    sanitized_Request,
     urlencode_postdata,
     get_element_by_id,
     clean_html,
@@ -73,7 +73,7 @@ class FacebookIE(InfoExtractor):
         if useremail is None:
             return
 
-        login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
+        login_page_req = sanitized_Request(self._LOGIN_URL)
         login_page_req.add_header('Cookie', 'locale=en_US')
         login_page = self._download_webpage(login_page_req, None,
                                             note='Downloading login page',
@@ -94,7 +94,7 @@ class FacebookIE(InfoExtractor):
             'timezone': '-60',
             'trynum': '1',
         }
-        request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
+        request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         try:
             login_results = self._download_webpage(request, None,
@@ -109,7 +109,7 @@ class FacebookIE(InfoExtractor):
                     r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
                 'name_action_selected': 'dont_save',
             }
-            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
+            check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
             check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             check_response = self._download_webpage(check_req, None,
                                                     note='Confirming login')

+ 3 - 2
youtube_dl/extractor/fc2.py

@@ -12,6 +12,7 @@ from ..compat import (
 from ..utils import (
     encode_dict,
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -57,7 +58,7 @@ class FC2IE(InfoExtractor):
         }
 
         login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)
 
         login_results = self._download_webpage(request, None, note='Logging in', errnote='Unable to log in')
@@ -66,7 +67,7 @@ class FC2IE(InfoExtractor):
             return False
 
         # this is also needed
-        login_redir = compat_urllib_request.Request('http://id.fc2.com/?mode=redirect&login=done')
+        login_redir = sanitized_Request('http://id.fc2.com/?mode=redirect&login=done')
         self._download_webpage(
             login_redir, None, note='Login redirect', errnote='Login redirect failed')
 

+ 2 - 2
youtube_dl/extractor/flickr.py

@@ -3,10 +3,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_request
 from ..utils import (
     ExtractorError,
     find_xpath_attr,
+    sanitized_Request,
 )
 
 
@@ -30,7 +30,7 @@ class FlickrIE(InfoExtractor):
         video_id = mobj.group('id')
         video_uploader_id = mobj.group('uploader_id')
         webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id
-        req = compat_urllib_request.Request(webpage_url)
+        req = sanitized_Request(webpage_url)
         req.add_header(
             'User-Agent',
             # it needs a more recent version

+ 2 - 4
youtube_dl/extractor/fourtube.py

@@ -3,12 +3,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     parse_duration,
     parse_iso8601,
+    sanitized_Request,
     str_to_int,
 )
 
@@ -93,7 +91,7 @@ class FourTubeIE(InfoExtractor):
             b'Content-Type': b'application/x-www-form-urlencoded',
             b'Origin': b'http://www.4tube.com',
         }
-        token_req = compat_urllib_request.Request(token_url, b'{}', headers)
+        token_req = sanitized_Request(token_url, b'{}', headers)
         tokens = self._download_json(token_req, video_id)
         formats = [{
             'url': tokens[format]['token'],

+ 3 - 5
youtube_dl/extractor/gdcvault.py

@@ -3,13 +3,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     remove_end,
     HEADRequest,
+    sanitized_Request,
 )
 
 
@@ -125,7 +123,7 @@ class GDCVaultIE(InfoExtractor):
             'password': password,
         }
 
-        request = compat_urllib_request.Request(login_url, compat_urllib_parse.urlencode(login_form))
+        request = sanitized_Request(login_url, compat_urllib_parse.urlencode(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self._download_webpage(request, display_id, 'Logging in')
         start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')

+ 3 - 3
youtube_dl/extractor/generic.py

@@ -11,7 +11,6 @@ from .youtube import YoutubeIE
 from ..compat import (
     compat_etree_fromstring,
     compat_urllib_parse_unquote,
-    compat_urllib_request,
     compat_urlparse,
     compat_xml_parse_error,
 )
@@ -22,6 +21,7 @@ from ..utils import (
     HEADRequest,
     is_html,
     orderedSet,
+    sanitized_Request,
     smuggle_url,
     unescapeHTML,
     unified_strdate,
@@ -1215,7 +1215,7 @@ class GenericIE(InfoExtractor):
 
         full_response = None
         if head_response is False:
-            request = compat_urllib_request.Request(url)
+            request = sanitized_Request(url)
             request.add_header('Accept-Encoding', '*')
             full_response = self._request_webpage(request, video_id)
             head_response = full_response
@@ -1244,7 +1244,7 @@ class GenericIE(InfoExtractor):
                 '%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
 
         if not full_response:
-            request = compat_urllib_request.Request(url)
+            request = sanitized_Request(url)
             # Some webservers may serve compressed content of rather big size (e.g. gzipped flac)
             # making it impossible to download only chunk of the file (yet we need only 512kB to
             # test whether it's HTML or not). According to youtube-dl default Accept-Encoding

+ 3 - 5
youtube_dl/extractor/hearthisat.py

@@ -4,12 +4,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-    compat_urlparse,
-)
+from ..compat import compat_urlparse
 from ..utils import (
     HEADRequest,
+    sanitized_Request,
     str_to_int,
     urlencode_postdata,
     urlhandle_detect_ext,
@@ -47,7 +45,7 @@ class HearThisAtIE(InfoExtractor):
             r'intTrackId\s*=\s*(\d+)', webpage, 'track ID')
 
         payload = urlencode_postdata({'tracks[]': track_id})
-        req = compat_urllib_request.Request(self._PLAYLIST_URL, payload)
+        req = sanitized_Request(self._PLAYLIST_URL, payload)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
 
         track = self._download_json(req, track_id, 'Downloading playlist')[0]

+ 3 - 5
youtube_dl/extractor/hotnewhiphop.py

@@ -3,13 +3,11 @@ from __future__ import unicode_literals
 import base64
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     ExtractorError,
     HEADRequest,
+    sanitized_Request,
 )
 
 
@@ -41,7 +39,7 @@ class HotNewHipHopIE(InfoExtractor):
             ('mediaType', 's'),
             ('mediaId', video_id),
         ])
-        r = compat_urllib_request.Request(
+        r = sanitized_Request(
             'http://www.hotnewhiphop.com/ajax/media/getActions/', data=reqdata)
         r.add_header('Content-Type', 'application/x-www-form-urlencoded')
         mkd = self._download_json(

+ 4 - 6
youtube_dl/extractor/hypem.py

@@ -4,12 +4,10 @@ import json
 import time
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -32,7 +30,7 @@ class HypemIE(InfoExtractor):
         data = {'ax': 1, 'ts': time.time()}
         data_encoded = compat_urllib_parse.urlencode(data)
         complete_url = url + "?" + data_encoded
-        request = compat_urllib_request.Request(complete_url)
+        request = sanitized_Request(complete_url)
         response, urlh = self._download_webpage_handle(
             request, track_id, 'Downloading webpage with the url')
         cookie = urlh.headers.get('Set-Cookie', '')
@@ -52,7 +50,7 @@ class HypemIE(InfoExtractor):
         title = track['song']
 
         serve_url = "http://hypem.com/serve/source/%s/%s" % (track_id, key)
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             serve_url, '', {'Content-Type': 'application/json'})
         request.add_header('cookie', cookie)
         song_data = self._download_json(request, track_id, 'Downloading metadata')

+ 2 - 4
youtube_dl/extractor/iprima.py

@@ -6,12 +6,10 @@ from random import random
 from math import floor
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     ExtractorError,
     remove_end,
+    sanitized_Request,
 )
 
 
@@ -61,7 +59,7 @@ class IPrimaIE(InfoExtractor):
             (floor(random() * 1073741824), floor(random() * 1073741824))
         )
 
-        req = compat_urllib_request.Request(player_url)
+        req = sanitized_Request(player_url)
         req.add_header('Referer', url)
         playerpage = self._download_webpage(req, video_id)
 

+ 2 - 4
youtube_dl/extractor/ivi.py

@@ -5,11 +5,9 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -78,7 +76,7 @@ class IviIE(InfoExtractor):
             ]
         }
 
-        request = compat_urllib_request.Request(api_url, json.dumps(data))
+        request = sanitized_Request(api_url, json.dumps(data))
 
         video_json_page = self._download_webpage(
             request, video_id, 'Downloading video JSON')

+ 3 - 5
youtube_dl/extractor/keezmovies.py

@@ -4,10 +4,8 @@ import os
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_urlparse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse_urlparse
+from ..utils import sanitized_Request
 
 
 class KeezMoviesIE(InfoExtractor):
@@ -26,7 +24,7 @@ class KeezMoviesIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        req = compat_urllib_request.Request(url)
+        req = sanitized_Request(url)
        req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 

+ 2 - 2
youtube_dl/extractor/letv.py

@@ -8,13 +8,13 @@ import time
 from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse,
-    compat_urllib_request,
     compat_ord,
 )
 from ..utils import (
     determine_ext,
     ExtractorError,
     parse_iso8601,
+    sanitized_Request,
     int_or_none,
     encode_data_uri,
 )
@@ -114,7 +114,7 @@ class LetvIE(InfoExtractor):
             'tkey': self.calc_time_key(int(time.time())),
             'domain': 'www.letv.com'
         }
-        play_json_req = compat_urllib_request.Request(
+        play_json_req = sanitized_Request(
             'http://api.letv.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params)
         )
         cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')

+ 3 - 3
youtube_dl/extractor/lynda.py

@@ -7,12 +7,12 @@ from .common import InfoExtractor
 from ..compat import (
     compat_str,
     compat_urllib_parse,
-    compat_urllib_request,
 )
 from ..utils import (
     ExtractorError,
     clean_html,
     int_or_none,
+    sanitized_Request,
 )
 
 
@@ -35,7 +35,7 @@ class LyndaBaseIE(InfoExtractor):
             'remember': 'false',
             'stayPut': 'false'
         }
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
         login_page = self._download_webpage(
             request, None, 'Logging in as %s' % username)
@@ -64,7 +64,7 @@ class LyndaBaseIE(InfoExtractor):
                     'remember': 'false',
                     'stayPut': 'false',
                 }
-                request = compat_urllib_request.Request(
+                request = sanitized_Request(
                     self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form).encode('utf-8'))
                 login_page = self._download_webpage(
                     request, None,

+ 3 - 3
youtube_dl/extractor/metacafe.py

@@ -7,12 +7,12 @@ from ..compat import (
     compat_parse_qs,
     compat_urllib_parse,
     compat_urllib_parse_unquote,
-    compat_urllib_request,
 )
 from ..utils import (
     determine_ext,
     ExtractorError,
     int_or_none,
+    sanitized_Request,
 )
 
 
@@ -117,7 +117,7 @@ class MetacafeIE(InfoExtractor):
             'filters': '0',
             'submit': "Continue - I'm over 18",
         }
-        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
+        request = sanitized_Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self.report_age_confirmation()
         self._download_webpage(request, None, False, 'Unable to confirm age')
@@ -142,7 +142,7 @@ class MetacafeIE(InfoExtractor):
                 return self.url_result('theplatform:%s' % ext_id, 'ThePlatform')
 
         # Retrieve video webpage to extract further information
-        req = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
+        req = sanitized_Request('http://www.metacafe.com/watch/%s/' % video_id)
 
         # AnyClip videos require the flashversion cookie so that we get the link
         # to the mp4 file

+ 3 - 5
youtube_dl/extractor/minhateca.py

@@ -2,14 +2,12 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     int_or_none,
     parse_duration,
     parse_filesize,
+    sanitized_Request,
 )
 
 
@@ -39,7 +37,7 @@ class MinhatecaIE(InfoExtractor):
             ('fileId', video_id),
             ('__RequestVerificationToken', token),
         ]
-        req = compat_urllib_request.Request(
+        req = sanitized_Request(
             'http://minhateca.com.br/action/License/Download',
             data=compat_urllib_parse.urlencode(token_data))
         req.add_header('Content-Type', 'application/x-www-form-urlencoded')

+ 2 - 2
youtube_dl/extractor/miomio.py

@@ -4,11 +4,11 @@ from __future__ import unicode_literals
 import random
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_request
 from ..utils import (
     xpath_text,
     int_or_none,
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -63,7 +63,7 @@ class MioMioIE(InfoExtractor):
             'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/xml.php?id=%s&r=%s' % (id, random.randint(100, 999)),
             video_id)
 
-        vid_config_request = compat_urllib_request.Request(
+        vid_config_request = sanitized_Request(
            'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?{0}'.format(xml_config),
             headers=http_headers)
 

+ 3 - 5
youtube_dl/extractor/moevideo.py

@@ -5,13 +5,11 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     ExtractorError,
     int_or_none,
+    sanitized_Request,
 )
 
 
@@ -80,7 +78,7 @@ class MoeVideoIE(InfoExtractor):
         ]
         r_json = json.dumps(r)
         post = compat_urllib_parse.urlencode({'r': r_json})
-        req = compat_urllib_request.Request(self._API_URL, post)
+        req = sanitized_Request(self._API_URL, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
 
         response = self._download_json(req, video_id)

+ 2 - 2
youtube_dl/extractor/mofosex.py

@@ -7,8 +7,8 @@ from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse_unquote,
     compat_urllib_parse_urlparse,
-    compat_urllib_request,
 )
+from ..utils import sanitized_Request
 
 
 class MofosexIE(InfoExtractor):
@@ -29,7 +29,7 @@ class MofosexIE(InfoExtractor):
         video_id = mobj.group('id')
         url = 'http://www.' + mobj.group('url')
 
-        req = compat_urllib_request.Request(url)
+        req = sanitized_Request(url)
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 

+ 4 - 6
youtube_dl/extractor/moniker.py

@@ -5,13 +5,11 @@ import os.path
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     ExtractorError,
     remove_start,
+    sanitized_Request,
 )
 
 
@@ -81,7 +79,7 @@ class MonikerIE(InfoExtractor):
             orig_webpage, 'builtin URL', default=None, group='url')
 
         if builtin_url:
-            req = compat_urllib_request.Request(builtin_url)
+            req = sanitized_Request(builtin_url)
             req.add_header('Referer', url)
             webpage = self._download_webpage(req, video_id, 'Downloading builtin page')
             title = self._og_search_title(orig_webpage).strip()
@@ -94,7 +92,7 @@ class MonikerIE(InfoExtractor):
             headers = {
                 b'Content-Type': b'application/x-www-form-urlencoded',
             }
-            req = compat_urllib_request.Request(url, post, headers)
+            req = sanitized_Request(url, post, headers)
             webpage = self._download_webpage(
                 req, video_id, note='Downloading video page ...')
 

+ 3 - 5
youtube_dl/extractor/mooshare.py

@@ -3,12 +3,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-    compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -59,7 +57,7 @@ class MooshareIE(InfoExtractor):
             'hash': hash_key,
         }
 
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             'http://mooshare.biz/%s' % video_id, compat_urllib_parse.urlencode(download_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
 

+ 2 - 4
youtube_dl/extractor/movieclips.py

@@ -2,9 +2,7 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
+from ..utils import sanitized_Request
 
 
 class MovieClipsIE(InfoExtractor):
@@ -25,7 +23,7 @@ class MovieClipsIE(InfoExtractor):
     def _real_extract(self, url):
         display_id = self._match_id(url)
 
-        req = compat_urllib_request.Request(url)
+        req = sanitized_Request(url)
         # it doesn't work if it thinks the browser it's too old
         req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/43.0 (Chrome)')
         webpage = self._download_webpage(req, display_id)

+ 2 - 2
youtube_dl/extractor/mtv.py

@@ -5,7 +5,6 @@ import re
 from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse,
-    compat_urllib_request,
     compat_str,
 )
 from ..utils import (
@@ -13,6 +12,7 @@ from ..utils import (
     find_xpath_attr,
     fix_xml_ampersands,
     HEADRequest,
+    sanitized_Request,
     unescapeHTML,
     url_basename,
     RegexNotFoundError,
@@ -53,7 +53,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
 
     def _extract_mobile_video_formats(self, mtvn_id):
         webpage_url = self._MOBILE_TEMPLATE % mtvn_id
-        req = compat_urllib_request.Request(webpage_url)
+        req = sanitized_Request(webpage_url)
         # Otherwise we get a webpage that would execute some javascript
         req.add_header('User-Agent', 'curl/7')
         webpage = self._download_webpage(req, mtvn_id,

+ 2 - 2
youtube_dl/extractor/myvideo.py

@@ -11,10 +11,10 @@ from ..compat import (
     compat_ord,
     compat_urllib_parse,
     compat_urllib_parse_unquote,
-    compat_urllib_request,
 )
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -83,7 +83,7 @@ class MyVideoIE(InfoExtractor):
 
         mobj = re.search(r'data-video-service="/service/data/video/%s/config' % video_id, webpage)
         if mobj is not None:
-            request = compat_urllib_request.Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '')
+            request = sanitized_Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '')
             response = self._download_webpage(request, video_id,
                                               'Downloading video info')
             info = json.loads(base64.b64decode(response).decode('utf-8'))

+ 2 - 2
youtube_dl/extractor/neteasemusic.py

@@ -8,11 +8,11 @@ import re
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_request,
     compat_urllib_parse,
     compat_str,
     compat_itertools_count,
 )
+from ..utils import sanitized_Request
 
 
 class NetEaseMusicBaseIE(InfoExtractor):
@@ -56,7 +56,7 @@ class NetEaseMusicBaseIE(InfoExtractor):
         return int(round(ms / 1000.0))
 
     def query_api(self, endpoint, video_id, note):
-        req = compat_urllib_request.Request('%s%s' % (self._API_BASE, endpoint))
+        req = sanitized_Request('%s%s' % (self._API_BASE, endpoint))
         req.add_header('Referer', self._API_BASE)
         return self._download_json(req, video_id, note)
 

+ 5 - 6
youtube_dl/extractor/nfb.py

@@ -1,10 +1,8 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-    compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse
+from ..utils import sanitized_Request
 
 
 class NFBIE(InfoExtractor):
@@ -40,8 +38,9 @@ class NFBIE(InfoExtractor):
         uploader = self._html_search_regex(r'<em class="director-name" itemprop="name">([^<]+)</em>',
                                            page, 'director name', fatal=False)
 
-        request = compat_urllib_request.Request('https://www.nfb.ca/film/%s/player_config' % video_id,
-                                                compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
+        request = sanitized_Request(
+            'https://www.nfb.ca/film/%s/player_config' % video_id,
+            compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf')
 

+ 3 - 3
youtube_dl/extractor/niconico.py

@@ -8,7 +8,6 @@ import datetime
 from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse,
-    compat_urllib_request,
     compat_urlparse,
 )
 from ..utils import (
@@ -17,6 +16,7 @@ from ..utils import (
     int_or_none,
     parse_duration,
     parse_iso8601,
+    sanitized_Request,
     xpath_text,
     determine_ext,
 )
@@ -102,7 +102,7 @@ class NiconicoIE(InfoExtractor):
             'password': password,
         }
         login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             'https://secure.nicovideo.jp/secure/login', login_data)
         login_results = self._download_webpage(
             request, None, note='Logging in', errnote='Unable to log in')
@@ -145,7 +145,7 @@ class NiconicoIE(InfoExtractor):
                 'k': thumb_play_key,
                 'v': video_id
             })
-            flv_info_request = compat_urllib_request.Request(
+            flv_info_request = sanitized_Request(
                 'http://ext.nicovideo.jp/thumb_watch', flv_info_data,
                 {'Content-Type': 'application/x-www-form-urlencoded'})
             flv_info_webpage = self._download_webpage(

+ 2 - 2
youtube_dl/extractor/noco.py

@@ -9,7 +9,6 @@ from .common import InfoExtractor
 from ..compat import (
     compat_str,
     compat_urllib_parse,
-    compat_urllib_request,
 )
 from ..utils import (
     clean_html,
@@ -17,6 +16,7 @@ from ..utils import (
     int_or_none,
     float_or_none,
     parse_iso8601,
+    sanitized_Request,
 )
 
 
@@ -74,7 +74,7 @@ class NocoIE(InfoExtractor):
             'username': username,
             'password': password,
         }
-        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
 
         login = self._download_json(request, None, 'Logging in as %s' % username)

+ 2 - 4
youtube_dl/extractor/nosvideo.py

@@ -4,11 +4,9 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
     urlencode_postdata,
     xpath_text,
     xpath_with_ns,
@@ -41,7 +39,7 @@ class NosVideoIE(InfoExtractor):
             'op': 'download1',
             'method_free': 'Continue to Video',
         }
-        req = compat_urllib_request.Request(url, urlencode_postdata(fields))
+        req = sanitized_Request(url, urlencode_postdata(fields))
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         webpage = self._download_webpage(req, video_id,
                                          'Downloading download page')

+ 3 - 5
youtube_dl/extractor/novamov.py

@@ -3,14 +3,12 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-    compat_urlparse,
-)
+from ..compat import compat_urlparse
 from ..utils import (
     ExtractorError,
     NO_DEFAULT,
     encode_dict,
+    sanitized_Request,
     urlencode_postdata,
 )
 
@@ -65,7 +63,7 @@ class NovaMovIE(InfoExtractor):
                 'post url', default=url, group='url')
             if not post_url.startswith('http'):
                 post_url = compat_urlparse.urljoin(url, post_url)
-            request = compat_urllib_request.Request(
+            request = sanitized_Request(
                 post_url, urlencode_postdata(encode_dict(fields)))
             request.add_header('Content-Type', 'application/x-www-form-urlencoded')
             request.add_header('Referer', post_url)

+ 5 - 5
youtube_dl/extractor/nowness.py

@@ -3,10 +3,10 @@ from __future__ import unicode_literals
 
 from .brightcove import BrightcoveLegacyIE
 from .common import InfoExtractor
-from ..utils import ExtractorError
-from ..compat import (
-    compat_str,
-    compat_urllib_request,
+from ..compat import compat_str
+from ..utils import (
+    ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -37,7 +37,7 @@ class NownessBaseIE(InfoExtractor):
 
     def _api_request(self, url, request_path):
         display_id = self._match_id(url)
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             'http://api.nowness.com/api/' + request_path % display_id,
             headers={
                 'X-Nowness-Language': 'zh-cn' if 'cn.nowness.com' in url else 'en-us',

+ 2 - 4
youtube_dl/extractor/nuvid.py

@@ -3,11 +3,9 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     parse_duration,
+    sanitized_Request,
     unified_strdate,
 )
 
@@ -33,7 +31,7 @@ class NuvidIE(InfoExtractor):
         formats = []
 
         for dwnld_speed, format_id in [(0, '3gp'), (5, 'mp4')]:
-            request = compat_urllib_request.Request(
+            request = sanitized_Request(
                 'http://m.nuvid.com/play/%s' % video_id)
             request.add_header('Cookie', 'skip_download_page=1; dwnld_speed=%d; adv_show=1' % dwnld_speed)
             webpage = self._download_webpage(

+ 2 - 4
youtube_dl/extractor/patreon.py

@@ -2,9 +2,7 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import (
-    js_to_json,
-)
+from ..utils import js_to_json
 
 
 class PatreonIE(InfoExtractor):
@@ -65,7 +63,7 @@ class PatreonIE(InfoExtractor):
             'password': password,
         }
 
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             'https://www.patreon.com/processLogin',
             compat_urllib_parse.urlencode(login_form).encode('utf-8')
         )

+ 3 - 5
youtube_dl/extractor/played.py

@@ -5,12 +5,10 @@ import re
 import os.path
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -46,7 +44,7 @@ class PlayedIE(InfoExtractor):
         headers = {
             b'Content-Type': b'application/x-www-form-urlencoded',
         }
-        req = compat_urllib_request.Request(url, post, headers)
+        req = sanitized_Request(url, post, headers)
         webpage = self._download_webpage(
             req, video_id, note='Downloading video page ...')
 

+ 3 - 3
youtube_dl/extractor/pluralsight.py

@@ -8,13 +8,13 @@ from .common import InfoExtractor
 from ..compat import (
     compat_str,
     compat_urllib_parse,
-    compat_urllib_request,
     compat_urlparse,
 )
 from ..utils import (
     ExtractorError,
     int_or_none,
     parse_duration,
+    sanitized_Request,
 )
 
 
@@ -73,7 +73,7 @@ class PluralsightIE(PluralsightBaseIE):
         if not post_url.startswith('http'):
             post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
 
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
 
@@ -181,7 +181,7 @@ class PluralsightIE(PluralsightBaseIE):
                     'mt': ext,
                     'q': '%dx%d' % (f['width'], f['height']),
                 }
-                request = compat_urllib_request.Request(
+                request = sanitized_Request(
                     '%s/training/Player/ViewClip' % self._API_BASE,
                     json.dumps(clip_post).encode('utf-8'))
                 request.add_header('Content-Type', 'application/json;charset=utf-8')

+ 2 - 2
youtube_dl/extractor/pornhub.py

@@ -8,10 +8,10 @@ from ..compat import (
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlparse,
-    compat_urllib_request,
 )
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
     str_to_int,
 )
 from ..aes import (
@@ -53,7 +53,7 @@ class PornHubIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        req = compat_urllib_request.Request(
+        req = sanitized_Request(
             'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id)
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)

+ 4 - 6
youtube_dl/extractor/pornotube.py

@@ -3,11 +3,9 @@ from __future__ import unicode_literals
 import json
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     int_or_none,
+    sanitized_Request,
 )
 
 
@@ -46,7 +44,7 @@ class PornotubeIE(InfoExtractor):
             'authenticationSpaceKey': originAuthenticationSpaceKey,
             'credentials': 'Clip Application',
         }
-        token_req = compat_urllib_request.Request(
+        token_req = sanitized_Request(
             'https://api.aebn.net/auth/v1/token/primal',
             data=json.dumps(token_req_data).encode('utf-8'))
         token_req.add_header('Content-Type', 'application/json')
@@ -56,7 +54,7 @@ class PornotubeIE(InfoExtractor):
         token = token_answer['tokenKey']
 
         # Get video URL
-        delivery_req = compat_urllib_request.Request(
+        delivery_req = sanitized_Request(
             'https://api.aebn.net/delivery/v1/clips/%s/MP4' % video_id)
         delivery_req.add_header('Authorization', token)
         delivery_info = self._download_json(
@@ -64,7 +62,7 @@ class PornotubeIE(InfoExtractor):
         video_url = delivery_info['mediaUrl']
 
         # Get additional info (title etc.)
-        info_req = compat_urllib_request.Request(
+        info_req = sanitized_Request(
             'https://api.aebn.net/content/v1/clips/%s?expand='
             'title,description,primaryImageNumber,startSecond,endSecond,'
             'movie.title,movie.MovieId,movie.boxCoverFront,movie.stars,'

+ 5 - 5
youtube_dl/extractor/primesharetv.py

@@ -1,11 +1,11 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
+from ..compat import compat_urllib_parse
+from ..utils import (
+    ExtractorError,
+    sanitized_Request,
 )
-from ..utils import ExtractorError
 
 
 class PrimeShareTVIE(InfoExtractor):
@@ -41,7 +41,7 @@ class PrimeShareTVIE(InfoExtractor):
             webpage, 'wait time', default=7)) + 1
         self._sleep(wait_time, video_id)
 
-        req = compat_urllib_request.Request(
+        req = sanitized_Request(
             url, compat_urllib_parse.urlencode(fields), headers)
         video_page = self._download_webpage(
             req, video_id, 'Downloading video page')

+ 3 - 5
youtube_dl/extractor/promptfile.py

@@ -4,13 +4,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     determine_ext,
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -37,7 +35,7 @@ class PromptFileIE(InfoExtractor):
 
         fields = self._hidden_inputs(webpage)
         post = compat_urllib_parse.urlencode(fields)
-        req = compat_urllib_request.Request(url, post)
+        req = sanitized_Request(url, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
         webpage = self._download_webpage(
             req, video_id, 'Downloading video page')

+ 2 - 2
youtube_dl/extractor/qqmusic.py

@@ -7,11 +7,11 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
+    sanitized_Request,
     strip_jsonp,
     unescapeHTML,
     clean_html,
 )
-from ..compat import compat_urllib_request
 
 
 class QQMusicIE(InfoExtractor):
@@ -201,7 +201,7 @@ class QQMusicSingerIE(QQPlaylistBaseIE):
         singer_desc = None
 
         if singer_id:
-            req = compat_urllib_request.Request(
+            req = sanitized_Request(
                 'http://s.plcloud.music.qq.com/fcgi-bin/fcg_get_singer_desc.fcg?utf8=1&outCharset=utf-8&format=xml&singerid=%s' % singer_id)
             req.add_header(
                 'Referer', 'http://s.plcloud.music.qq.com/xhr_proxy_utf8.html')

+ 2 - 2
youtube_dl/extractor/rtve.py

@@ -6,11 +6,11 @@ import re
 import time
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_request
 from ..utils import (
     ExtractorError,
     float_or_none,
     remove_end,
+    sanitized_Request,
     std_headers,
     struct_unpack,
 )
@@ -102,7 +102,7 @@ class RTVEALaCartaIE(InfoExtractor):
         if info['state'] == 'DESPU':
             raise ExtractorError('The video is no longer available', expected=True)
         png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/%s/videos/%s.png' % (self._manager, video_id)
-        png_request = compat_urllib_request.Request(png_url)
+        png_request = sanitized_Request(png_url)
         png_request.add_header('Referer', url)
         png = self._download_webpage(png_request, video_id, 'Downloading url information')
         video_url = _decrypt_url(png)

+ 3 - 5
youtube_dl/extractor/safari.py

@@ -6,12 +6,10 @@ import re
 from .common import InfoExtractor
 from .brightcove import BrightcoveLegacyIE
 
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
     smuggle_url,
     std_headers,
 )
@@ -58,7 +56,7 @@ class SafariBaseIE(InfoExtractor):
             'next': '',
         }
 
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form), headers=headers)
         login_page = self._download_webpage(
             request, None, 'Logging in as %s' % username)

+ 3 - 5
youtube_dl/extractor/sandia.py

@@ -6,14 +6,12 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-    compat_urlparse,
-)
+from ..compat import compat_urlparse
 from ..utils import (
     int_or_none,
     js_to_json,
     mimetype2ext,
+    sanitized_Request,
     unified_strdate,
 )
 
@@ -37,7 +35,7 @@ class SandiaIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        req = compat_urllib_request.Request(url)
+        req = sanitized_Request(url)
         req.add_header('Cookie', 'MediasitePlayerCaps=ClientPlugins=4')
         webpage = self._download_webpage(req, video_id)
 

+ 3 - 5
youtube_dl/extractor/shared.py

@@ -3,13 +3,11 @@ from __future__ import unicode_literals
 import base64
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     ExtractorError,
     int_or_none,
+    sanitized_Request,
 )
 
 
@@ -46,7 +44,7 @@ class SharedIE(InfoExtractor):
                 'Video %s does not exist' % video_id, expected=True)
 
         download_form = self._hidden_inputs(webpage)
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             url, compat_urllib_parse.urlencode(download_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
 

+ 3 - 5
youtube_dl/extractor/sharesix.py

@@ -4,12 +4,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     parse_duration,
+    sanitized_Request,
 )
 
 
@@ -50,7 +48,7 @@ class ShareSixIE(InfoExtractor):
             'method_free': 'Free'
         }
         post = compat_urllib_parse.urlencode(fields)
-        req = compat_urllib_request.Request(url, post)
+        req = sanitized_Request(url, post)
         req.add_header('Content-type', 'application/x-www-form-urlencoded')
 
         webpage = self._download_webpage(req, video_id,

+ 3 - 5
youtube_dl/extractor/sina.py

@@ -4,10 +4,8 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-    compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse
+from ..utils import sanitized_Request
 
 
 class SinaIE(InfoExtractor):
@@ -61,7 +59,7 @@ class SinaIE(InfoExtractor):
         if mobj.group('token') is not None:
             # The video id is in the redirected url
             self.to_screen('Getting video id')
-            request = compat_urllib_request.Request(url)
+            request = sanitized_Request(url)
             request.get_method = lambda: 'HEAD'
             (_, urlh) = self._download_webpage_handle(request, 'NA', False)
             return self._real_extract(urlh.geturl())

+ 4 - 6
youtube_dl/extractor/smotri.py

@@ -7,13 +7,11 @@ import hashlib
 import uuid
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     ExtractorError,
     int_or_none,
+    sanitized_Request,
     unified_strdate,
 )
 
@@ -176,7 +174,7 @@ class SmotriIE(InfoExtractor):
         if video_password:
             video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest()
 
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
 
@@ -339,7 +337,7 @@ class SmotriBroadcastIE(InfoExtractor):
                 'password': password,
             }
 
-            request = compat_urllib_request.Request(
+            request = sanitized_Request(
                 broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form))
             request.add_header('Content-Type', 'application/x-www-form-urlencoded')
             broadcast_page = self._download_webpage(

+ 2 - 2
youtube_dl/extractor/sohu.py

@@ -6,11 +6,11 @@ import re
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_request,
     compat_urllib_parse,
 )
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -96,7 +96,7 @@ class SohuIE(InfoExtractor):
             else:
                 base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
 
-            req = compat_urllib_request.Request(base_data_url + vid_id)
+            req = sanitized_Request(base_data_url + vid_id)
 
             cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
             if cn_verification_proxy:

+ 2 - 2
youtube_dl/extractor/spankwire.py

@@ -6,9 +6,9 @@ from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse_unquote,
     compat_urllib_parse_urlparse,
-    compat_urllib_request,
 )
 from ..utils import (
+    sanitized_Request,
     str_to_int,
     unified_strdate,
 )
@@ -51,7 +51,7 @@ class SpankwireIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
-        req = compat_urllib_request.Request('http://www.' + mobj.group('url'))
+        req = sanitized_Request('http://www.' + mobj.group('url'))
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 

+ 2 - 4
youtube_dl/extractor/sportdeutschland.py

@@ -4,11 +4,9 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     parse_iso8601,
+    sanitized_Request,
 )
 
 
@@ -54,7 +52,7 @@ class SportDeutschlandIE(InfoExtractor):
 
         api_url = 'http://proxy.vidibusdynamic.net/sportdeutschland.tv/api/permalinks/%s/%s?access_token=true' % (
             sport_id, video_id)
-        req = compat_urllib_request.Request(api_url, headers={
+        req = sanitized_Request(api_url, headers={
             'Accept': 'application/vnd.vidibus.v2.html+json',
             'Referer': url,
         })

+ 3 - 5
youtube_dl/extractor/streamcloud.py

@@ -4,10 +4,8 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
+from ..utils import sanitized_Request
 
 
 class StreamcloudIE(InfoExtractor):
@@ -43,7 +41,7 @@ class StreamcloudIE(InfoExtractor):
         headers = {
             b'Content-Type': b'application/x-www-form-urlencoded',
         }
-        req = compat_urllib_request.Request(url, post, headers)
+        req = sanitized_Request(url, post, headers)
 
         webpage = self._download_webpage(
             req, video_id, note='Downloading video page ...')

+ 2 - 4
youtube_dl/extractor/streamcz.py

@@ -5,11 +5,9 @@ import hashlib
 import time
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     int_or_none,
+    sanitized_Request,
 )
 
 
@@ -54,7 +52,7 @@ class StreamCZIE(InfoExtractor):
         video_id = self._match_id(url)
         api_path = '/episode/%s' % video_id
 
-        req = compat_urllib_request.Request(self._API_URL + api_path)
+        req = sanitized_Request(self._API_URL + api_path)
         req.add_header('Api-Password', _get_api_key(api_path))
         data = self._download_json(req, video_id)
 

+ 2 - 4
youtube_dl/extractor/tapely.py

@@ -4,14 +4,12 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     clean_html,
     ExtractorError,
     float_or_none,
     parse_iso8601,
+    sanitized_Request,
 )
 
 
@@ -53,7 +51,7 @@ class TapelyIE(InfoExtractor):
         display_id = mobj.group('id')
 
         playlist_url = self._API_URL.format(display_id)
-        request = compat_urllib_request.Request(playlist_url)
+        request = sanitized_Request(playlist_url)
         request.add_header('X-Requested-With', 'XMLHttpRequest')
         request.add_header('Accept', 'application/json')
         request.add_header('Referer', url)

+ 3 - 5
youtube_dl/extractor/tube8.py

@@ -4,12 +4,10 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_urlparse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse_urlparse
 from ..utils import (
     int_or_none,
+    sanitized_Request,
     str_to_int,
 )
 from ..aes import aes_decrypt_text
@@ -42,7 +40,7 @@ class Tube8IE(InfoExtractor):
         video_id = mobj.group('id')
         display_id = mobj.group('display_id')
 
-        req = compat_urllib_request.Request(url)
+        req = sanitized_Request(url)
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, display_id)
 

+ 3 - 5
youtube_dl/extractor/tubitv.py

@@ -5,13 +5,11 @@ import codecs
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     ExtractorError,
     int_or_none,
+    sanitized_Request,
 )
 
 
@@ -44,7 +42,7 @@ class TubiTvIE(InfoExtractor):
             'password': password,
         }
         payload = compat_urllib_parse.urlencode(form_data).encode('utf-8')
-        request = compat_urllib_request.Request(self._LOGIN_URL, payload)
+        request = sanitized_Request(self._LOGIN_URL, payload)
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         login_page = self._download_webpage(
             request, None, False, 'Wrong login info')

+ 3 - 3
youtube_dl/extractor/twitch.py

@@ -11,7 +11,6 @@ from ..compat import (
     compat_str,
     compat_urllib_parse,
     compat_urllib_parse_urlparse,
-    compat_urllib_request,
     compat_urlparse,
 )
 from ..utils import (
@@ -20,6 +19,7 @@ from ..utils import (
     int_or_none,
     parse_duration,
     parse_iso8601,
+    sanitized_Request,
 )
 
 
@@ -48,7 +48,7 @@ class TwitchBaseIE(InfoExtractor):
         for cookie in self._downloader.cookiejar:
             if cookie.name == 'api_token':
                 headers['Twitch-Api-Token'] = cookie.value
-        request = compat_urllib_request.Request(url, headers=headers)
+        request = sanitized_Request(url, headers=headers)
         response = super(TwitchBaseIE, self)._download_json(request, video_id, note)
         self._handle_error(response)
         return response
@@ -80,7 +80,7 @@ class TwitchBaseIE(InfoExtractor):
         if not post_url.startswith('http'):
             post_url = compat_urlparse.urljoin(redirect_url, post_url)
 
-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8'))
         request.add_header('Referer', redirect_url)
         response = self._download_webpage(

+ 2 - 2
youtube_dl/extractor/twitter.py

@@ -4,13 +4,13 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_request
 from ..utils import (
     float_or_none,
     xpath_text,
     remove_end,
     int_or_none,
     ExtractorError,
+    sanitized_Request,
 )


@@ -81,7 +81,7 @@ class TwitterCardIE(InfoExtractor):
         config = None
         formats = []
         for user_agent in USER_AGENTS:
-            request = compat_urllib_request.Request(url)
+            request = sanitized_Request(url)
             request.add_header('User-Agent', user_agent)
             webpage = self._download_webpage(request, video_id)

+ 3 - 2
youtube_dl/extractor/udemy.py

@@ -9,6 +9,7 @@ from ..compat import (
 )
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
 )


@@ -58,7 +59,7 @@ class UdemyIE(InfoExtractor):
             for header, value in headers.items():
                 url_or_request.add_header(header, value)
         else:
-            url_or_request = compat_urllib_request.Request(url_or_request, headers=headers)
+            url_or_request = sanitized_Request(url_or_request, headers=headers)

         response = super(UdemyIE, self)._download_json(url_or_request, video_id, note)
         self._handle_error(response)
@@ -89,7 +90,7 @@ class UdemyIE(InfoExtractor):
             'password': password.encode('utf-8'),
         })

-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
         request.add_header('Referer', self._ORIGIN_URL)
         request.add_header('Origin', self._ORIGIN_URL)

+ 2 - 2
youtube_dl/extractor/vbox7.py

@@ -4,11 +4,11 @@ from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse,
-    compat_urllib_request,
     compat_urlparse,
 )
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
 )


@@ -49,7 +49,7 @@ class Vbox7IE(InfoExtractor):

         info_url = "http://vbox7.com/play/magare.do"
         data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
-        info_request = compat_urllib_request.Request(info_url, data)
+        info_request = sanitized_Request(info_url, data)
         info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage')
         if info_response is None:

+ 2 - 4
youtube_dl/extractor/veoh.py

@@ -4,12 +4,10 @@ import re
 import json

 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     int_or_none,
     ExtractorError,
+    sanitized_Request,
 )


@@ -110,7 +108,7 @@ class VeohIE(InfoExtractor):
         if 'class="adultwarning-container"' in webpage:
             self.report_age_confirmation()
             age_limit = 18
-            request = compat_urllib_request.Request(url)
+            request = sanitized_Request(url)
             request.add_header('Cookie', 'confirmedAdult=true')
             webpage = self._download_webpage(request, video_id)

+ 2 - 2
youtube_dl/extractor/vessel.py

@@ -4,10 +4,10 @@ from __future__ import unicode_literals
 import json

 from .common import InfoExtractor
-from ..compat import compat_urllib_request
 from ..utils import (
     ExtractorError,
     parse_iso8601,
+    sanitized_Request,
 )


@@ -33,7 +33,7 @@ class VesselIE(InfoExtractor):
     @staticmethod
     def make_json_request(url, data):
         payload = json.dumps(data).encode('utf-8')
-        req = compat_urllib_request.Request(url, payload)
+        req = sanitized_Request(url, payload)
         req.add_header('Content-Type', 'application/json; charset=utf-8')
         return req

+ 3 - 5
youtube_dl/extractor/vevo.py

@@ -3,13 +3,11 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..compat import (
-    compat_etree_fromstring,
-    compat_urllib_request,
-)
+from ..compat import compat_etree_fromstring
 from ..utils import (
     ExtractorError,
     int_or_none,
+    sanitized_Request,
 )


@@ -73,7 +71,7 @@ class VevoIE(InfoExtractor):
     _SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/'

     def _real_initialize(self):
-        req = compat_urllib_request.Request(
+        req = sanitized_Request(
             'http://www.vevo.com/auth', data=b'')
         webpage = self._download_webpage(
             req, None,

+ 2 - 4
youtube_dl/extractor/viddler.py

@@ -4,9 +4,7 @@ from .common import InfoExtractor
 from ..utils import (
     float_or_none,
     int_or_none,
-)
-from ..compat import (
-    compat_urllib_request
+    sanitized_Request,
 )


@@ -65,7 +63,7 @@ class ViddlerIE(InfoExtractor):
             'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?video_id=%s&key=v0vhrt7bg2xq1vyxhkct' %
             video_id)
         headers = {'Referer': 'http://static.cdn-ec.viddler.com/js/arpeggio/v2/embed.html'}
-        request = compat_urllib_request.Request(json_url, None, headers)
+        request = sanitized_Request(json_url, None, headers)
         data = self._download_json(request, video_id)['video']

         formats = []

+ 2 - 2
youtube_dl/extractor/videomega.py

@@ -4,7 +4,7 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_request
+from ..utils import sanitized_Request


 class VideoMegaIE(InfoExtractor):
@@ -30,7 +30,7 @@ class VideoMegaIE(InfoExtractor):
         video_id = self._match_id(url)

         iframe_url = 'http://videomega.tv/cdn.php?ref=%s' % video_id
-        req = compat_urllib_request.Request(iframe_url)
+        req = sanitized_Request(iframe_url)
         req.add_header('Referer', url)
         req.add_header('Cookie', 'noadvtday=0')
         webpage = self._download_webpage(req, video_id)

+ 2 - 2
youtube_dl/extractor/viewster.py

@@ -4,7 +4,6 @@ from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..compat import (
     compat_HTTPError,
-    compat_urllib_request,
     compat_urllib_parse,
     compat_urllib_parse_unquote,
 )
@@ -13,6 +12,7 @@ from ..utils import (
     ExtractorError,
     int_or_none,
     parse_iso8601,
+    sanitized_Request,
     HEADRequest,
 )

@@ -76,7 +76,7 @@ class ViewsterIE(InfoExtractor):
     _ACCEPT_HEADER = 'application/json, text/javascript, */*; q=0.01'

     def _download_json(self, url, video_id, note='Downloading JSON metadata', fatal=True):
-        request = compat_urllib_request.Request(url)
+        request = sanitized_Request(url)
         request.add_header('Accept', self._ACCEPT_HEADER)
         request.add_header('Auth-token', self._AUTH_TOKEN)
         return super(ViewsterIE, self)._download_json(request, video_id, note, fatal=fatal)

+ 3 - 3
youtube_dl/extractor/viki.py

@@ -7,14 +7,14 @@ import hmac
 import hashlib
 import itertools

+from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
     int_or_none,
     parse_age_limit,
     parse_iso8601,
+    sanitized_Request,
 )
-from ..compat import compat_urllib_request
-from .common import InfoExtractor


 class VikiBaseIE(InfoExtractor):
@@ -43,7 +43,7 @@ class VikiBaseIE(InfoExtractor):
             hashlib.sha1
         ).hexdigest()
         url = self._API_URL_TEMPLATE % (query, sig)
-        return compat_urllib_request.Request(
+        return sanitized_Request(
             url, json.dumps(post_data).encode('utf-8')) if post_data else url

     def _call_api(self, path, video_id, note, timestamp=None, post_data=None):

+ 2 - 2
youtube_dl/extractor/vk.py

@@ -8,11 +8,11 @@ from .common import InfoExtractor
 from ..compat import (
     compat_str,
     compat_urllib_parse,
-    compat_urllib_request,
 )
 from ..utils import (
     ExtractorError,
     orderedSet,
+    sanitized_Request,
     str_to_int,
     unescapeHTML,
     unified_strdate,
@@ -182,7 +182,7 @@ class VKIE(InfoExtractor):
             'pass': password.encode('cp1251'),
         })

-        request = compat_urllib_request.Request(
+        request = sanitized_Request(
             'https://login.vk.com/?act=login',
             compat_urllib_parse.urlencode(login_form).encode('utf-8'))
         login_page = self._download_webpage(

+ 3 - 5
youtube_dl/extractor/vodlocker.py

@@ -2,10 +2,8 @@
 from __future__ import unicode_literals

 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
+from ..utils import sanitized_Request


 class VodlockerIE(InfoExtractor):
@@ -31,7 +29,7 @@ class VodlockerIE(InfoExtractor):
         if fields['op'] == 'download1':
             self._sleep(3, video_id)  # they do detect when requests happen too fast!
             post = compat_urllib_parse.urlencode(fields)
-            req = compat_urllib_request.Request(url, post)
+            req = sanitized_Request(url, post)
             req.add_header('Content-type', 'application/x-www-form-urlencoded')
             webpage = self._download_webpage(
                 req, video_id, 'Downloading video page')

+ 3 - 5
youtube_dl/extractor/voicerepublic.py

@@ -3,14 +3,12 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-    compat_urlparse,
-)
+from ..compat import compat_urlparse
 from ..utils import (
     ExtractorError,
     determine_ext,
     int_or_none,
+    sanitized_Request,
 )


@@ -37,7 +35,7 @@ class VoiceRepublicIE(InfoExtractor):
     def _real_extract(self, url):
         display_id = self._match_id(url)

-        req = compat_urllib_request.Request(
+        req = sanitized_Request(
             compat_urlparse.urljoin(url, '/talks/%s' % display_id))
         # Older versions of Firefox get redirected to an "upgrade browser" page
         req.add_header('User-Agent', 'youtube-dl')

+ 5 - 3
youtube_dl/extractor/wistia.py

@@ -1,8 +1,10 @@
 from __future__ import unicode_literals

 from .common import InfoExtractor
-from ..compat import compat_urllib_request
-from ..utils import ExtractorError
+from ..utils import (
+    ExtractorError,
+    sanitized_Request,
+)


 class WistiaIE(InfoExtractor):
@@ -23,7 +25,7 @@ class WistiaIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)

-        request = compat_urllib_request.Request(self._API_URL.format(video_id))
+        request = sanitized_Request(self._API_URL.format(video_id))
         request.add_header('Referer', url)  # Some videos require this.
         data_json = self._download_json(request, video_id)
         if data_json.get('error'):

+ 3 - 5
youtube_dl/extractor/xfileshare.py

@@ -4,14 +4,12 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     ExtractorError,
     encode_dict,
     int_or_none,
+    sanitized_Request,
 )


@@ -106,7 +104,7 @@ class XFileShareIE(InfoExtractor):

             post = compat_urllib_parse.urlencode(encode_dict(fields))

-            req = compat_urllib_request.Request(url, post)
+            req = sanitized_Request(url, post)
             req.add_header('Content-type', 'application/x-www-form-urlencoded')

             webpage = self._download_webpage(req, video_id, 'Downloading video page')

+ 3 - 5
youtube_dl/extractor/xtube.py

@@ -3,12 +3,10 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-    compat_urllib_parse_unquote,
-)
+from ..compat import compat_urllib_parse_unquote
 from ..utils import (
     parse_duration,
+    sanitized_Request,
     str_to_int,
 )

@@ -32,7 +30,7 @@ class XTubeIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)

-        req = compat_urllib_request.Request(url)
+        req = sanitized_Request(url)
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)

+ 3 - 5
youtube_dl/extractor/xvideos.py

@@ -3,14 +3,12 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_unquote,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse_unquote
 from ..utils import (
     clean_html,
     ExtractorError,
     determine_ext,
+    sanitized_Request,
 )


@@ -48,7 +46,7 @@ class XVideosIE(InfoExtractor):
             'url': video_url,
         }]

-        android_req = compat_urllib_request.Request(url)
+        android_req = sanitized_Request(url)
         android_req.add_header('User-Agent', self._ANDROID_USER_AGENT)
         android_webpage = self._download_webpage(android_req, video_id, fatal=False)

+ 2 - 2
youtube_dl/extractor/yandexmusic.py

@@ -8,11 +8,11 @@ from .common import InfoExtractor
 from ..compat import (
     compat_str,
     compat_urllib_parse,
-    compat_urllib_request,
 )
 from ..utils import (
     int_or_none,
     float_or_none,
+    sanitized_Request,
 )


@@ -154,7 +154,7 @@ class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE):
         if len(tracks) < len(track_ids):
             present_track_ids = set([compat_str(track['id']) for track in tracks if track.get('id')])
             missing_track_ids = set(map(compat_str, track_ids)) - set(present_track_ids)
-            request = compat_urllib_request.Request(
+            request = sanitized_Request(
                 'https://music.yandex.ru/handlers/track-entries.jsx',
                 compat_urllib_parse.urlencode({
                     'entries': ','.join(missing_track_ids),

+ 5 - 4
youtube_dl/extractor/youku.py

@@ -4,12 +4,13 @@ from __future__ import unicode_literals
 import base64

 from .common import InfoExtractor
-from ..utils import ExtractorError
-
 from ..compat import (
     compat_urllib_parse,
     compat_ord,
-    compat_urllib_request,
+)
+from ..utils import (
+    ExtractorError,
+    sanitized_Request,
 )


@@ -187,7 +188,7 @@ class YoukuIE(InfoExtractor):
         video_id = self._match_id(url)

         def retrieve_data(req_url, note):
-            req = compat_urllib_request.Request(req_url)
+            req = sanitized_Request(req_url)

             cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
             if cn_verification_proxy:

+ 2 - 2
youtube_dl/extractor/youporn.py

@@ -3,9 +3,9 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_request
 from ..utils import (
     int_or_none,
+    sanitized_Request,
     str_to_int,
     unescapeHTML,
     unified_strdate,
@@ -63,7 +63,7 @@ class YouPornIE(InfoExtractor):
         video_id = mobj.group('id')
         display_id = mobj.group('display_id')

-        request = compat_urllib_request.Request(url)
+        request = sanitized_Request(url)
         request.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(request, display_id)

+ 3 - 3
youtube_dl/extractor/youtube.py

@@ -20,7 +20,6 @@ from ..compat import (
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlparse,
-    compat_urllib_request,
     compat_urlparse,
     compat_str,
 )
@@ -35,6 +34,7 @@ from ..utils import (
     orderedSet,
     parse_duration,
     remove_start,
+    sanitized_Request,
     smuggle_url,
     str_to_int,
     unescapeHTML,
@@ -114,7 +114,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):

         login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')

-        req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
+        req = sanitized_Request(self._LOGIN_URL, login_data)
         login_results = self._download_webpage(
             req, None,
             note='Logging in', errnote='unable to log in', fatal=False)
@@ -147,7 +147,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):

             tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')

-            tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
+            tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
             tfa_results = self._download_webpage(
                 tfa_req, None,
                 note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
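The calling convention is unchanged throughout: a request built with sanitized_Request still takes an optional data payload and still supports add_header(), so it is passed to _download_webpage()/_download_json() exactly as before. A standalone illustration (the endpoint and form fields are invented for the example, not taken from any extractor):

from youtube_dl.compat import compat_urllib_parse
from youtube_dl.utils import sanitized_Request

# Hypothetical login endpoint and form data, for illustration only.
payload = compat_urllib_parse.urlencode({
    'username': 'user',
    'password': 'secret',
}).encode('utf-8')
request = sanitized_Request('https://example.com/login', payload)
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
# The request object is then handed to InfoExtractor._download_webpage() or
# _download_json(), just as the compat_urllib_request.Request object was.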