Merge branch 'master' of https://github.com/rg3/youtube-dl

Conflicts:
	.gitignore
	LATEST_VERSION
	Makefile
	youtube-dl
	youtube-dl.exe
	youtube_dl/InfoExtractors.py
	youtube_dl/__init__.py
Jeff Crouse · 12 years ago · commit 258d5850c9

+ 13 - 11
.gitignore

@@ -1,17 +1,19 @@
 *.pyc
 *.pyo
 *~
+*.DS_Store
 wine-py2exe/
 py2exe.log
-youtube-dl
+*.kate-swp
+build/
+dist/
+MANIFEST
+README.txt
 youtube-dl.1
-LATEST_VERSION
-
-#OS X
-.DS_Store
-.AppleDouble
-.LSOverride
-Icon
-._*
-.Spotlight-V100
-.Trashes
+youtube-dl.bash-completion
+youtube-dl
+youtube-dl.exe
+youtube-dl.tar.gz
+.coverage
+cover/
+updates_key.pem

+ 17 - 0
.tarignore

@@ -0,0 +1,17 @@
+updates_key.pem
+*.pyc
+*.pyo
+youtube-dl.exe
+wine-py2exe/
+py2exe.log
+*.kate-swp
+build/
+dist/
+MANIFEST
+*.DS_Store
+youtube-dl.tar.gz
+.coverage
+cover/
+__pycache__/
+.git/
+*~

+ 10 - 5
.travis.yml

@@ -1,9 +1,14 @@
 language: python
-#specify the python version
 python:
   - "2.6"
   - "2.7"
-#command to install the setup
-install:
-# command to run tests
-script: nosetests test --nocapture
+  - "3.3"
+script: nosetests test --verbose
+notifications:
+  email:
+    - filippo.valsorda@gmail.com
+    - phihag@phihag.de
+  irc:
+    channels:
+      - "irc.freenode.org#youtube-dl"
+    skip_join: true

+ 14 - 0
CHANGELOG

@@ -0,0 +1,14 @@
+2013.01.02  Codename: GIULIA
+
+    * Add support for ComedyCentral clips <nto>
+    * Corrected Vimeo description fetching <Nick Daniels>
+    * Added the --no-post-overwrites argument <Barbu Paul - Gheorghe>
+    * --verbose offers more environment info
+    * New info_dict field: uploader_id
+    * New updates system, with signature checking
+    * New IEs: NBA, JustinTV, FunnyOrDie, TweetReel, Steam, Ustream
+    * Fixed IEs: BlipTv
+    * Fixed for Python 3 IEs: Xvideo, Youku, XNXX, Dailymotion, Vimeo, InfoQ
+    * Simplified IEs and test code
+    * Various (Python 3 and other) fixes
+    * Revamped and expanded tests

+ 1 - 0
LATEST_VERSION

@@ -0,0 +1 @@
+2012.10.09

+ 24 - 0
LICENSE

@@ -0,0 +1,24 @@
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <http://unlicense.org/>

+ 3 - 0
MANIFEST.in

@@ -0,0 +1,3 @@
+include README.md
+include test/*.py
+include test/*.json

+ 21 - 33
Makefile

@@ -1,8 +1,7 @@
-all: youtube-dl README.md youtube-dl.1 youtube-dl.bash-completion LATEST_VERSION
-# TODO: re-add youtube-dl.exe, and make sure it's 1. safe and 2. doesn't need sudo
+all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion

 clean:
-	rm -f youtube-dl youtube-dl.exe youtube-dl.1 LATEST_VERSION youtube_dl/*.pyc
+	rm -rf youtube-dl youtube-dl.exe youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/

 PREFIX=/usr/local
 BINDIR=$(PREFIX)/bin
@@ -17,43 +16,32 @@ install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
 	install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
 	install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl

-.PHONY: all clean install youtube-dl.bash-completion
-# TODO un-phony README.md and youtube-dl.bash_completion by reading from .in files and generating from them
+test:
+	#nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
+	nosetests --verbose test
+
+.PHONY: all clean install test

 youtube-dl: youtube_dl/*.py
-	zip --quiet --junk-paths youtube-dl youtube_dl/*.py
+	zip --quiet youtube-dl youtube_dl/*.py
+	zip --quiet --junk-paths youtube-dl youtube_dl/__main__.py
 	echo '#!/usr/bin/env python' > youtube-dl
 	cat youtube-dl.zip >> youtube-dl
 	rm youtube-dl.zip
 	chmod a+x youtube-dl

-youtube-dl.exe: youtube_dl/*.py
-	bash devscripts/wine-py2exe.sh build_exe.py
-
 README.md: youtube_dl/*.py
-	@options=$$(COLUMNS=80 python -m youtube_dl --help | sed -e '1,/.*General Options.*/ d' -e 's/^\W\{2\}\(\w\)/## \1/') && \
-		header=$$(sed -e '/.*# OPTIONS/,$$ d' README.md) && \
-		footer=$$(sed -e '1,/.*# FAQ/ d' README.md) && \
-		echo "$${header}" > README.md && \
-		echo >> README.md && \
-		echo '# OPTIONS' >> README.md && \
-		echo "$${options}" >> README.md&& \
-		echo >> README.md && \
-		echo '# FAQ' >> README.md && \
-		echo "$${footer}" >> README.md
-
-youtube-dl.1: 
-	pandoc -s -w man README.md -o youtube-dl.1
-
-youtube-dl.bash-completion: 
-	@options=`egrep -o '(--[a-z-]+) ' README.md | sort -u | xargs echo` && \
-		content=`sed "s/opts=\"[^\"]*\"/opts=\"$${options}\"/g" youtube-dl.bash-completion` && \
-		echo "$${content}" > youtube-dl.bash-completion
-
-LATEST_VERSION: youtube_dl/__init__.py
-	python -m youtube_dl --version > LATEST_VERSION
+	COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py

-test:
-	nosetests2 --nocapture test
+README.txt: README.md
+	pandoc -f markdown -t plain README.md -o README.txt
+
+youtube-dl.1: README.md
+	pandoc -s -f markdown -t man README.md -o youtube-dl.1
+
+youtube-dl.bash-completion: youtube_dl/*.py devscripts/bash-completion.in
+	python devscripts/bash-completion.py

-.PHONY: default compile update update-latest update-readme test clean
+youtube-dl.tar.gz: all
+	tar -cvzf youtube-dl.tar.gz -s "|^./|./youtube-dl/|" \
+		--exclude-from=".tarignore" -- .

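The rewritten `youtube-dl` target builds the distributable by zipping the package, prepending a `#!/usr/bin/env python` line, and marking the result executable; this works because Python locates a zip archive's central directory from the end of the file, so leading bytes do not corrupt it. A minimal sketch of the same trick in Python, with illustrative file names:

    import os
    import stat
    import zipfile

    # __main__.py at the archive root is what Python executes for a zip
    with zipfile.ZipFile('app.zip', 'w') as zf:
        zf.writestr('__main__.py', 'print("hello from inside the zip")\n')

    # Prepend the shebang; the zip stays valid because it is read from the end
    with open('app', 'wb') as out:
        out.write(b'#!/usr/bin/env python\n')
        with open('app.zip', 'rb') as zin:
            out.write(zin.read())
    os.remove('app.zip')
    os.chmod('app', os.stat('app').st_mode | stat.S_IXUSR)  # ./app now runs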
+ 52 - 18
README.md

@@ -1,4 +1,4 @@
-% youtube-dl(1)
+% YOUTUBE-DL(1)

 # NAME
 youtube-dl
@@ -20,6 +20,11 @@ which means you can modify it, redistribute it or use it however you like.
     -i, --ignore-errors      continue on download errors
     -r, --rate-limit LIMIT   download rate limit (e.g. 50k or 44.6m)
     -R, --retries RETRIES    number of retries (default is 10)
+    --buffer-size SIZE       size of download buffer (e.g. 1024 or 16k) (default
+                             is 1024)
+    --no-resize-buffer       do not automatically adjust the buffer size. By
+                             default, the buffer size is automatically resized
+                             from an initial value of SIZE.
     --dump-user-agent        display the current browser identification
     --user-agent UA          specify a custom user agent
     --list-extractors        List all supported extractors and the URLs they
@@ -37,16 +42,22 @@ which means you can modify it, redistribute it or use it however you like.
   Filesystem Options:
     -t, --title              use title in file name
     --id                     use video ID in file name
-    -l, --literal            use literal title in file name
+    -l, --literal            [deprecated] alias of --title
     -A, --auto-number        number downloaded files starting from 00000
-    -o, --output TEMPLATE    output filename template. Use %(stitle)s to get the
+    -o, --output TEMPLATE    output filename template. Use %(title)s to get the
                              title, %(uploader)s for the uploader name,
-                             %(autonumber)s to get an automatically incremented
-                             number, %(ext)s for the filename extension,
-                             %(upload_date)s for the upload date (YYYYMMDD),
-                             %(extractor)s for the provider (youtube, metacafe,
-                             etc), %(id)s for the video id and %% for a literal
-                             percent. Use - to output to stdout.
+                             %(uploader_id)s for the uploader nickname if
+                             different, %(autonumber)s to get an automatically
+                             incremented number, %(ext)s for the filename
+                             extension, %(upload_date)s for the upload date
+                             (YYYYMMDD), %(extractor)s for the provider
+                             (youtube, metacafe, etc), %(id)s for the video id
+                             and %% for a literal percent. Use - to output to
+                             stdout. Can also be used to download to a different
+                             directory, for example with -o '/my/downloads/%(upl
+                             oader)s/%(title)s-%(id)s.%(ext)s' .
+    --restrict-filenames     Restrict filenames to only ASCII characters, and
+                             avoid "&" and spaces in filenames
     -a, --batch-file FILE    file containing URLs to download ('-' for stdin)
     -w, --no-overwrites      do not overwrite files
     -c, --continue           resume partially downloaded files
@@ -101,6 +112,34 @@ which means you can modify it, redistribute it or use it however you like.
                              specific bitrate like 128K (default 5)
     -k, --keep-video         keeps the video file on disk after the post-
                              processing; the video is erased by default
+    --no-post-overwrites     do not overwrite post-processed files; the post-
+                             processed files are overwritten by default
+
+# CONFIGURATION
+
+You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.local/config/youtube-dl.conf`.
+
+# OUTPUT TEMPLATE
+
+The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are:
+
+ - `id`: The sequence will be replaced by the video identifier.
+ - `url`: The sequence will be replaced by the video URL.
+ - `uploader`: The sequence will be replaced by the nickname of the person who uploaded the video.
+ - `upload_date`: The sequence will be replaced by the upload date in YYYYMMDD format.
+ - `title`: The sequence will be replaced by the video title.
+ - `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
+ - `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
+ - `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
+
+The current default template is `%(id)s.%(ext)s`, but that will be switched to `%(title)s-%(id)s.%(ext)s` (which can be requested with `-t` at the moment).
+
+In some cases, you don't want special characters such as 中, spaces, or &, for example when transferring the downloaded filename to a Windows system or passing the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
+
+    $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
+    youtube-dl test video ''_ä↭𝕐.mp4    # All kinds of weird characters
+    $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
+    youtube-dl_test_video_.mp4          # A simple file name

 # FAQ

@@ -137,17 +176,9 @@ The error

 means you're using an outdated version of Python. Please update to Python 2.6 or 2.7.

-To run youtube-dl under Python 2.5, you'll have to manually check it out like this:
-
-	git clone git://github.com/rg3/youtube-dl.git
-	cd youtube-dl
-	python -m youtube_dl --help
-
-Please note that Python 2.5 is not supported anymore.
-
 ### What is this binary file? Where has the code gone?

-Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repo to see the code. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make compile`.
+Since June 2012 (#342) youtube-dl is packed as an executable zipfile; simply unzip it (you might need to rename it to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make youtube-dl`.

 ### The exe throws a *Runtime error from Visual C++*

@@ -166,6 +197,9 @@ Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/i
 Please include:

 * Your exact command line, like `youtube-dl -t "http://www.youtube.com/watch?v=uHlDtZ6Oc3s&feature=channel_video_title"`. A common mistake is not to escape the `&`. Putting URLs in quotes should solve this problem.
+* If possible, re-run the command with `--verbose` and include the full output; it is really helpful to us.
 * The output of `youtube-dl --version`
 * The output of `python --version`
 * The name and version of your Operating System ("Ubuntu 11.04 x64" or "Windows 7 x64" is usually enough).
+
+For discussions, join us in the irc channel #youtube-dl on freenode.

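The `%(NAME)s` sequences documented in the new OUTPUT TEMPLATE section are plain Python dict-based `%`-formatting. A minimal sketch (the `info` fields are illustrative; the video id is the test video used elsewhere in this commit):

    info = {
        'id': 'BaW_jenozKc',
        'title': 'youtube-dl test video',
        'ext': 'mp4',
    }
    template = '%(title)s-%(id)s.%(ext)s'
    print(template % info)  # youtube-dl test video-BaW_jenozKc.mp4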
+ 6 - 0
bin/youtube-dl

@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+
+import youtube_dl
+
+if __name__ == '__main__':
+    youtube_dl.main()

+ 0 - 48
build_exe.py

@@ -1,48 +0,0 @@
-from distutils.core import setup
-import py2exe
-import sys, os
-
-"""This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package"""
-
-# If run without args, build executables
-if len(sys.argv) == 1:
-    sys.argv.append("py2exe")
-
-# os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) # conflict with wine-py2exe.sh
-sys.path.append('./youtube_dl')
-
-options = {
-    "bundle_files": 1,
-    "compressed": 1,
-    "optimize": 2,
-    "dist_dir": '.',
-    "dll_excludes": ['w9xpopen.exe']
-}
-
-console = [{
-    "script":"./youtube_dl/__main__.py",
-    "dest_base": "youtube-dl",
-}]
-
-init_file = open('./youtube_dl/__init__.py')
-for line in init_file.readlines():
-    if line.startswith('__version__'):
-        version = line[11:].strip(" ='\n")
-        break
-else:
-    version = ''
-
-setup(name='youtube-dl',
-      version=version,
-      description='Small command-line program to download videos from YouTube.com and other video sites',
-      url='https://github.com/rg3/youtube-dl',
-      packages=['youtube_dl'],
-      
-      console = console,
-      options = {"py2exe": options},
-      zipfile = None,
-)
-
-import shutil
-shutil.rmtree("build")
-

+ 14 - 0
devscripts/bash-completion.in

@@ -0,0 +1,14 @@
+__youtube-dl()
+{
+    local cur prev opts
+    COMPREPLY=()
+    cur="${COMP_WORDS[COMP_CWORD]}"
+    opts="{{flags}}"
+
+    if [[ ${cur} == * ]] ; then
+        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+        return 0
+    fi
+}
+
+complete -F __youtube-dl youtube-dl

+ 26 - 0
devscripts/bash-completion.py

@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+import os
+from os.path import dirname as dirn
+import sys
+
+sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+import youtube_dl
+
+BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
+BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
+
+def build_completion(opt_parser):
+    opts_flag = []
+    for group in opt_parser.option_groups:
+        for option in group.option_list:
+            #for every long flag
+            opts_flag.append(option.get_opt_string())
+    with open(BASH_COMPLETION_TEMPLATE) as f:
+        template = f.read()
+    with open(BASH_COMPLETION_FILE, "w") as f:
+        #just using the special char
+        filled_template = template.replace("{{flags}}", " ".join(opts_flag))
+        f.write(filled_template)
+
+parser = youtube_dl.parseOpts()[0]
+build_completion(parser)

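The script relies on optparse's `get_opt_string()`, which returns an option's canonical flag (the long form when one exists); joining those strings is what fills the `{{flags}}` placeholder in the template. A minimal sketch with made-up options:

    import optparse

    parser = optparse.OptionParser()
    group = optparse.OptionGroup(parser, 'General Options')
    group.add_option('-i', '--ignore-errors', action='store_true')
    group.add_option('-R', '--retries', dest='retries')
    parser.add_option_group(group)

    flags = [option.get_opt_string()
             for group in parser.option_groups
             for option in group.option_list]
    print(' '.join(flags))  # --ignore-errors --retries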
+ 33 - 0
devscripts/gh-pages/add-version.py

@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+
+import json
+import sys
+import hashlib
+import urllib.request
+
+if len(sys.argv) <= 1:
+	print('Specify the version number as parameter')
+	sys.exit()
+version = sys.argv[1]
+
+with open('update/LATEST_VERSION', 'w') as f:
+	f.write(version)
+
+versions_info = json.load(open('update/versions.json'))
+if 'signature' in versions_info:
+	del versions_info['signature']
+
+new_version = {}
+
+filenames = {'bin': 'youtube-dl', 'exe': 'youtube-dl.exe', 'tar': 'youtube-dl-%s.tar.gz' % version}
+for key, filename in filenames.items():
+	print('Downloading and checksumming %s...' %filename)
+	url = 'http://youtube-dl.org/downloads/%s/%s' % (version, filename)
+	data = urllib.request.urlopen(url).read()
+	sha256sum = hashlib.sha256(data).hexdigest()
+	new_version[key] = (url, sha256sum)
+
+versions_info['versions'][version] = new_version
+versions_info['latest'] = version
+
+json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)

+ 32 - 0
devscripts/gh-pages/generate-download.py

@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+import hashlib
+import shutil
+import subprocess
+import tempfile
+import urllib.request
+import json
+
+versions_info = json.load(open('update/versions.json'))
+version = versions_info['latest']
+URL = versions_info['versions'][version]['bin'][0]
+
+data = urllib.request.urlopen(URL).read()
+
+# Read template page
+with open('download.html.in', 'r', encoding='utf-8') as tmplf:
+    template = tmplf.read()
+
+md5sum = hashlib.md5(data).hexdigest()
+sha1sum = hashlib.sha1(data).hexdigest()
+sha256sum = hashlib.sha256(data).hexdigest()
+template = template.replace('@PROGRAM_VERSION@', version)
+template = template.replace('@PROGRAM_URL@', URL)
+template = template.replace('@PROGRAM_MD5SUM@', md5sum)
+template = template.replace('@PROGRAM_SHA1SUM@', sha1sum)
+template = template.replace('@PROGRAM_SHA256SUM@', sha256sum)
+template = template.replace('@EXE_URL@', versions_info['versions'][version]['exe'][0])
+template = template.replace('@EXE_SHA256SUM@', versions_info['versions'][version]['exe'][1])
+template = template.replace('@TAR_URL@', versions_info['versions'][version]['tar'][0])
+template = template.replace('@TAR_SHA256SUM@', versions_info['versions'][version]['tar'][1])
+with open('download.html', 'w', encoding='utf-8') as dlf:
+    dlf.write(template)

+ 28 - 0
devscripts/gh-pages/sign-versions.py

@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+
+import rsa
+import json
+from binascii import hexlify
+
+versions_info = json.load(open('update/versions.json'))
+if 'signature' in versions_info:
+	del versions_info['signature']
+
+print('Enter the PKCS1 private key, followed by a blank line:')
+privkey = ''
+while True:
+	try:
+		line = input()
+	except EOFError:
+		break
+	if line == '':
+		break
+	privkey += line + '\n'
+privkey = bytes(privkey, 'ascii')
+privkey = rsa.PrivateKey.load_pkcs1(privkey)
+
+signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
+print('signature: ' + signature)
+
+versions_info['signature'] = signature
+json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)

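The verification counterpart (not part of this commit) uses the same `rsa` module: strip the `signature` key, re-serialize with `sort_keys=True` so the bytes match what was signed, and check against the public key. A minimal sketch; the modulus is the one embedded in devscripts/transition_helper_exe/youtube-dl.py below:

    import json
    from binascii import unhexlify

    import rsa

    with open('update/versions.json') as f:
        versions_info = json.load(f)
    signature = unhexlify(versions_info.pop('signature'))
    message = json.dumps(versions_info, sort_keys=True).encode('utf-8')

    pub_key = rsa.PublicKey(0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
    rsa.verify(message, signature, pub_key)  # raises rsa.VerificationError on mismatch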
+ 21 - 0
devscripts/gh-pages/update-copyright.py

@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import with_statement
+
+import datetime
+import glob
+import io # For Python 2 compatibility
+import os
+import re
+
+year = str(datetime.datetime.now().year)
+for fn in glob.glob('*.html*'):
+    with io.open(fn, encoding='utf-8') as f:
+        content = f.read()
+    newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content)
+    if content != newc:
+        tmpFn = fn + '.part'
+        with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
+            outf.write(newc)
+        os.rename(tmpFn, fn)

+ 20 - 0
devscripts/make_readme.py

@@ -0,0 +1,20 @@
+import sys
+import re
+
+README_FILE = 'README.md'
+helptext = sys.stdin.read()
+
+with open(README_FILE) as f:
+    oldreadme = f.read()
+
+header = oldreadme[:oldreadme.index('# OPTIONS')]
+footer = oldreadme[oldreadme.index('# CONFIGURATION'):]
+
+options = helptext[helptext.index('  General Options:')+19:]
+options = re.sub(r'^  (\w.+)$', r'## \1', options, flags=re.M)
+options = '# OPTIONS\n' + options + '\n'
+
+with open(README_FILE, 'w') as f:
+    f.write(header)
+    f.write(options)
+    f.write(footer)

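The regular expression exploits the layout of the `--help` text: option-group headings are indented by exactly two spaces while option lines are indented further, so `^  (\w.+)$` with `re.M` matches only the headings and rewrites them as `## ` headers. A minimal sketch with an illustrative heading:

    import re

    helptext = ('  Video Format Options:\n'
                '    -f, --format FORMAT      video format code\n')
    print(re.sub(r'^  (\w.+)$', r'## \1', helptext, flags=re.M))
    # ## Video Format Options:
    #     -f, --format FORMAT      video format code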
+ 79 - 5
devscripts/release.sh

@@ -1,11 +1,85 @@
 #!/bin/sh

+# IMPORTANT: the following assumptions are made
+# * the GH repo is on the origin remote
+# * the gh-pages branch is named so locally
+# * the git config user.signingkey is properly set
+
+# You will need
+# pip install coverage nose rsa
+
+# TODO
+# release notes
+# make hash on local files
+
+set -e
+
 if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
 version="$1"
 if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
-if [ ! -z "`git status --porcelain`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
-sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/__init__.py
-make all
-git add -A
+if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
+if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
+
+echo "\n### First of all, testing..."
+make clean
+nosetests --with-coverage --cover-package=youtube_dl --cover-html test || exit 1
+
+echo "\n### Changing version in version.py..."
+sed -i~ "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
+
+echo "\n### Committing CHANGELOG README.md and youtube_dl/version.py..."
+make README.md
+git add CHANGELOG README.md youtube_dl/version.py
 git commit -m "release $version"
-git tag -m "Release $version" "$version"
+
+echo "\n### Now tagging, signing and pushing..."
+git tag -s -m "Release $version" "$version"
+git show "$version"
+read -p "Is it good, can I push? (y/n) " -n 1
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
+echo
+MASTER=$(git rev-parse --abbrev-ref HEAD)
+git push origin $MASTER:master
+git push origin "$version"
+
+echo "\n### OK, now it is time to build the binaries..."
+REV=$(git rev-parse HEAD)
+make youtube-dl youtube-dl.tar.gz
+wget "http://jeromelaheurte.net:8142/download/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe || \
+	wget "http://jeromelaheurte.net:8142/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
+mkdir -p "update_staging/$version"
+mv youtube-dl youtube-dl.exe "update_staging/$version"
+mv youtube-dl.tar.gz "update_staging/$version/youtube-dl-$version.tar.gz"
+RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
+(cd update_staging/$version/ && md5sum $RELEASE_FILES > MD5SUMS)
+(cd update_staging/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
+(cd update_staging/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
+(cd update_staging/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
+git checkout HEAD -- youtube-dl youtube-dl.exe
+
+echo "\n### Signing and uploading the new binaries to youtube-dl.org..."
+for f in $RELEASE_FILES; do gpg --detach-sig "update_staging/$version/$f"; done
+scp -r "update_staging/$version" ytdl@youtube-dl.org:html/downloads/
+rm -r update_staging
+
+echo "\n### Now switching to gh-pages..."
+git checkout gh-pages
+git checkout "$MASTER" -- devscripts/gh-pages/
+git reset devscripts/gh-pages/
+devscripts/gh-pages/add-version.py $version
+devscripts/gh-pages/sign-versions.py < updates_key.pem
+devscripts/gh-pages/generate-download.py
+devscripts/gh-pages/update-copyright.py
+rm -r test_coverage
+mv cover test_coverage
+git add *.html *.html.in update test_coverage
+git commit -m "release $version"
+git show HEAD
+read -p "Is it good, can I push? (y/n) " -n 1
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
+echo
+git push origin gh-pages
+
+echo "\n### DONE!"
+rm -r devscripts
+git checkout $MASTER

+ 40 - 0
devscripts/transition_helper.py

@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+import sys, os
+
+try:
+    import urllib.request as compat_urllib_request
+except ImportError: # Python 2
+    import urllib2 as compat_urllib_request
+
+sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
+sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
+sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')
+
+try:
+    raw_input()
+except NameError: # Python 3
+    input()
+
+filename = sys.argv[0]
+
+API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads"
+BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl"
+
+if not os.access(filename, os.W_OK):
+    sys.exit('ERROR: no write permissions on %s' % filename)
+
+try:
+    urlh = compat_urllib_request.urlopen(BIN_URL)
+    newcontent = urlh.read()
+    urlh.close()
+except (IOError, OSError) as err:
+    sys.exit('ERROR: unable to download latest version')
+
+try:
+    with open(filename, 'wb') as outf:
+        outf.write(newcontent)
+except (IOError, OSError) as err:
+    sys.exit('ERROR: unable to overwrite current version')
+
+sys.stderr.write(u'Done! Now you can run youtube-dl.\n')

+ 12 - 0
devscripts/transition_helper_exe/setup.py

@@ -0,0 +1,12 @@
+from distutils.core import setup
+import py2exe
+
+py2exe_options = {
+    "bundle_files": 1,
+    "compressed": 1,
+    "optimize": 2,
+    "dist_dir": '.',
+    "dll_excludes": ['w9xpopen.exe']
+}
+
+setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)

+ 102 - 0
devscripts/transition_helper_exe/youtube-dl.py

@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+import sys, os
+import urllib2
+import json, hashlib
+
+def rsa_verify(message, signature, key):
+    from struct import pack
+    from hashlib import sha256
+    from sys import version_info
+    def b(x):
+        if version_info[0] == 2: return x
+        else: return x.encode('latin1')
+    assert(type(message) == type(b('')))
+    block_size = 0
+    n = key[0]
+    while n:
+        block_size += 1
+        n >>= 8
+    signature = pow(int(signature, 16), key[1], key[0])
+    raw_bytes = []
+    while signature:
+        raw_bytes.insert(0, pack("B", signature & 0xFF))
+        signature >>= 8
+    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
+    if signature[0:2] != b('\x00\x01'): return False
+    signature = signature[2:]
+    if not b('\x00') in signature: return False
+    signature = signature[signature.index(b('\x00'))+1:]
+    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    signature = signature[19:]
+    if signature != sha256(message).digest(): return False
+    return True
+
+sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
+sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
+sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')
+
+raw_input()
+
+filename = sys.argv[0]
+
+UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
+VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
+JSON_URL = UPDATE_URL + 'versions.json'
+UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
+
+if not os.access(filename, os.W_OK):
+    sys.exit('ERROR: no write permissions on %s' % filename)
+
+exe = os.path.abspath(filename)
+directory = os.path.dirname(exe)
+if not os.access(directory, os.W_OK):
+    sys.exit('ERROR: no write permissions on %s' % directory)
+
+try:
+    versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8')
+    versions_info = json.loads(versions_info)
+except:
+    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
+if not 'signature' in versions_info:
+    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
+signature = versions_info['signature']
+del versions_info['signature']
+if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY):
+    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')
+
+version = versions_info['versions'][versions_info['latest']]
+
+try:
+    urlh = urllib2.urlopen(version['exe'][0])
+    newcontent = urlh.read()
+    urlh.close()
+except (IOError, OSError) as err:
+    sys.exit('ERROR: unable to download latest version')
+
+newcontent_hash = hashlib.sha256(newcontent).hexdigest()
+if newcontent_hash != version['exe'][1]:
+    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')
+
+try:
+    with open(exe + '.new', 'wb') as outf:
+        outf.write(newcontent)
+except (IOError, OSError) as err:
+    sys.exit(u'ERROR: unable to write the new version')
+
+try:
+    bat = os.path.join(directory, 'youtube-dl-updater.bat')
+    b = open(bat, 'w')
+    b.write("""
+echo Updating youtube-dl...
+ping 127.0.0.1 -n 5 -w 1000 > NUL
+move /Y "%s.new" "%s"
+del "%s"
+    \n""" %(exe, exe, bat))
+    b.close()
+
+    os.startfile(bat)
+except (IOError, OSError) as err:
+    sys.exit('ERROR: unable to overwrite current version')
+
+sys.stderr.write(u'Done! Now you can run youtube-dl.\n')

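The `rsa_verify` helper above re-implements PKCS#1 v1.5 signature verification with no dependencies: it exponentiates the signature, strips the `00 01 ff..ff 00` padding, and requires the remainder to be the DER-encoded SHA-256 DigestInfo header followed by the message digest. A minimal sketch (not from the commit) of the block it expects, for an assumed 1024-bit key:

    import hashlib

    # DER DigestInfo header for SHA-256, the magic bytes checked in rsa_verify
    DIGESTINFO_SHA256 = b'\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'

    def pkcs1_v15_block(message, block_size):
        tail = DIGESTINFO_SHA256 + hashlib.sha256(message).digest()
        padding = b'\xff' * (block_size - len(tail) - 3)
        return b'\x00\x01' + padding + b'\x00' + tail

    block = pkcs1_v15_block(b'example', 128)  # 128 bytes for a 1024-bit modulus
    print(len(block))  # 128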
+ 74 - 0
setup.py

@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from __future__ import print_function
+from distutils.core import setup
+import pkg_resources
+import sys
+
+try:
+    import py2exe
+    """This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package"""
+except ImportError:
+    if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
+        print("Cannot import py2exe", file=sys.stderr)
+        exit(1)
+
+py2exe_options = {
+    "bundle_files": 1,
+    "compressed": 1,
+    "optimize": 2,
+    "dist_dir": '.',
+    "dll_excludes": ['w9xpopen.exe']
+}
+py2exe_console = [{
+    "script": "./youtube_dl/__main__.py",
+    "dest_base": "youtube-dl",
+}]
+py2exe_params = {
+    'console': py2exe_console,
+    'options': { "py2exe": py2exe_options },
+    'zipfile': None
+}
+
+if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
+    params = py2exe_params
+else:
+    params = {
+        'scripts': ['bin/youtube-dl'],
+        'data_files': [('etc/bash_completion.d', ['youtube-dl.bash-completion']), # Installing system-wide would require sudo...
+                       ('share/doc/youtube_dl', ['README.txt']),
+                       ('share/man/man1/', ['youtube-dl.1'])]
+    }
+
+# Get the version from youtube_dl/version.py without importing the package
+exec(compile(open('youtube_dl/version.py').read(), 'youtube_dl/version.py', 'exec'))
+
+setup(
+    name = 'youtube_dl',
+    version = __version__,
+    description = 'YouTube video downloader',
+    long_description = 'Small command-line program to download videos from YouTube.com and other video sites.',
+    url = 'https://github.com/rg3/youtube-dl',
+    author = 'Ricardo Garcia',
+    maintainer = 'Philipp Hagemeister',
+    maintainer_email = 'phihag@phihag.de',
+    packages = ['youtube_dl'],
+
+    # Provokes warning on most systems (why?!)
+    #test_suite = 'nose.collector',
+    #test_requires = ['nosetest'],
+
+    classifiers = [
+        "Topic :: Multimedia :: Video",
+        "Development Status :: 5 - Production/Stable",
+        "Environment :: Console",
+        "License :: Public Domain",
+        "Programming Language :: Python :: 2.6",
+        "Programming Language :: Python :: 2.7",
+        "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.3"
+    ],
+
+    **params
+)

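Reading `__version__` through `exec(compile(...))` lets setup.py learn the version without importing the youtube_dl package itself, which could fail on an interpreter the package does not support. A minimal sketch, with the file contents inlined for illustration:

    source = "__version__ = '2013.01.02'\n"  # stands in for youtube_dl/version.py
    namespace = {}
    exec(compile(source, 'youtube_dl/version.py', 'exec'), namespace)
    print(namespace['__version__'])  # 2013.01.02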
+ 40 - 1
test/parameters.json

@@ -1 +1,40 @@
-{"username": null, "listformats": null, "skip_download": false, "usenetrc": false, "max_downloads": null, "noprogress": false, "forcethumbnail": false, "forceformat": false, "format_limit": null, "ratelimit": null, "nooverwrites": false, "forceurl": false, "writeinfojson": false, "simulate": false, "playliststart": 1, "continuedl": true, "password": null, "prefer_free_formats": false, "nopart": false, "retries": 10, "updatetime": true, "consoletitle": false, "verbose": true, "forcefilename": false, "ignoreerrors": false, "logtostderr": false, "format": null, "subtitleslang": null, "quiet": false, "outtmpl": "%(id)s.%(ext)s", "rejecttitle": null, "playlistend": -1, "writedescription": false, "forcetitle": false, "forcedescription": false, "writesubtitles": false, "matchtitle": null}
+{
+    "consoletitle": false, 
+    "continuedl": true, 
+    "forcedescription": false, 
+    "forcefilename": false, 
+    "forceformat": false, 
+    "forcethumbnail": false, 
+    "forcetitle": false, 
+    "forceurl": false, 
+    "format": null, 
+    "format_limit": null, 
+    "ignoreerrors": false, 
+    "listformats": null, 
+    "logtostderr": false, 
+    "matchtitle": null, 
+    "max_downloads": null, 
+    "nooverwrites": false, 
+    "nopart": false, 
+    "noprogress": false, 
+    "outtmpl": "%(id)s.%(ext)s", 
+    "password": null, 
+    "playlistend": -1, 
+    "playliststart": 1, 
+    "prefer_free_formats": false, 
+    "quiet": false, 
+    "ratelimit": null, 
+    "rejecttitle": null, 
+    "retries": 10, 
+    "simulate": false, 
+    "skip_download": false, 
+    "subtitleslang": null, 
+    "test": true, 
+    "updatetime": true, 
+    "usenetrc": false, 
+    "username": null, 
+    "verbose": true, 
+    "writedescription": false, 
+    "writeinfojson": true, 
+    "writesubtitles": false
+}

+ 27 - 0
test/test_all_urls.py

@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+import sys
+import unittest
+
+# Allow direct execution
+import os
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from youtube_dl.InfoExtractors import YoutubeIE, YoutubePlaylistIE
+
+class TestAllURLsMatching(unittest.TestCase):
+    def test_youtube_playlist_matching(self):
+        self.assertTrue(YoutubePlaylistIE().suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
+        self.assertTrue(YoutubePlaylistIE().suitable(u'PL63F0C78739B09958'))
+        self.assertFalse(YoutubePlaylistIE().suitable(u'PLtS2H6bU1M'))
+
+    def test_youtube_matching(self):
+        self.assertTrue(YoutubeIE().suitable(u'PLtS2H6bU1M'))
+
+    def test_youtube_extract(self):
+        self.assertEqual(YoutubeIE()._extract_id('http://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
+        self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
+        self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc'), 'BaW_jenozKc')
+
+if __name__ == '__main__':
+    unittest.main()

+ 121 - 89
test/test_download.py

@@ -1,93 +1,125 @@
-#!/usr/bin/env python2
-import unittest
+#!/usr/bin/env python
+
+import errno
 import hashlib
+import io
 import os
 import json
+import unittest
+import sys
+import socket
+
+# Allow direct execution
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+import youtube_dl.FileDownloader
+import youtube_dl.InfoExtractors
+from youtube_dl.utils import *
+
+DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json')
+PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
+
+# General configuration (from __init__, not very elegant...)
+jar = compat_cookiejar.CookieJar()
+cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
+proxy_handler = compat_urllib_request.ProxyHandler()
+opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
+compat_urllib_request.install_opener(opener)
+
+def _try_rm(filename):
+    """ Remove a file if it exists """
+    try:
+        os.remove(filename)
+    except OSError as ose:
+        if ose.errno != errno.ENOENT:
+            raise
+
+class FileDownloader(youtube_dl.FileDownloader):
+    def __init__(self, *args, **kwargs):
+        self.to_stderr = self.to_screen
+        self.processed_info_dicts = []
+        return youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
+    def process_info(self, info_dict):
+        self.processed_info_dicts.append(info_dict)
+        return youtube_dl.FileDownloader.process_info(self, info_dict)
+
+def _file_md5(fn):
+    with open(fn, 'rb') as f:
+        return hashlib.md5(f.read()).hexdigest()
+
+with io.open(DEF_FILE, encoding='utf-8') as deff:
+    defs = json.load(deff)
+with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
+    parameters = json.load(pf)
+
+
+class TestDownload(unittest.TestCase):
+    def setUp(self):
+        self.parameters = parameters
+        self.defs = defs
+
+### Dynamically generate tests
+def generator(test_case):
+
+    def test_template(self):
+        ie = getattr(youtube_dl.InfoExtractors, test_case['name'] + 'IE')
+        if not ie._WORKING:
+            print('Skipping: IE marked as not _WORKING')
+            return
+        if 'playlist' not in test_case and not test_case['file']:
+            print('Skipping: No output file specified')
+            return
+        if 'skip' in test_case:
+            print('Skipping: {0}'.format(test_case['skip']))
+            return
+
+        params = self.parameters.copy()
+        params.update(test_case.get('params', {}))
+
+        fd = FileDownloader(params)
+        fd.add_info_extractor(ie())
+        for ien in test_case.get('add_ie', []):
+            fd.add_info_extractor(getattr(youtube_dl.InfoExtractors, ien + 'IE')())
+
+        test_cases = test_case.get('playlist', [test_case])
+        for tc in test_cases:
+            _try_rm(tc['file'])
+            _try_rm(tc['file'] + '.part')
+            _try_rm(tc['file'] + '.info.json')
+        try:
+            fd.download([test_case['url']])
+
+            for tc in test_cases:
+                if not test_case.get('params', {}).get('skip_download', False):
+                    self.assertTrue(os.path.exists(tc['file']))
+                self.assertTrue(os.path.exists(tc['file'] + '.info.json'))
+                if 'md5' in tc:
+                    md5_for_file = _file_md5(tc['file'])
+                    self.assertEqual(md5_for_file, tc['md5'])
+                with io.open(tc['file'] + '.info.json', encoding='utf-8') as infof:
+                    info_dict = json.load(infof)
+                for (info_field, value) in tc.get('info_dict', {}).items():
+                    if value.startswith('md5:'):
+                        md5_info_value = hashlib.md5(info_dict.get(info_field, '')).hexdigest()
+                        self.assertEqual(value[3:], md5_info_value)
+                    else:
+                        self.assertEqual(value, info_dict.get(info_field))
+        finally:
+            for tc in test_cases:
+                _try_rm(tc['file'])
+                _try_rm(tc['file'] + '.part')
+                _try_rm(tc['file'] + '.info.json')
+
+    return test_template
+
+### And add them to TestDownload
+for test_case in defs:
+    test_method = generator(test_case)
+    test_method.__name__ = "test_{0}".format(test_case["name"])
+    setattr(TestDownload, test_method.__name__, test_method)
+    del test_method
+
 
 
-from youtube_dl.FileDownloader import FileDownloader
-from youtube_dl.InfoExtractors  import YoutubeIE, DailymotionIE
-from youtube_dl.InfoExtractors import  MetacafeIE, BlipTVIE
-
-
-class DownloadTest(unittest.TestCase):
-	PARAMETERS_FILE = "test/parameters.json"
-	#calculated with md5sum:
-	#md5sum (GNU coreutils) 8.19
-
-	YOUTUBE_SIZE = 1993883
-	YOUTUBE_URL = "http://www.youtube.com/watch?v=BaW_jenozKc"
-	YOUTUBE_FILE = "BaW_jenozKc.mp4"
-
-	DAILYMOTION_MD5 = "d363a50e9eb4f22ce90d08d15695bb47"
-	DAILYMOTION_URL = "http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech"
-	DAILYMOTION_FILE = "x33vw9.mp4"
-
-	METACAFE_SIZE = 5754305
-	METACAFE_URL = "http://www.metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/"
-	METACAFE_FILE = "_aUehQsCQtM.flv"
-
-	BLIP_MD5 = "93c24d2f4e0782af13b8a7606ea97ba7"
-	BLIP_URL = "http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352"
-	BLIP_FILE = "5779306.m4v"
-
-	XVIDEO_MD5 = ""
-	XVIDEO_URL = ""
-	XVIDEO_FILE = ""
-
-
-	def test_youtube(self):
-		#let's download a file from youtube
-		with open(DownloadTest.PARAMETERS_FILE) as f:
-			fd = FileDownloader(json.load(f))
-		fd.add_info_extractor(YoutubeIE())
-		fd.download([DownloadTest.YOUTUBE_URL])
-		self.assertTrue(os.path.exists(DownloadTest.YOUTUBE_FILE))
-		self.assertEqual(os.path.getsize(DownloadTest.YOUTUBE_FILE), DownloadTest.YOUTUBE_SIZE)
-
-	def test_dailymotion(self):
-		with open(DownloadTest.PARAMETERS_FILE) as f:
-			fd = FileDownloader(json.load(f))
-		fd.add_info_extractor(DailymotionIE())
-		fd.download([DownloadTest.DAILYMOTION_URL])
-		self.assertTrue(os.path.exists(DownloadTest.DAILYMOTION_FILE))
-		md5_down_file = md5_for_file(DownloadTest.DAILYMOTION_FILE)
-		self.assertEqual(md5_down_file, DownloadTest.DAILYMOTION_MD5)
-
-	def test_metacafe(self):
-		#this emulate a skip,to be 2.6 compatible
-		with open(DownloadTest.PARAMETERS_FILE) as f:
-			fd = FileDownloader(json.load(f))
-		fd.add_info_extractor(MetacafeIE())
-		fd.add_info_extractor(YoutubeIE())
-		fd.download([DownloadTest.METACAFE_URL])
-		self.assertTrue(os.path.exists(DownloadTest.METACAFE_FILE))
-		self.assertEqual(os.path.getsize(DownloadTest.METACAFE_FILE), DownloadTest.METACAFE_SIZE)
-
-	def test_blip(self):
-		with open(DownloadTest.PARAMETERS_FILE) as f:
-			fd = FileDownloader(json.load(f))
-		fd.add_info_extractor(BlipTVIE())
-		fd.download([DownloadTest.BLIP_URL])
-		self.assertTrue(os.path.exists(DownloadTest.BLIP_FILE))
-		md5_down_file = md5_for_file(DownloadTest.BLIP_FILE)
-		self.assertEqual(md5_down_file, DownloadTest.BLIP_MD5)
-
-	def tearDown(self):
-		if os.path.exists(DownloadTest.YOUTUBE_FILE):
-			os.remove(DownloadTest.YOUTUBE_FILE)
-		if os.path.exists(DownloadTest.DAILYMOTION_FILE):
-			os.remove(DownloadTest.DAILYMOTION_FILE)
-		if os.path.exists(DownloadTest.METACAFE_FILE):
-			os.remove(DownloadTest.METACAFE_FILE)
-		if os.path.exists(DownloadTest.BLIP_FILE):
-			os.remove(DownloadTest.BLIP_FILE)
-
-def md5_for_file(filename, block_size=2**20):
-    with open(filename) as f:
-        md5 = hashlib.md5()
-        while True:
-            data = f.read(block_size)
-            if not data:
-                break
-            md5.update(data)
-            return md5.hexdigest()
+if __name__ == '__main__':
+    unittest.main()

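The rewritten test_download.py generates one test method per JSON case: a closure captures the case, gets a descriptive `__name__`, and is attached to the TestCase class, so each site shows up individually in the nosetests output. A minimal sketch of the pattern with made-up cases:

    import unittest

    defs = [{'name': 'Alpha', 'value': 1}, {'name': 'Beta', 'value': 2}]

    class TestGenerated(unittest.TestCase):
        pass

    def generator(test_case):
        def test_template(self):
            self.assertTrue(test_case['value'] > 0)
        return test_template

    for test_case in defs:
        test_method = generator(test_case)
        test_method.__name__ = 'test_{0}'.format(test_case['name'])
        setattr(TestGenerated, test_method.__name__, test_method)
        del test_method

    if __name__ == '__main__':
        unittest.main()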
+ 26 - 0
test/test_execution.py

@@ -0,0 +1,26 @@
+import unittest
+
+import sys
+import os
+import subprocess
+
+rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+try:
+    _DEV_NULL = subprocess.DEVNULL
+except AttributeError:
+    _DEV_NULL = open(os.devnull, 'wb')
+
+class TestExecution(unittest.TestCase):
+    def test_import(self):
+        subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
+
+    def test_module_exec(self):
+        if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution
+            subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
+
+    def test_main_exec(self):
+        subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL)
+
+if __name__ == '__main__':
+    unittest.main()

+ 79 - 26
test/test_utils.py

@@ -1,47 +1,100 @@
-# -*- coding: utf-8 -*-
+#!/usr/bin/env python
 
 
 # Various small unit tests

+import sys
 import unittest

+# Allow direct execution
+import os
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
 #from youtube_dl.utils import htmlentity_transform
 from youtube_dl.utils import timeconvert
 from youtube_dl.utils import sanitize_filename
 from youtube_dl.utils import unescapeHTML
 from youtube_dl.utils import orderedSet

+if sys.version_info < (3, 0):
+    _compat_str = lambda b: b.decode('unicode-escape')
+else:
+    _compat_str = lambda s: s
+
 
 
 class TestUtil(unittest.TestCase):
-	def test_timeconvert(self):
-		self.assertTrue(timeconvert('') is None)
-		self.assertTrue(timeconvert('bougrg') is None)
+    def test_timeconvert(self):
+        self.assertTrue(timeconvert('') is None)
+        self.assertTrue(timeconvert('bougrg') is None)
+
+    def test_sanitize_filename(self):
+        self.assertEqual(sanitize_filename('abc'), 'abc')
+        self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
+
+        self.assertEqual(sanitize_filename('123'), '123')
+
+        self.assertEqual('abc_de', sanitize_filename('abc/de'))
+        self.assertFalse('/' in sanitize_filename('abc/de///'))
+
+        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
+        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
+        self.assertEqual('yes no', sanitize_filename('yes? no'))
+        self.assertEqual('this - that', sanitize_filename('this: that'))
+
+        self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
+        aumlaut = _compat_str('\xe4')
+        self.assertEqual(sanitize_filename(aumlaut), aumlaut)
+        tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430')
+        self.assertEqual(sanitize_filename(tests), tests)
+
+        forbidden = '"\0\\/'
+        for fc in forbidden:
+            for fbc in forbidden:
+                self.assertTrue(fbc not in sanitize_filename(fc))
+
+    def test_sanitize_filename_restricted(self):
+        self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
+        self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
+
+        self.assertEqual(sanitize_filename('123', restricted=True), '123')
+
+        self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
+        self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
 
 
-	def test_sanitize_filename(self):
-		self.assertEqual(sanitize_filename(u'abc'), u'abc')
-		self.assertEqual(sanitize_filename(u'abc_d-e'), u'abc_d-e')
+        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
+        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
+        self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
+        self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
 
 
-		self.assertEqual(sanitize_filename(u'123'), u'123')
+        tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c')
+        self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
+        self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '')  # No empty filename
 
 
-		self.assertEqual(u'abc-de', sanitize_filename(u'abc/de'))
-		self.assertFalse(u'/' in sanitize_filename(u'abc/de///'))
+        forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
+        for fc in forbidden:
+            for fbc in forbidden:
+                self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
 
 
-		self.assertEqual(u'abc-de', sanitize_filename(u'abc/<>\\*|de'))
-		self.assertEqual(u'xxx', sanitize_filename(u'xxx/<>\\*|'))
-		self.assertEqual(u'yes no', sanitize_filename(u'yes? no'))
-		self.assertEqual(u'this - that', sanitize_filename(u'this: that'))
+        # Handle a common case more neatly
+        self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song')
+        self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech')
+        # .. but make sure the file name is never empty
+        self.assertTrue(sanitize_filename('-', restricted=True) != '')
+        self.assertTrue(sanitize_filename(':', restricted=True) != '')
 
 
-		self.assertEqual(sanitize_filename(u'ä'), u'ä')
-		self.assertEqual(sanitize_filename(u'кириллица'), u'кириллица')
+    def test_sanitize_ids(self):
+        self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
+        self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
+        self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
 
 
-		for forbidden in u'"\0\\/':
-			self.assertTrue(forbidden not in sanitize_filename(forbidden))
+    def test_ordered_set(self):
+        self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
+        self.assertEqual(orderedSet([]), [])
+        self.assertEqual(orderedSet([1]), [1])
+        #keep the list ordered
+        self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
 
 
-	def test_ordered_set(self):
-		self.assertEqual(orderedSet([1,1,2,3,4,4,5,6,7,3,5]), [1,2,3,4,5,6,7])
-		self.assertEqual(orderedSet([]), [])
-		self.assertEqual(orderedSet([1]), [1])
-		#keep the list ordered
-		self.assertEqual(orderedSet([135,1,1,1]), [135,1])
+    def test_unescape_html(self):
+        self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))
 
 
-	def test_unescape_html(self):
-		self.assertEqual(unescapeHTML(u"%20;"), u"%20;")
+if __name__ == '__main__':
+    unittest.main()

+ 77 - 0
test/test_write_info_json.py

@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+import io
+import json
+import os
+import sys
+import unittest
+
+# Allow direct execution
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+import youtube_dl.FileDownloader
+import youtube_dl.InfoExtractors
+from youtube_dl.utils import *
+
+PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
+
+# General configuration (from __init__, not very elegant...)
+jar = compat_cookiejar.CookieJar()
+cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
+proxy_handler = compat_urllib_request.ProxyHandler()
+opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
+compat_urllib_request.install_opener(opener)
+
+class FileDownloader(youtube_dl.FileDownloader):
+    def __init__(self, *args, **kwargs):
+        youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
+        self.to_stderr = self.to_screen
+
+with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
+    params = json.load(pf)
+params['writeinfojson'] = True
+params['skip_download'] = True
+params['writedescription'] = True
+
+TEST_ID = 'BaW_jenozKc'
+INFO_JSON_FILE = TEST_ID + '.mp4.info.json'
+DESCRIPTION_FILE = TEST_ID + '.mp4.description'
+EXPECTED_DESCRIPTION = u'''test chars:  "'/\ä↭𝕐
+
+This is a test video for youtube-dl.
+
+For more information, contact phihag@phihag.de .'''
+
+class TestInfoJSON(unittest.TestCase):
+    def setUp(self):
+        # Clear old files
+        self.tearDown()
+
+    def test_info_json(self):
+        ie = youtube_dl.InfoExtractors.YoutubeIE()
+        fd = FileDownloader(params)
+        fd.add_info_extractor(ie)
+        fd.download([TEST_ID])
+        self.assertTrue(os.path.exists(INFO_JSON_FILE))
+        with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
+            jd = json.load(jsonf)
+        self.assertEqual(jd['upload_date'], u'20121002')
+        self.assertEqual(jd['description'], EXPECTED_DESCRIPTION)
+        self.assertEqual(jd['id'], TEST_ID)
+        self.assertEqual(jd['extractor'], 'youtube')
+        self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''')
+        self.assertEqual(jd['uploader'], 'Philipp Hagemeister')
+
+        self.assertTrue(os.path.exists(DESCRIPTION_FILE))
+        with io.open(DESCRIPTION_FILE, 'r', encoding='utf-8') as descf:
+            descr = descf.read()
+        self.assertEqual(descr, EXPECTED_DESCRIPTION)
+
+    def tearDown(self):
+        if os.path.exists(INFO_JSON_FILE):
+            os.remove(INFO_JSON_FILE)
+        if os.path.exists(DESCRIPTION_FILE):
+            os.remove(DESCRIPTION_FILE)
+
+if __name__ == '__main__':
+    unittest.main()

+ 73 - 0
test/test_youtube_lists.py

@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+import sys
+import unittest
+import json
+import io
+
+# Allow direct execution
+import os
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from youtube_dl.InfoExtractors import YoutubeUserIE,YoutubePlaylistIE
+from youtube_dl.utils import *
+
+PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
+with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
+    parameters = json.load(pf)
+
+# General configuration (from __init__, not very elegant...)
+jar = compat_cookiejar.CookieJar()
+cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
+proxy_handler = compat_urllib_request.ProxyHandler()
+opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
+compat_urllib_request.install_opener(opener)
+
+class FakeDownloader(object):
+    def __init__(self):
+        self.result = []
+        self.params = parameters
+    def to_screen(self, s):
+        print(s)
+    def trouble(self, s):
+        raise Exception(s)
+    def download(self, x):
+        self.result.append(x)
+
+class TestYoutubeLists(unittest.TestCase):
+    def test_youtube_playlist(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        IE.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
+        self.assertEqual(DL.result, [
+            ['http://www.youtube.com/watch?v=bV9L5Ht9LgY'],
+            ['http://www.youtube.com/watch?v=FXxLjLQi3Fg'],
+            ['http://www.youtube.com/watch?v=tU3Bgo5qJZE']
+        ])
+
+    def test_youtube_playlist_long(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        IE.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
+        self.assertTrue(len(DL.result) >= 799)
+
+    def test_youtube_course(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        # TODO: find a course with more than 100 videos to exercise pagination
+        IE.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
+        self.assertEqual(DL.result[0], ['http://www.youtube.com/watch?v=j9WZyLZCBzs'])
+        self.assertEqual(len(DL.result), 25)
+        self.assertEqual(DL.result[-1], ['http://www.youtube.com/watch?v=rYefUsYuEp0'])
+
+    def test_youtube_channel(self):
+        # TODO: find a channel that paginates and test it like test_youtube_playlist_long (a sketch follows this file)
+        pass # TODO
+
+    def test_youtube_user(self):
+        DL = FakeDownloader()
+        IE = YoutubeUserIE(DL)
+        IE.extract('https://www.youtube.com/user/TheLinuxFoundation')
+        self.assertTrue(len(DL.result) >= 320)
+
+if __name__ == '__main__':
+    unittest.main()
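
test_youtube_channel is left as a TODO above. A sketch of what it could look like once a paginating channel is identified (YoutubeChannelIE and the channel URL are both hypothetical; neither exists in this commit):

    # Sketch only: assumes a future YoutubeChannelIE and a placeholder URL.
    def test_youtube_channel(self):
        DL = FakeDownloader()
        IE = YoutubeChannelIE(DL)
        IE.extract('https://www.youtube.com/channel/EXAMPLE')
        # A paginating channel should yield more than one page of results.
        self.assertTrue(len(DL.result) > 100)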

+ 57 - 0
test/test_youtube_subtitles.py

@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+import sys
+import unittest
+import json
+import io
+import hashlib
+
+# Allow direct execution
+import os
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from youtube_dl.InfoExtractors import YoutubeIE
+from youtube_dl.utils import *
+
+PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
+with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
+    parameters = json.load(pf)
+
+# General configuration (from __init__, not very elegant...)
+jar = compat_cookiejar.CookieJar()
+cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
+proxy_handler = compat_urllib_request.ProxyHandler()
+opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
+compat_urllib_request.install_opener(opener)
+
+class FakeDownloader(object):
+    def __init__(self):
+        self.result = []
+        self.params = parameters
+    def to_screen(self, s):
+        print(s)
+    def trouble(self, s):
+        raise Exception(s)
+    def download(self, x):
+        self.result.append(x)
+
+md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
+
+class TestYoutubeSubtitles(unittest.TestCase):
+    def test_youtube_subtitles(self):
+        DL = FakeDownloader()
+        DL.params['writesubtitles'] = True
+        IE = YoutubeIE(DL)
+        info_dict = IE.extract('QRS8MkLhQmM')
+        self.assertEqual(md5(info_dict[0]['subtitles']), 'c3228550d59116f3c29fba370b55d033')
+
+    def test_youtube_subtitles_it(self):
+        DL = FakeDownloader()
+        DL.params['writesubtitles'] = True
+        DL.params['subtitleslang'] = 'it'
+        IE = YoutubeIE(DL)
+        info_dict = IE.extract('QRS8MkLhQmM')
+        self.assertEqual(md5(info_dict[0]['subtitles']), '132a88a0daf8e1520f393eb58f1f646a')
+
+if __name__ == '__main__':
+    unittest.main()
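
Both assertions above pin exact MD5 hashes of the subtitle text, so any re-rendering of the captions on YouTube's side will fail them even when the download path is healthy. A looser structural check could complement them (a sketch; the length threshold is an assumption):

    # Sketch of a looser check; the length threshold is an assumption.
    def test_youtube_subtitles_nonempty(self):
        DL = FakeDownloader()
        DL.params['writesubtitles'] = True
        IE = YoutubeIE(DL)
        info_dict = IE.extract('QRS8MkLhQmM')
        self.assertTrue(len(info_dict[0]['subtitles']) > 100)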

+ 164 - 0
test/tests.json

@@ -0,0 +1,164 @@
+[
+  {
+    "name": "Youtube",
+    "url":  "http://www.youtube.com/watch?v=BaW_jenozKc",
+    "file":  "BaW_jenozKc.mp4",
+    "info_dict": {
+      "title": "youtube-dl test video \"'/\\ä↭𝕐",
+      "uploader": "Philipp Hagemeister",
+      "uploader_id": "phihag",
+      "upload_date": "20121002",
+      "description": "test chars:  \"'/\\ä↭𝕐\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
+    }
+  },
+  {
+    "name": "Dailymotion",
+    "md5":  "392c4b85a60a90dc4792da41ce3144eb",
+    "url":  "http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech",
+    "file":  "x33vw9.mp4"
+  },
+  {
+    "name": "Metacafe",
+    "add_ie": ["Youtube"],
+    "url":  "http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/",
+    "file":  "_aUehQsCQtM.flv"
+  },
+  {
+    "name": "BlipTV",
+    "md5":  "b2d849efcf7ee18917e4b4d9ff37cafe",
+    "url":  "http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352",
+    "file":  "5779306.m4v"
+  },
+  {
+    "name": "XVideos",
+    "md5":  "1d0c835822f0a71a7bf011855db929d0",
+    "url":  "http://www.xvideos.com/video939581/funny_porns_by_s_-1",
+    "file":  "939581.flv"
+  },
+  {
+    "name": "Vimeo",
+    "md5":  "8879b6cc097e987f02484baf890129e5",
+    "url":  "http://vimeo.com/56015672",
+    "file": "56015672.mp4",
+    "info_dict": {
+      "title": "youtube-dl test video - ★ \" ' 幸 / \\ ä ↭ 𝕐",
+      "uploader": "Filippo Valsorda",
+      "uploader_id": "user7108434",
+      "upload_date": "20121220",
+      "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: ★ \" ' 幸 / \\ ä ↭ 𝕐"
+    }
+  },
+  {
+    "name": "Soundcloud",
+    "md5":  "ebef0a451b909710ed1d7787dddbf0d7",
+    "url":  "http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy",
+    "file":  "62986583.mp3"
+  },
+  {
+    "name": "StanfordOpenClassroom",
+    "md5":  "544a9468546059d4e80d76265b0443b8",
+    "url":  "http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100",
+    "file":  "PracticalUnix_intro-environment.mp4"
+  },
+  {
+    "name": "XNXX",
+    "md5":  "0831677e2b4761795f68d417e0b7b445",
+    "url":  "http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_",
+    "file":  "1135332.flv"
+  },
+  {
+    "name": "Youku",
+    "url": "http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html",
+    "file": "XNDgyMDQ2NTQw_part00.flv",
+    "md5": "ffe3f2e435663dc2d1eea34faeff5b5b",
+    "params": { "test": false }
+  },
+  {
+    "name": "NBA",
+    "url": "http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html",
+    "file": "0021200253-okc-bkn-recap.nba.mp4",
+    "md5": "c0edcfc37607344e2ff8f13c378c88a4"
+  },
+  {
+    "name": "JustinTV",
+    "url": "http://www.twitch.tv/thegamedevhub/b/296128360",
+    "file": "296128360.flv",
+    "md5": "ecaa8a790c22a40770901460af191c9a"
+  },
+  {
+    "name": "MyVideo",
+    "url": "http://www.myvideo.de/watch/8229274/bowling_fail_or_win",
+    "file": "8229274.flv",
+    "md5": "2d2753e8130479ba2cb7e0a37002053e"
+  },
+  {
+    "name": "Escapist",
+    "url": "http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate",
+    "file": "6618-Breaking-Down-Baldurs-Gate.flv",
+    "md5": "c6793dbda81388f4264c1ba18684a74d",
+    "skip": "Fails with timeout on Travis"
+  },
+  {
+    "name": "GooglePlus",
+    "url": "https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
+    "file": "ZButuJc6CtH.flv"
+  },
+  {
+    "name": "FunnyOrDie",
+    "url": "http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version",
+    "file": "0732f586d7.mp4",
+    "md5": "f647e9e90064b53b6e046e75d0241fbd"
+  },
+  {
+    "name": "TweetReel",
+    "url": "http://tweetreel.com/?77smq",
+    "file": "77smq.mov",
+    "md5": "56b4d9ca9de467920f3f99a6d91255d6",
+    "info_dict": {
+        "uploader": "itszero",
+        "uploader_id": "itszero",
+        "upload_date": "20091225",
+        "description": "Installing Gentoo Linux on Powerbook G4, it turns out the sleep indicator becomes HDD activity indicator :D"
+    }
+  },
+  {
+    "name": "Steam",
+    "url": "http://store.steampowered.com/video/105600/",
+    "playlist": [
+      {
+        "file": "81300.flv",
+        "md5": "f870007cee7065d7c76b88f0a45ecc07",
+        "info_dict": {
+            "title": "Terraria 1.1 Trailer"
+        }
+      },
+      {
+        "file": "80859.flv",
+        "md5": "61aaf31a5c5c3041afb58fb83cbb5751",
+        "info_dict": {
+          "title": "Terraria Trailer"
+        }
+      }
+    ]
+  },
+  {
+    "name": "Ustream",
+    "url": "http://www.ustream.tv/recorded/20274954",
+    "file": "20274954.flv",
+    "md5": "088f151799e8f572f84eb62f17d73e5c",
+    "info_dict": {
+        "title": "Young Americans for Liberty February 7, 2012 2:28 AM"
+    }
+  },
+  {
+    "name": "InfoQ",
+    "url": "http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
+    "file": "12-jan-pythonthings.mp4",
+    "info_dict": {
+      "title": "A Few of My Favorite [Python] Things"
+    },
+    "params": {
+      "skip_download": true
+    }
+  }
+]
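
Each entry in tests.json names the IE under test, the source URL and the expected output file; optional keys add an md5 checksum (presumably of the partial download made in test mode, per the FileDownloader "test" option below), per-test params, expected info_dict fields, prerequisite IEs (add_ie), a skip reason, or a playlist of expected files. A minimal sketch of a consumer, assuming it runs from the repository root:

    import io
    import json

    with io.open('test/tests.json', encoding='utf-8') as f:
        test_cases = json.load(f)

    for tc in test_cases:
        # Steam-style entries expect several output files via "playlist".
        if 'playlist' in tc:
            files = [entry['file'] for entry in tc['playlist']]
        else:
            files = [tc['file']]
        print('%-22s -> %s' % (tc['name'], ', '.join(files)))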

BIN
youtube-dl


+ 0 - 14
youtube-dl.bash-completion

@@ -1,14 +0,0 @@
-__youtube-dl()
-{
-    local cur prev opts
-    COMPREPLY=()
-    cur="${COMP_WORDS[COMP_CWORD]}"
-    opts="--all-formats --audio-format --audio-quality --auto-number --batch-file --console-title --continue --cookies --dump-user-agent --extract-audio --format --get-description --get-filename --get-format --get-thumbnail --get-title --get-url --help --id --ignore-errors --keep-video --list-extractors --list-formats --literal --match-title --max-downloads --max-quality --netrc --no-continue --no-mtime --no-overwrites --no-part --no-progress --output --password --playlist-end --playlist-start --prefer-free-formats --quiet --rate-limit --reject-title --retries --simulate --skip-download --srt-lang --title --update --user-agent --username --verbose --version --write-description --write-info-json --write-srt"
-
-    if [[ ${cur} == * ]] ; then
-        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
-        return 0
-    fi
-}
-
-complete -F __youtube-dl youtube-dl
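
The static completion script is dropped from version control; a file like it is presumably regenerated at build time from the current option list. A minimal sketch of such a generator (hypothetical; not necessarily the project's actual devscript):

    def build_completion(option_flags):
        # Hypothetical generator; the option list passed in is illustrative.
        opts = ' '.join(sorted(option_flags))
        return ('__youtube-dl()\n'
                '{\n'
                '    local cur\n'
                '    COMPREPLY=()\n'
                '    cur="${COMP_WORDS[COMP_CWORD]}"\n'
                '    COMPREPLY=( $(compgen -W "%s" -- ${cur}) )\n'
                '}\n'
                'complete -F __youtube-dl youtube-dl\n') % opts

    print(build_completion(['--help', '--version', '--format']))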

BIN
youtube-dl.exe


+ 722 - 677
youtube_dl/FileDownloader.py

@@ -1,693 +1,738 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-import httplib
+from __future__ import absolute_import
+
 import math
+import io
 import os
 import re
 import socket
 import subprocess
 import sys
 import time
-import urllib2
+import traceback
 
 if os.name == 'nt':
-	import ctypes
+    import ctypes
 
-from utils import *
+from .utils import *
 
 
 class FileDownloader(object):
-	"""File Downloader class.
-
-	File downloader objects are the ones responsible of downloading the
-	actual video file and writing it to disk if the user has requested
-	it, among some other tasks. In most cases there should be one per
-	program. As, given a video URL, the downloader doesn't know how to
-	extract all the needed information, task that InfoExtractors do, it
-	has to pass the URL to one of them.
-
-	For this, file downloader objects have a method that allows
-	InfoExtractors to be registered in a given order. When it is passed
-	a URL, the file downloader handles it to the first InfoExtractor it
-	finds that reports being able to handle it. The InfoExtractor extracts
-	all the information about the video or videos the URL refers to, and
-	asks the FileDownloader to process the video information, possibly
-	downloading the video.
-
-	File downloaders accept a lot of parameters. In order not to saturate
-	the object constructor with arguments, it receives a dictionary of
-	options instead. These options are available through the params
-	attribute for the InfoExtractors to use. The FileDownloader also
-	registers itself as the downloader in charge for the InfoExtractors
-	that are added to it, so this is a "mutual registration".
-
-	Available options:
-
-	username:         Username for authentication purposes.
-	password:         Password for authentication purposes.
-	usenetrc:         Use netrc for authentication instead.
-	quiet:            Do not print messages to stdout.
-	forceurl:         Force printing final URL.
-	forcetitle:       Force printing title.
-	forcethumbnail:   Force printing thumbnail URL.
-	forcedescription: Force printing description.
-	forcefilename:    Force printing final filename.
-	simulate:         Do not download the video files.
-	format:           Video format code.
-	format_limit:     Highest quality format to try.
-	outtmpl:          Template for output names.
-	ignoreerrors:     Do not stop on download errors.
-	ratelimit:        Download speed limit, in bytes/sec.
-	nooverwrites:     Prevent overwriting files.
-	retries:          Number of times to retry for HTTP error 5xx
-	continuedl:       Try to continue downloads if possible.
-	noprogress:       Do not print the progress bar.
-	playliststart:    Playlist item to start at.
-	playlistend:      Playlist item to end at.
-	matchtitle:       Download only matching titles.
-	rejecttitle:      Reject downloads for matching titles.
-	logtostderr:      Log messages to stderr instead of stdout.
-	consoletitle:     Display progress in console window's titlebar.
-	nopart:           Do not use temporary .part files.
-	updatetime:       Use the Last-modified header to set output file timestamps.
-	writedescription: Write the video description to a .description file
-	writeinfojson:    Write the video description to a .info.json file
-	writesubtitles:   Write the video subtitles to a .srt file
-	subtitleslang:    Language of the subtitles to download
-	"""
-
-	params = None
-	_ies = []
-	_pps = []
-	_download_retcode = None
-	_num_downloads = None
-	_screen_file = None
-
-	def __init__(self, params):
-		"""Create a FileDownloader object with the given options."""
-		self._ies = []
-		self._pps = []
-		self._download_retcode = 0
-		self._num_downloads = 0
-		self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
-		self.params = params
-
-	@staticmethod
-	def format_bytes(bytes):
-		if bytes is None:
-			return 'N/A'
-		if type(bytes) is str:
-			bytes = float(bytes)
-		if bytes == 0.0:
-			exponent = 0
-		else:
-			exponent = long(math.log(bytes, 1024.0))
-		suffix = 'bkMGTPEZY'[exponent]
-		converted = float(bytes) / float(1024 ** exponent)
-		return '%.2f%s' % (converted, suffix)
-
-	@staticmethod
-	def calc_percent(byte_counter, data_len):
-		if data_len is None:
-			return '---.-%'
-		return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))
-
-	@staticmethod
-	def calc_eta(start, now, total, current):
-		if total is None:
-			return '--:--'
-		dif = now - start
-		if current == 0 or dif < 0.001: # One millisecond
-			return '--:--'
-		rate = float(current) / dif
-		eta = long((float(total) - float(current)) / rate)
-		(eta_mins, eta_secs) = divmod(eta, 60)
-		if eta_mins > 99:
-			return '--:--'
-		return '%02d:%02d' % (eta_mins, eta_secs)
-
-	@staticmethod
-	def calc_speed(start, now, bytes):
-		dif = now - start
-		if bytes == 0 or dif < 0.001: # One millisecond
-			return '%10s' % '---b/s'
-		return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))
-
-	@staticmethod
-	def best_block_size(elapsed_time, bytes):
-		new_min = max(bytes / 2.0, 1.0)
-		new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
-		if elapsed_time < 0.001:
-			return long(new_max)
-		rate = bytes / elapsed_time
-		if rate > new_max:
-			return long(new_max)
-		if rate < new_min:
-			return long(new_min)
-		return long(rate)
-
-	@staticmethod
-	def parse_bytes(bytestr):
-		"""Parse a string indicating a byte quantity into a long integer."""
-		matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
-		if matchobj is None:
-			return None
-		number = float(matchobj.group(1))
-		multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
-		return long(round(number * multiplier))
-
-	def add_info_extractor(self, ie):
-		"""Add an InfoExtractor object to the end of the list."""
-		self._ies.append(ie)
-		ie.set_downloader(self)
-
-	def add_post_processor(self, pp):
-		"""Add a PostProcessor object to the end of the chain."""
-		self._pps.append(pp)
-		pp.set_downloader(self)
-
-	def to_screen(self, message, skip_eol=False):
-		"""Print message to stdout if not in quiet mode."""
-		assert type(message) == type(u'')
-		if not self.params.get('quiet', False):
-			terminator = [u'\n', u''][skip_eol]
-			output = message + terminator
-			if 'b' not in self._screen_file.mode or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
-				output = output.encode(preferredencoding(), 'ignore')
-			self._screen_file.write(output)
-			self._screen_file.flush()
-
-	def to_stderr(self, message):
-		"""Print message to stderr."""
-		print >>sys.stderr, message.encode(preferredencoding())
-
-	def to_cons_title(self, message):
-		"""Set console/terminal window title to message."""
-		if not self.params.get('consoletitle', False):
-			return
-		if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
-			# c_wchar_p() might not be necessary if `message` is
-			# already of type unicode()
-			ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
-		elif 'TERM' in os.environ:
-			sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))
-
-	def fixed_template(self):
-		"""Checks if the output template is fixed."""
-		return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None)
-
-	def trouble(self, message=None):
-		"""Determine action to take when a download problem appears.
-
-		Depending on if the downloader has been configured to ignore
-		download errors or not, this method may throw an exception or
-		not when errors are found, after printing the message.
-		"""
-		if message is not None:
-			self.to_stderr(message)
-		if not self.params.get('ignoreerrors', False):
-			raise DownloadError(message)
-		self._download_retcode = 1
-
-	def slow_down(self, start_time, byte_counter):
-		"""Sleep if the download speed is over the rate limit."""
-		rate_limit = self.params.get('ratelimit', None)
-		if rate_limit is None or byte_counter == 0:
-			return
-		now = time.time()
-		elapsed = now - start_time
-		if elapsed <= 0.0:
-			return
-		speed = float(byte_counter) / elapsed
-		if speed > rate_limit:
-			time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
-
-	def temp_name(self, filename):
-		"""Returns a temporary filename for the given filename."""
-		if self.params.get('nopart', False) or filename == u'-' or \
-				(os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
-			return filename
-		return filename + u'.part'
-
-	def undo_temp_name(self, filename):
-		if filename.endswith(u'.part'):
-			return filename[:-len(u'.part')]
-		return filename
-
-	def try_rename(self, old_filename, new_filename):
-		try:
-			if old_filename == new_filename:
-				return
-			os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
-		except (IOError, OSError), err:
-			self.trouble(u'ERROR: unable to rename file')
-
-	def try_utime(self, filename, last_modified_hdr):
-		"""Try to set the last-modified time of the given file."""
-		if last_modified_hdr is None:
-			return
-		if not os.path.isfile(encodeFilename(filename)):
-			return
-		timestr = last_modified_hdr
-		if timestr is None:
-			return
-		filetime = timeconvert(timestr)
-		if filetime is None:
-			return filetime
-		try:
-			os.utime(filename, (time.time(), filetime))
-		except:
-			pass
-		return filetime
-
-	def report_writedescription(self, descfn):
-		""" Report that the description file is being written """
-		self.to_screen(u'[info] Writing video description to: ' + descfn)
-
-	def report_writesubtitles(self, srtfn):
-		""" Report that the subtitles file is being written """
-		self.to_screen(u'[info] Writing video subtitles to: ' + srtfn)
-
-	def report_writeinfojson(self, infofn):
-		""" Report that the metadata file has been written """
-		self.to_screen(u'[info] Video description metadata as JSON to: ' + infofn)
-
-	def report_destination(self, filename):
-		"""Report destination filename."""
-		self.to_screen(u'[download] Destination: ' + filename)
-
-	def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
-		"""Report download progress."""
-		if self.params.get('noprogress', False):
-			return
-		self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
-				(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
-		self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
-				(percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
-
-	def report_resuming_byte(self, resume_len):
-		"""Report attempt to resume at given byte."""
-		self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
-
-	def report_retry(self, count, retries):
-		"""Report retry in case of HTTP error 5xx"""
-		self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
-
-	def report_file_already_downloaded(self, file_name):
-		"""Report file has already been fully downloaded."""
-		try:
-			self.to_screen(u'[download] %s has already been downloaded' % file_name)
-		except (UnicodeEncodeError), err:
-			self.to_screen(u'[download] The file has already been downloaded')
-
-	def report_unable_to_resume(self):
-		"""Report it was impossible to resume download."""
-		self.to_screen(u'[download] Unable to resume')
-
-	def report_finish(self):
-		"""Report download finished."""
-		if self.params.get('noprogress', False):
-			self.to_screen(u'[download] Download completed')
-		else:
-			self.to_screen(u'')
-
-	def increment_downloads(self):
-		"""Increment the ordinal that assigns a number to each file."""
-		self._num_downloads += 1
-
-	def prepare_filename(self, info_dict):
-		"""Generate the output filename."""
-		try:
-			template_dict = dict(info_dict)
-			template_dict['epoch'] = unicode(long(time.time()))
-			template_dict['autonumber'] = unicode('%05d' % self._num_downloads)
-			filename = self.params['outtmpl'] % template_dict
-			return filename
-		except (ValueError, KeyError), err:
-			self.trouble(u'ERROR: invalid system charset or erroneous output template')
-			return None
-
-	def _match_entry(self, info_dict):
-		""" Returns None iff the file should be downloaded """
-
-		title = info_dict['title']
-		matchtitle = self.params.get('matchtitle', False)
-		if matchtitle:
-			matchtitle = matchtitle.decode('utf8')
-			if not re.search(matchtitle, title, re.IGNORECASE):
-				return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
-		rejecttitle = self.params.get('rejecttitle', False)
-		if rejecttitle:
-			rejecttitle = rejecttitle.decode('utf8')
-			if re.search(rejecttitle, title, re.IGNORECASE):
-				return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
-		return None
-
-	def process_info(self, info_dict):
-		"""Process a single dictionary returned by an InfoExtractor."""
-
-		info_dict['stitle'] = sanitize_filename(info_dict['title'])
-
-		reason = self._match_entry(info_dict)
-		if reason is not None:
-			self.to_screen(u'[download] ' + reason)
-			return
-
-		max_downloads = self.params.get('max_downloads')
-		if max_downloads is not None:
-			if self._num_downloads > int(max_downloads):
-				raise MaxDownloadsReached()
-
-		filename = self.prepare_filename(info_dict)
-
-		# Forced printings
-		if self.params.get('forcetitle', False):
-			print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
-		if self.params.get('forceurl', False):
-			print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
-		if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
-			print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
-		if self.params.get('forcedescription', False) and 'description' in info_dict:
-			print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
-		if self.params.get('forcefilename', False) and filename is not None:
-			print filename.encode(preferredencoding(), 'xmlcharrefreplace')
-		if self.params.get('forceformat', False):
-			print info_dict['format'].encode(preferredencoding(), 'xmlcharrefreplace')
-
-		# Do nothing else if in simulate mode
-		if self.params.get('simulate', False):
-			return
-
-		if filename is None:
-			return
-
-		try:
-			dn = os.path.dirname(encodeFilename(filename))
-			if dn != '' and not os.path.exists(dn): # dn is already encoded
-				os.makedirs(dn)
-		except (OSError, IOError), err:
-			self.trouble(u'ERROR: unable to create directory ' + unicode(err))
-			return
-
-		if self.params.get('writedescription', False):
-			try:
-				descfn = filename + u'.description'
-				self.report_writedescription(descfn)
-				descfile = open(encodeFilename(descfn), 'wb')
-				try:
-					descfile.write(info_dict['description'].encode('utf-8'))
-				finally:
-					descfile.close()
-			except (OSError, IOError):
-				self.trouble(u'ERROR: Cannot write description file ' + descfn)
-				return
-
-		if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
-			# subtitles download errors are already managed as troubles in relevant IE
-			# that way it will silently go on when used with unsupporting IE
-			try:
-				srtfn = filename.rsplit('.', 1)[0] + u'.srt'
-				self.report_writesubtitles(srtfn)
-				srtfile = open(encodeFilename(srtfn), 'wb')
-				try:
-					srtfile.write(info_dict['subtitles'].encode('utf-8'))
-				finally:
-					srtfile.close()
-			except (OSError, IOError):
-				self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
-				return
-
-		if self.params.get('writeinfojson', False):
-			infofn = filename + u'.info.json'
-			self.report_writeinfojson(infofn)
-			try:
-				json.dump
-			except (NameError,AttributeError):
-				self.trouble(u'ERROR: No JSON encoder found. Update to Python 2.6+, setup a json module, or leave out --write-info-json.')
-				return
-			try:
-				infof = open(encodeFilename(infofn), 'wb')
-				try:
-					json_info_dict = dict((k,v) for k,v in info_dict.iteritems() if not k in ('urlhandle',))
-					json.dump(json_info_dict, infof)
-				finally:
-					infof.close()
-			except (OSError, IOError):
-				self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
-				return
-
-		if not self.params.get('skip_download', False):
-			if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
-				success = True
-			else:
-				try:
-					success = self._do_download(filename, info_dict)
-				except (OSError, IOError), err:
-					raise UnavailableVideoError
-				except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-					self.trouble(u'ERROR: unable to download video data: %s' % str(err))
-					return
-				except (ContentTooShortError, ), err:
-					self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
-					return
-
-			if success:
-				try:
-					self.post_process(filename, info_dict)
-				except (PostProcessingError), err:
-					self.trouble(u'ERROR: postprocessing: %s' % str(err))
-					return
-
-	def download(self, url_list):
-		"""Download a given list of URLs."""
-		if len(url_list) > 1 and self.fixed_template():
-			raise SameFileError(self.params['outtmpl'])
-
-		for url in url_list:
-			suitable_found = False
-			for ie in self._ies:
-				# Go to next InfoExtractor if not suitable
-				if not ie.suitable(url):
-					continue
-
-				# Suitable InfoExtractor found
-				suitable_found = True
-
-				# Extract information from URL and process it
-				videos = ie.extract(url)
-				for video in videos or []:
-					video['extractor'] = ie.IE_NAME
-					try:
-						self.increment_downloads()
-						self.process_info(video)
-					except UnavailableVideoError:
-						self.trouble(u'\nERROR: unable to download video')
-
-				# Suitable InfoExtractor had been found; go to next URL
-				break
-
-			if not suitable_found:
-				self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
-
-		return self._download_retcode
-
-	def post_process(self, filename, ie_info):
-		"""Run the postprocessing chain on the given file."""
-		info = dict(ie_info)
-		info['filepath'] = filename
-		for pp in self._pps:
-			info = pp.run(info)
-			if info is None:
-				break
-
-	def _download_with_rtmpdump(self, filename, url, player_url):
-		self.report_destination(filename)
-		tmpfilename = self.temp_name(filename)
-
-		# Check for rtmpdump first
-		try:
-			subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
-		except (OSError, IOError):
-			self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
-			return False
-
-		# Download using rtmpdump. rtmpdump returns exit code 2 when
-		# the connection was interrumpted and resuming appears to be
-		# possible. This is part of rtmpdump's normal usage, AFAIK.
-		basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename]
-		args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
-		if self.params.get('verbose', False):
-			try:
-				import pipes
-				shell_quote = lambda args: ' '.join(map(pipes.quote, args))
-			except ImportError:
-				shell_quote = repr
-			self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
-		retval = subprocess.call(args)
-		while retval == 2 or retval == 1:
-			prevsize = os.path.getsize(encodeFilename(tmpfilename))
-			self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
-			time.sleep(5.0) # This seems to be needed
-			retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
-			cursize = os.path.getsize(encodeFilename(tmpfilename))
-			if prevsize == cursize and retval == 1:
-				break
-			 # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those
-			if prevsize == cursize and retval == 2 and cursize > 1024:
-				self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
-				retval = 0
-				break
-		if retval == 0:
-			self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(encodeFilename(tmpfilename)))
-			self.try_rename(tmpfilename, filename)
-			return True
-		else:
-			self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
-			return False
-
-	def _do_download(self, filename, info_dict):
-		url = info_dict['url']
-		player_url = info_dict.get('player_url', None)
-
-		# Check file already present
-		if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
-			self.report_file_already_downloaded(filename)
-			return True
-
-		# Attempt to download using rtmpdump
-		if url.startswith('rtmp'):
-			return self._download_with_rtmpdump(filename, url, player_url)
-
-		tmpfilename = self.temp_name(filename)
-		stream = None
-
-		# Do not include the Accept-Encoding header
-		headers = {'Youtubedl-no-compression': 'True'}
-		basic_request = urllib2.Request(url, None, headers)
-		request = urllib2.Request(url, None, headers)
-
-		# Establish possible resume length
-		if os.path.isfile(encodeFilename(tmpfilename)):
-			resume_len = os.path.getsize(encodeFilename(tmpfilename))
-		else:
-			resume_len = 0
-
-		open_mode = 'wb'
-		if resume_len != 0:
-			if self.params.get('continuedl', False):
-				self.report_resuming_byte(resume_len)
-				request.add_header('Range','bytes=%d-' % resume_len)
-				open_mode = 'ab'
-			else:
-				resume_len = 0
-
-		count = 0
-		retries = self.params.get('retries', 0)
-		while count <= retries:
-			# Establish connection
-			try:
-				if count == 0 and 'urlhandle' in info_dict:
-					data = info_dict['urlhandle']
-				data = urllib2.urlopen(request)
-				break
-			except (urllib2.HTTPError, ), err:
-				if (err.code < 500 or err.code >= 600) and err.code != 416:
-					# Unexpected HTTP error
-					raise
-				elif err.code == 416:
-					# Unable to resume (requested range not satisfiable)
-					try:
-						# Open the connection again without the range header
-						data = urllib2.urlopen(basic_request)
-						content_length = data.info()['Content-Length']
-					except (urllib2.HTTPError, ), err:
-						if err.code < 500 or err.code >= 600:
-							raise
-					else:
-						# Examine the reported length
-						if (content_length is not None and
-								(resume_len - 100 < long(content_length) < resume_len + 100)):
-							# The file had already been fully downloaded.
-							# Explanation to the above condition: in issue #175 it was revealed that
-							# YouTube sometimes adds or removes a few bytes from the end of the file,
-							# changing the file size slightly and causing problems for some users. So
-							# I decided to implement a suggested change and consider the file
-							# completely downloaded if the file size differs less than 100 bytes from
-							# the one in the hard drive.
-							self.report_file_already_downloaded(filename)
-							self.try_rename(tmpfilename, filename)
-							return True
-						else:
-							# The length does not match, we start the download over
-							self.report_unable_to_resume()
-							open_mode = 'wb'
-							break
-			# Retry
-			count += 1
-			if count <= retries:
-				self.report_retry(count, retries)
-
-		if count > retries:
-			self.trouble(u'ERROR: giving up after %s retries' % retries)
-			return False
-
-		data_len = data.info().get('Content-length', None)
-		if data_len is not None:
-			data_len = long(data_len) + resume_len
-		data_len_str = self.format_bytes(data_len)
-		byte_counter = 0 + resume_len
-		block_size = 1024
-		start = time.time()
-		while True:
-			# Download and write
-			before = time.time()
-			data_block = data.read(block_size)
-			after = time.time()
-			if len(data_block) == 0:
-				break
-			byte_counter += len(data_block)
-
-			# Open file just in time
-			if stream is None:
-				try:
-					(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
-					assert stream is not None
-					filename = self.undo_temp_name(tmpfilename)
-					self.report_destination(filename)
-				except (OSError, IOError), err:
-					self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
-					return False
-			try:
-				stream.write(data_block)
-			except (IOError, OSError), err:
-				self.trouble(u'\nERROR: unable to write data: %s' % str(err))
-				return False
-			block_size = self.best_block_size(after - before, len(data_block))
-
-			# Progress message
-			speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
-			if data_len is None:
-				self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
-			else:
-				percent_str = self.calc_percent(byte_counter, data_len)
-				eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
-				self.report_progress(percent_str, data_len_str, speed_str, eta_str)
-
-			# Apply rate limit
-			self.slow_down(start, byte_counter - resume_len)
-
-		if stream is None:
-			self.trouble(u'\nERROR: Did not get any data blocks')
-			return False
-		stream.close()
-		self.report_finish()
-		if data_len is not None and byte_counter != data_len:
-			raise ContentTooShortError(byte_counter, long(data_len))
-		self.try_rename(tmpfilename, filename)
-
-		# Update file modification time
-		if self.params.get('updatetime', True):
-			info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
-
-		return True
+    """File Downloader class.
+
+    File downloader objects are responsible for downloading the
+    actual video file and writing it to disk if the user has requested
+    it, among some other tasks. In most cases there should be one per
+    program. Given a video URL, the downloader doesn't know how to
+    extract all the needed information (that is the InfoExtractors' task),
+    so it has to pass the URL to one of them.
+
+    For this, file downloader objects have a method that allows
+    InfoExtractors to be registered in a given order. When it is passed
+    a URL, the file downloader hands it to the first InfoExtractor it
+    finds that reports being able to handle it. The InfoExtractor extracts
+    all the information about the video or videos the URL refers to, and
+    asks the FileDownloader to process the video information, possibly
+    downloading the video.
+
+    File downloaders accept a lot of parameters. In order not to saturate
+    the object constructor with arguments, it receives a dictionary of
+    options instead. These options are available through the params
+    attribute for the InfoExtractors to use. The FileDownloader also
+    registers itself as the downloader in charge for the InfoExtractors
+    that are added to it, so this is a "mutual registration".
+
+    Available options:
+
+    username:          Username for authentication purposes.
+    password:          Password for authentication purposes.
+    usenetrc:          Use netrc for authentication instead.
+    quiet:             Do not print messages to stdout.
+    forceurl:          Force printing final URL.
+    forcetitle:        Force printing title.
+    forcethumbnail:    Force printing thumbnail URL.
+    forcedescription:  Force printing description.
+    forcefilename:     Force printing final filename.
+    simulate:          Do not download the video files.
+    format:            Video format code.
+    format_limit:      Highest quality format to try.
+    outtmpl:           Template for output names.
+    restrictfilenames: Do not allow "&" and spaces in file names
+    ignoreerrors:      Do not stop on download errors.
+    ratelimit:         Download speed limit, in bytes/sec.
+    nooverwrites:      Prevent overwriting files.
+    retries:           Number of times to retry for HTTP error 5xx
+    buffersize:        Size of download buffer in bytes.
+    noresizebuffer:    Do not automatically resize the download buffer.
+    continuedl:        Try to continue downloads if possible.
+    noprogress:        Do not print the progress bar.
+    playliststart:     Playlist item to start at.
+    playlistend:       Playlist item to end at.
+    matchtitle:        Download only matching titles.
+    rejecttitle:       Reject downloads for matching titles.
+    logtostderr:       Log messages to stderr instead of stdout.
+    consoletitle:      Display progress in console window's titlebar.
+    nopart:            Do not use temporary .part files.
+    updatetime:        Use the Last-modified header to set output file timestamps.
+    writedescription:  Write the video description to a .description file
+    writeinfojson:     Write the video description to a .info.json file
+    writesubtitles:    Write the video subtitles to a .srt file
+    subtitleslang:     Language of the subtitles to download
+    test:              Download only first bytes to test the downloader.
+    """
+
+    params = None
+    _ies = []
+    _pps = []
+    _download_retcode = None
+    _num_downloads = None
+    _screen_file = None
+
+    def __init__(self, params):
+        """Create a FileDownloader object with the given options."""
+        self._ies = []
+        self._pps = []
+        self._download_retcode = 0
+        self._num_downloads = 0
+        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
+        self.params = params
+
+        if '%(stitle)s' in self.params['outtmpl']:
+            self.to_stderr(u'WARNING: %(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag (which also secures %(uploader)s et al) instead.')
+
+    @staticmethod
+    def format_bytes(bytes):
+        if bytes is None:
+            return 'N/A'
+        if type(bytes) is str:
+            bytes = float(bytes)
+        if bytes == 0.0:
+            exponent = 0
+        else:
+            exponent = int(math.log(bytes, 1024.0))
+        suffix = 'bkMGTPEZY'[exponent]
+        converted = float(bytes) / float(1024 ** exponent)
+        return '%.2f%s' % (converted, suffix)
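+        # e.g. format_bytes(1536) -> '1.50k'; format_bytes(None) -> 'N/A'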
+
+    @staticmethod
+    def calc_percent(byte_counter, data_len):
+        if data_len is None:
+            return '---.-%'
+        return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))
+
+    @staticmethod
+    def calc_eta(start, now, total, current):
+        if total is None:
+            return '--:--'
+        dif = now - start
+        if current == 0 or dif < 0.001: # One millisecond
+            return '--:--'
+        rate = float(current) / dif
+        eta = int((float(total) - float(current)) / rate)
+        (eta_mins, eta_secs) = divmod(eta, 60)
+        if eta_mins > 99:
+            return '--:--'
+        return '%02d:%02d' % (eta_mins, eta_secs)
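+        # e.g. calc_eta(0, 10, 1000, 250) -> '00:30' (750 bytes left at 25 B/s)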
+
+    @staticmethod
+    def calc_speed(start, now, bytes):
+        dif = now - start
+        if bytes == 0 or dif < 0.001: # One millisecond
+            return '%10s' % '---b/s'
+        return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))
+
+    @staticmethod
+    def best_block_size(elapsed_time, bytes):
+        new_min = max(bytes / 2.0, 1.0)
+        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
+        if elapsed_time < 0.001:
+            return int(new_max)
+        rate = bytes / elapsed_time
+        if rate > new_max:
+            return int(new_max)
+        if rate < new_min:
+            return int(new_min)
+        return int(rate)
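+        # The next block size tracks the measured rate, clamped so it never
+        # more than doubles or halves between reads and never exceeds 4 MiB.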
+
+    @staticmethod
+    def parse_bytes(bytestr):
+        """Parse a string indicating a byte quantity into an integer."""
+        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
+        if matchobj is None:
+            return None
+        number = float(matchobj.group(1))
+        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
+        return int(round(number * multiplier))
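+        # e.g. parse_bytes('50k') -> 51200; parse_bytes('1.5m') -> 1572864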
+
+    def add_info_extractor(self, ie):
+        """Add an InfoExtractor object to the end of the list."""
+        self._ies.append(ie)
+        ie.set_downloader(self)
+
+    def add_post_processor(self, pp):
+        """Add a PostProcessor object to the end of the chain."""
+        self._pps.append(pp)
+        pp.set_downloader(self)
+
+    def to_screen(self, message, skip_eol=False):
+        """Print message to stdout if not in quiet mode."""
+        assert type(message) == type(u'')
+        if not self.params.get('quiet', False):
+            terminator = [u'\n', u''][skip_eol]
+            output = message + terminator
+            if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
+                output = output.encode(preferredencoding(), 'ignore')
+            self._screen_file.write(output)
+            self._screen_file.flush()
+
+    def to_stderr(self, message):
+        """Print message to stderr."""
+        assert type(message) == type(u'')
+        output = message + u'\n'
+        if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
+            output = output.encode(preferredencoding())
+        sys.stderr.write(output)
+
+    def to_cons_title(self, message):
+        """Set console/terminal window title to message."""
+        if not self.params.get('consoletitle', False):
+            return
+        if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
+            # c_wchar_p() might not be necessary if `message` is
+            # already of type unicode()
+            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
+        elif 'TERM' in os.environ:
+            sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))
+
+    def fixed_template(self):
+        """Checks if the output template is fixed."""
+        return (re.search(u'(?u)%\\(.+?\\)s', self.params['outtmpl']) is None)
+
+    def trouble(self, message=None, tb=None):
+        """Determine action to take when a download problem appears.
+
+        Depending on if the downloader has been configured to ignore
+        download errors or not, this method may throw an exception or
+        not when errors are found, after printing the message.
+
+        tb, if given, is additional traceback information.
+        """
+        if message is not None:
+            self.to_stderr(message)
+        if self.params.get('verbose'):
+            if tb is None:
+                tb_data = traceback.format_list(traceback.extract_stack())
+                tb = u''.join(tb_data)
+            self.to_stderr(tb)
+        if not self.params.get('ignoreerrors', False):
+            raise DownloadError(message)
+        self._download_retcode = 1
+
+    def slow_down(self, start_time, byte_counter):
+        """Sleep if the download speed is over the rate limit."""
+        rate_limit = self.params.get('ratelimit', None)
+        if rate_limit is None or byte_counter == 0:
+            return
+        now = time.time()
+        elapsed = now - start_time
+        if elapsed <= 0.0:
+            return
+        speed = float(byte_counter) / elapsed
+        if speed > rate_limit:
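+            # Sleep just long enough that the average speed since start_time
+            # falls back to the configured limit.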
+            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
+
+    def temp_name(self, filename):
+        """Returns a temporary filename for the given filename."""
+        if self.params.get('nopart', False) or filename == u'-' or \
+                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
+            return filename
+        return filename + u'.part'
+
+    def undo_temp_name(self, filename):
+        if filename.endswith(u'.part'):
+            return filename[:-len(u'.part')]
+        return filename
+
+    def try_rename(self, old_filename, new_filename):
+        try:
+            if old_filename == new_filename:
+                return
+            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
+        except (IOError, OSError) as err:
+            self.trouble(u'ERROR: unable to rename file')
+
+    def try_utime(self, filename, last_modified_hdr):
+        """Try to set the last-modified time of the given file."""
+        if last_modified_hdr is None:
+            return
+        if not os.path.isfile(encodeFilename(filename)):
+            return
+        timestr = last_modified_hdr
+        if timestr is None:
+            return
+        filetime = timeconvert(timestr)
+        if filetime is None:
+            return filetime
+        try:
+            os.utime(filename, (time.time(), filetime))
+        except Exception:
+            pass
+        return filetime
+
+    def report_writedescription(self, descfn):
+        """ Report that the description file is being written """
+        self.to_screen(u'[info] Writing video description to: ' + descfn)
+
+    def report_writesubtitles(self, srtfn):
+        """ Report that the subtitles file is being written """
+        self.to_screen(u'[info] Writing video subtitles to: ' + srtfn)
+
+    def report_writeinfojson(self, infofn):
+        """ Report that the metadata file has been written """
+        self.to_screen(u'[info] Video description metadata as JSON to: ' + infofn)
+
+    def report_destination(self, filename):
+        """Report destination filename."""
+        self.to_screen(u'[download] Destination: ' + filename)
+
+    def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
+        """Report download progress."""
+        if self.params.get('noprogress', False):
+            return
+        self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
+                (percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
+        self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
+                (percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
+
+    def report_resuming_byte(self, resume_len):
+        """Report attempt to resume at given byte."""
+        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
+
+    def report_retry(self, count, retries):
+        """Report retry in case of HTTP error 5xx"""
+        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
+
+    def report_file_already_downloaded(self, file_name):
+        """Report file has already been fully downloaded."""
+        try:
+            self.to_screen(u'[download] %s has already been downloaded' % file_name)
+        except (UnicodeEncodeError) as err:
+            self.to_screen(u'[download] The file has already been downloaded')
+
+    def report_unable_to_resume(self):
+        """Report it was impossible to resume download."""
+        self.to_screen(u'[download] Unable to resume')
+
+    def report_finish(self):
+        """Report download finished."""
+        if self.params.get('noprogress', False):
+            self.to_screen(u'[download] Download completed')
+        else:
+            self.to_screen(u'')
+
+    def increment_downloads(self):
+        """Increment the ordinal that assigns a number to each file."""
+        self._num_downloads += 1
+
+    def prepare_filename(self, info_dict):
+        """Generate the output filename."""
+        try:
+            template_dict = dict(info_dict)
+
+            template_dict['epoch'] = int(time.time())
+            template_dict['autonumber'] = u'%05d' % self._num_downloads
+
+            sanitize = lambda k,v: sanitize_filename(
+                u'NA' if v is None else compat_str(v),
+                restricted=self.params.get('restrictfilenames'),
+                is_id=(k==u'id'))
+            template_dict = dict((k, sanitize(k, v)) for k,v in template_dict.items())
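+            # Every substituted field is made filename-safe; is_id keeps the
+            # 'id' field closer to its original form.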
+
+            filename = self.params['outtmpl'] % template_dict
+            return filename
+        except (ValueError, KeyError) as err:
+            self.trouble(u'ERROR: invalid system charset or erroneous output template')
+            return None
+
+    def _match_entry(self, info_dict):
+        """ Returns None iff the file should be downloaded """
+
+        title = info_dict['title']
+        matchtitle = self.params.get('matchtitle', False)
+        if matchtitle:
+            matchtitle = matchtitle.decode('utf8')
+            if not re.search(matchtitle, title, re.IGNORECASE):
+                return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
+        rejecttitle = self.params.get('rejecttitle', False)
+        if rejecttitle:
+            rejecttitle = rejecttitle.decode('utf8')
+            if re.search(rejecttitle, title, re.IGNORECASE):
+                return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
+        return None
+
+    def process_info(self, info_dict):
+        """Process a single dictionary returned by an InfoExtractor."""
+
+        # Keep for backwards compatibility
+        info_dict['stitle'] = info_dict['title']
+
+        if 'format' not in info_dict:
+            info_dict['format'] = info_dict['ext']
+
+        reason = self._match_entry(info_dict)
+        if reason is not None:
+            self.to_screen(u'[download] ' + reason)
+            return
+
+        max_downloads = self.params.get('max_downloads')
+        if max_downloads is not None:
+            if self._num_downloads > int(max_downloads):
+                raise MaxDownloadsReached()
+
+        filename = self.prepare_filename(info_dict)
+
+        # Forced printings
+        if self.params.get('forcetitle', False):
+            compat_print(info_dict['title'])
+        if self.params.get('forceurl', False):
+            compat_print(info_dict['url'])
+        if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
+            compat_print(info_dict['thumbnail'])
+        if self.params.get('forcedescription', False) and 'description' in info_dict:
+            compat_print(info_dict['description'])
+        if self.params.get('forcefilename', False) and filename is not None:
+            compat_print(filename)
+        if self.params.get('forceformat', False):
+            compat_print(info_dict['format'])
+
+        # Do nothing else if in simulate mode
+        if self.params.get('simulate', False):
+            return
+
+        if filename is None:
+            return
+
+        try:
+            dn = os.path.dirname(encodeFilename(filename))
+            if dn != '' and not os.path.exists(dn): # dn is already encoded
+                os.makedirs(dn)
+        except (OSError, IOError) as err:
+            self.trouble(u'ERROR: unable to create directory ' + compat_str(err))
+            return
+
+        if self.params.get('writedescription', False):
+            try:
+                descfn = filename + u'.description'
+                self.report_writedescription(descfn)
+                with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
+                    descfile.write(info_dict['description'])
+            except (OSError, IOError):
+                self.trouble(u'ERROR: Cannot write description file ' + descfn)
+                return
+
+        if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
+            # subtitles download errors are already managed as troubles in relevant IE
+            # that way it will silently go on when used with unsupporting IE
+            try:
+                srtfn = filename.rsplit('.', 1)[0] + u'.srt'
+                self.report_writesubtitles(srtfn)
+                with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile:
+                    srtfile.write(info_dict['subtitles'])
+            except (OSError, IOError):
+                self.trouble(u'ERROR: Cannot write subtitles file ' + srtfn)
+                return
+
+        if self.params.get('writeinfojson', False):
+            infofn = filename + u'.info.json'
+            self.report_writeinfojson(infofn)
+            try:
+                json_info_dict = dict((k, v) for k, v in info_dict.items() if k not in ['urlhandle'])
+                write_json_file(json_info_dict, encodeFilename(infofn))
+            except (OSError, IOError):
+                self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
+                return
+
+        if not self.params.get('skip_download', False):
+            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
+                success = True
+            else:
+                try:
+                    success = self._do_download(filename, info_dict)
+                except (OSError, IOError) as err:
+                    raise UnavailableVideoError()
+                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                    self.trouble(u'ERROR: unable to download video data: %s' % str(err))
+                    return
+                except (ContentTooShortError, ) as err:
+                    self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
+                    return
+
+            if success:
+                try:
+                    self.post_process(filename, info_dict)
+                except (PostProcessingError) as err:
+                    self.trouble(u'ERROR: postprocessing: %s' % str(err))
+                    return
+
+    def download(self, url_list):
+        """Download a given list of URLs."""
+        if len(url_list) > 1 and self.fixed_template():
+            raise SameFileError(self.params['outtmpl'])
+
+        for url in url_list:
+            suitable_found = False
+            for ie in self._ies:
+                # Go to next InfoExtractor if not suitable
+                if not ie.suitable(url):
+                    continue
+
+                # Warn if the _WORKING attribute is False
+                if not ie.working():
+                    self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, '
+                                   u'and will probably not work. If you want to go on, use the -i option.')
+
+                # Suitable InfoExtractor found
+                suitable_found = True
+
+                # Extract information from URL and process it
+                try:
+                    videos = ie.extract(url)
+                except ExtractorError as de: # An error we somewhat expected
+                    self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
+                    break
+                except Exception as e:
+                    if self.params.get('ignoreerrors', False):
+                        self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
+                        break
+                    else:
+                        raise
+
+                if len(videos or []) > 1 and self.fixed_template():
+                    raise SameFileError(self.params['outtmpl'])
+
+                for video in videos or []:
+                    video['extractor'] = ie.IE_NAME
+                    try:
+                        self.increment_downloads()
+                        self.process_info(video)
+                    except UnavailableVideoError:
+                        self.trouble(u'\nERROR: unable to download video')
+
+                # Suitable InfoExtractor was found; go to next URL
+                break
+
+            if not suitable_found:
+                self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
+
+        return self._download_retcode
+
+    def post_process(self, filename, ie_info):
+        """Run the postprocessing chain on the given file."""
+        info = dict(ie_info)
+        info['filepath'] = filename
+        for pp in self._pps:
+            info = pp.run(info)
+            if info is None:
+                break
+
+    def _download_with_rtmpdump(self, filename, url, player_url, page_url):
+        self.report_destination(filename)
+        tmpfilename = self.temp_name(filename)
+
+        # Check for rtmpdump first
+        try:
+            subprocess.call(['rtmpdump', '-h'], stdout=open(os.path.devnull, 'w'), stderr=subprocess.STDOUT)
+        except (OSError, IOError):
+            self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
+            return False
+
+        # Download using rtmpdump. rtmpdump returns exit code 2 when
+        # the connection was interrupted and resuming appears to be
+        # possible. This is part of rtmpdump's normal usage, AFAIK.
+        basic_args = ['rtmpdump', '-q', '-r', url, '-o', tmpfilename]
+        if player_url is not None:
+            basic_args += ['-W', player_url]
+        if page_url is not None:
+            basic_args += ['--pageUrl', page_url]
+        args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
+        if self.params.get('verbose', False):
+            try:
+                import pipes
+                shell_quote = lambda args: ' '.join(map(pipes.quote, args))
+            except ImportError:
+                shell_quote = repr
+            self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
+        retval = subprocess.call(args)
+        while retval == 2 or retval == 1:
+            prevsize = os.path.getsize(encodeFilename(tmpfilename))
+            self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
+            time.sleep(5.0) # This seems to be needed
+            retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
+            cursize = os.path.getsize(encodeFilename(tmpfilename))
+            if prevsize == cursize and retval == 1:
+                break
+            # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
+            if prevsize == cursize and retval == 2 and cursize > 1024:
+                self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
+                retval = 0
+                break
+        if retval == 0:
+            self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(encodeFilename(tmpfilename)))
+            self.try_rename(tmpfilename, filename)
+            return True
+        else:
+            self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
+            return False
+
+    def _do_download(self, filename, info_dict):
+        url = info_dict['url']
+
+        # Check file already present
+        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
+            self.report_file_already_downloaded(filename)
+            return True
+
+        # Attempt to download using rtmpdump
+        if url.startswith('rtmp'):
+            return self._download_with_rtmpdump(filename, url,
+                                                info_dict.get('player_url', None),
+                                                info_dict.get('page_url', None))
+
+        tmpfilename = self.temp_name(filename)
+        stream = None
+
+        # Do not include the Accept-Encoding header
+        headers = {'Youtubedl-no-compression': 'True'}
+        basic_request = compat_urllib_request.Request(url, None, headers)
+        request = compat_urllib_request.Request(url, None, headers)
+
+        if self.params.get('test', False):
+            request.add_header('Range', 'bytes=0-10240')
+
+        # Establish possible resume length
+        if os.path.isfile(encodeFilename(tmpfilename)):
+            resume_len = os.path.getsize(encodeFilename(tmpfilename))
+        else:
+            resume_len = 0
+
+        open_mode = 'wb'
+        if resume_len != 0:
+            if self.params.get('continuedl', False):
+                self.report_resuming_byte(resume_len)
+                request.add_header('Range', 'bytes=%d-' % resume_len)
+                open_mode = 'ab'
+            else:
+                resume_len = 0
+
+        count = 0
+        retries = self.params.get('retries', 0)
+        while count <= retries:
+            # Establish connection
+            try:
+                if count == 0 and 'urlhandle' in info_dict:
+                    data = info_dict['urlhandle']
+                else:
+                    data = compat_urllib_request.urlopen(request)
+                break
+            except (compat_urllib_error.HTTPError, ) as err:
+                if (err.code < 500 or err.code >= 600) and err.code != 416:
+                    # Unexpected HTTP error
+                    raise
+                elif err.code == 416:
+                    # Unable to resume (requested range not satisfiable)
+                    try:
+                        # Open the connection again without the range header
+                        data = compat_urllib_request.urlopen(basic_request)
+                        content_length = data.info()['Content-Length']
+                    except (compat_urllib_error.HTTPError, ) as err:
+                        if err.code < 500 or err.code >= 600:
+                            raise
+                    else:
+                        # Examine the reported length
+                        if (content_length is not None and
+                                (resume_len - 100 < int(content_length) < resume_len + 100)):
+                            # The file had already been fully downloaded.
+                            # Explanation to the above condition: in issue #175 it was revealed that
+                            # YouTube sometimes adds or removes a few bytes from the end of the file,
+                            # changing the file size slightly and causing problems for some users. So
+                            # I decided to implement a suggested change and consider the file
+                            # completely downloaded if the file size differs less than 100 bytes from
+                            # the one in the hard drive.
+                            self.report_file_already_downloaded(filename)
+                            self.try_rename(tmpfilename, filename)
+                            return True
+                        else:
+                            # The length does not match, we start the download over
+                            self.report_unable_to_resume()
+                            open_mode = 'wb'
+                            break
+            # Retry
+            count += 1
+            if count <= retries:
+                self.report_retry(count, retries)
+
+        if count > retries:
+            self.trouble(u'ERROR: giving up after %s retries' % retries)
+            return False
+
+        data_len = data.info().get('Content-length', None)
+        if data_len is not None:
+            data_len = int(data_len) + resume_len
+        data_len_str = self.format_bytes(data_len)
+        byte_counter = 0 + resume_len
+        block_size = self.params.get('buffersize', 1024)
+        start = time.time()
+        while True:
+            # Download and write
+            before = time.time()
+            data_block = data.read(block_size)
+            after = time.time()
+            if len(data_block) == 0:
+                break
+            byte_counter += len(data_block)
+
+            # Open file just in time
+            if stream is None:
+                try:
+                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
+                    assert stream is not None
+                    filename = self.undo_temp_name(tmpfilename)
+                    self.report_destination(filename)
+                except (OSError, IOError) as err:
+                    self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
+                    return False
+            try:
+                stream.write(data_block)
+            except (IOError, OSError) as err:
+                self.trouble(u'\nERROR: unable to write data: %s' % str(err))
+                return False
+            if not self.params.get('noresizebuffer', False):
+                block_size = self.best_block_size(after - before, len(data_block))
+
+            # Progress message
+            speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
+            if data_len is None:
+                self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
+            else:
+                percent_str = self.calc_percent(byte_counter, data_len)
+                eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
+                self.report_progress(percent_str, data_len_str, speed_str, eta_str)
+
+            # Apply rate limit
+            self.slow_down(start, byte_counter - resume_len)
+
+        if stream is None:
+            self.trouble(u'\nERROR: Did not get any data blocks')
+            return False
+        stream.close()
+        self.report_finish()
+        if data_len is not None and byte_counter != data_len:
+            raise ContentTooShortError(byte_counter, int(data_len))
+        self.try_rename(tmpfilename, filename)
+
+        # Update file modification time
+        if self.params.get('updatetime', True):
+            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
+
+        return True

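The resume handling in _do_download above reduces to three moves: measure how much of the part file is already on disk, send that offset in an HTTP Range header, and open the part file in append mode instead of truncating it. A 416 response then means the requested range cannot be satisfied, which is what triggers the Content-Length comparison seen above. Below is a minimal standalone sketch of the same idea, using plain urllib.request in place of the compat_* wrappers; the url and partfile arguments are illustrative placeholders, not part of this commit.

import os
import urllib.request

def resume_download(url, partfile, block_size=1024):
    # Resume from however many bytes of the part file are already on disk.
    offset = os.path.getsize(partfile) if os.path.isfile(partfile) else 0
    request = urllib.request.Request(url)
    mode = 'wb'
    if offset:
        # Ask the server to start serving at the saved offset. This sketch
        # assumes the server honors Range; the code above also handles the
        # 416 (range not satisfiable) case.
        request.add_header('Range', 'bytes=%d-' % offset)
        mode = 'ab'
    with urllib.request.urlopen(request) as data:
        with open(partfile, mode) as out:
            while True:
                block = data.read(block_size)
                if not block:
                    break
                out.write(block)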
+ 3622 - 3536
youtube_dl/InfoExtractors.py

@@ -1,3696 +1,3782 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+from __future__ import absolute_import
+
+import base64
 import datetime
-import HTMLParser
-import httplib
 import netrc
 import os
 import re
 import socket
 import time
-import urllib
-import urllib2
 import email.utils
 import xml.etree.ElementTree
 import random
 import math
-from urlparse import parse_qs, urlparse
-
-try:
-	import cStringIO as StringIO
-except ImportError:
-	import StringIO
 
 
-from utils import *
+from .utils import *
 
 
 
 
 class InfoExtractor(object):
-	"""Information Extractor class.
-
-	Information extractors are the classes that, given a URL, extract
-	information from the video (or videos) the URL refers to. This
-	information includes the real video URL, the video title and simplified
-	title, author and others. The information is stored in a dictionary
-	which is then passed to the FileDownloader. The FileDownloader
-	processes this information possibly downloading the video to the file
-	system, among other possible outcomes. The dictionaries must include
-	the following fields:
-
-	id:		Video identifier.
-	url:		Final video URL.
-	uploader:	Nickname of the video uploader.
-	title:		Literal title.
-	ext:		Video filename extension.
-	format:		Video format.
-	player_url:	SWF Player URL (may be None).
-
-	The following fields are optional. Their primary purpose is to allow
-	youtube-dl to serve as the backend for a video search function, such
-	as the one in youtube2mp3.  They are only used when their respective
-	forced printing functions are called:
-
-	thumbnail:	Full URL to a video thumbnail image.
-	description:	One-line video description.
-
-	Subclasses of this one should re-define the _real_initialize() and
-	_real_extract() methods and define a _VALID_URL regexp.
-	Probably, they should also be added to the list of extractors.
-	"""
-
-	_ready = False
-	_downloader = None
-
-	def __init__(self, downloader=None):
-		"""Constructor. Receives an optional downloader."""
-		self._ready = False
-		self.set_downloader(downloader)
-
-	def suitable(self, url):
-		"""Receives a URL and returns True if suitable for this IE."""
-		return re.match(self._VALID_URL, url) is not None
-
-	def initialize(self):
-		"""Initializes an instance (authentication, etc)."""
-		if not self._ready:
-			self._real_initialize()
-			self._ready = True
-
-	def extract(self, url):
-		"""Extracts URL information and returns it in list of dicts."""
-		self.initialize()
-		return self._real_extract(url)
-
-	def set_downloader(self, downloader):
-		"""Sets the downloader for this IE."""
-		self._downloader = downloader
-
-	def _real_initialize(self):
-		"""Real initialization process. Redefine in subclasses."""
-		pass
-
-	def _real_extract(self, url):
-		"""Real extraction process. Redefine in subclasses."""
-		pass
-
-
+    """Information Extractor class.
+
+    Information extractors are the classes that, given a URL, extract
+    information about the video (or videos) the URL refers to. This
+    information includes the real video URL, the video title, author and
+    others. The information is stored in a dictionary which is then
+    passed to the FileDownloader. The FileDownloader processes this
+    information possibly downloading the video to the file system, among
+    other possible outcomes.
+
+    The dictionaries must include the following fields:
+
+    id:             Video identifier.
+    url:            Final video URL.
+    title:          Video title, unescaped.
+    ext:            Video filename extension.
+    uploader:       Full name of the video uploader.
+    upload_date:    Video upload date (YYYYMMDD).
+
+    The following fields are optional:
+
+    format:         The video format, defaults to ext (used for --get-format)
+    thumbnail:      Full URL to a video thumbnail image.
+    description:    One-line video description.
+    uploader_id:    Nickname or id of the video uploader.
+    player_url:     SWF Player URL (used for rtmpdump).
+    subtitles:      The .srt file contents.
+    urlhandle:      [internal] The urlHandle to be used to download the file,
+                    as returned by urllib.request.urlopen
+
+    The fields should all be Unicode strings.
+
+    Subclasses of this one should re-define the _real_initialize() and
+    _real_extract() methods and define a _VALID_URL regexp.
+    Probably, they should also be added to the list of extractors.
+
+    _real_extract() must return a *list* of information dictionaries as
+    described above.
+
+    Finally, the _WORKING attribute should be set to False for broken IEs
+    in order to warn the users and skip the tests.
+    """
+
+    _ready = False
+    _downloader = None
+    _WORKING = True
+
+    def __init__(self, downloader=None):
+        """Constructor. Receives an optional downloader."""
+        self._ready = False
+        self.set_downloader(downloader)
+
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url) is not None
+
+    def working(self):
+        """Getter method for _WORKING."""
+        return self._WORKING
+
+    def initialize(self):
+        """Initializes an instance (authentication, etc)."""
+        if not self._ready:
+            self._real_initialize()
+            self._ready = True
+
+    def extract(self, url):
+        """Extracts URL information and returns it in list of dicts."""
+        self.initialize()
+        return self._real_extract(url)
+
+    def set_downloader(self, downloader):
+        """Sets the downloader for this IE."""
+        self._downloader = downloader
+
+    def _real_initialize(self):
+        """Real initialization process. Redefine in subclasses."""
+        pass
+
+    def _real_extract(self, url):
+        """Real extraction process. Redefine in subclasses."""
+        pass
+
+    @property
+    def IE_NAME(self):
+        return type(self).__name__[:-2]
+
+    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
+        if note is None:
+            note = u'Downloading video webpage'
+        self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
+        try:
+            urlh = compat_urllib_request.urlopen(url_or_request)
+            webpage_bytes = urlh.read()
+            return webpage_bytes.decode('utf-8', 'replace')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            if errnote is None:
+                errnote = u'Unable to download webpage'
+            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
 
 
 
 
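The extractor contract spelled out in the InfoExtractor docstring is easiest to see in a toy subclass. The sketch below is purely illustrative: the site, URL pattern, and field values are invented, and it assumes the surrounding module's imports (re) plus the _download_webpage helper defined above.

class ExampleIE(InfoExtractor):
    """Hypothetical extractor for an imaginary example.com video site."""
    _VALID_URL = r'(?:https?://)?(?:www\.)?example\.com/video/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        # _download_webpage (defined above) reports progress and raises
        # ExtractorError on network failures.
        webpage = self._download_webpage(url, video_id)
        # A real extractor would parse the title and media URL out of
        # webpage; the placeholders below only show the expected dict shape.
        return [{
            'id':          video_id,
            'url':         u'http://media.example.com/%s.mp4' % video_id,
            'title':       u'Example video',
            'ext':         u'mp4',
            'uploader':    u'example_user',
            'upload_date': u'20130101',
        }]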
 class YoutubeIE(InfoExtractor):
-	"""Information extractor for youtube.com."""
-
-	_VALID_URL = r"""^
-	                 (
-	                     (?:https?://)?                                       # http(s):// (optional)
-	                     (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
-	                     	tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
-	                     (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
-	                     (?!view_play_list|my_playlists|artist|playlist)      # ignore playlist URLs
-	                     (?:                                                  # the various things that can precede the ID:
-	                         (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
-	                         |(?:                                             # or the v= param in all its forms
-	                             (?:watch(?:_popup)?(?:\.php)?)?              # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
-	                             (?:\?|\#!?)                                  # the params delimiter ? or # or #!
-	                             (?:.+&)?                                     # any other preceding param (like /?s=tuff&v=xxxx)
-	                             v=
-	                         )
-	                     )?                                                   # optional -> youtube.com/xxxx is OK
-	                 )?                                                       # all until now is optional -> you can pass the naked ID
-	                 ([0-9A-Za-z_-]+)                                         # here is it! the YouTube video ID
-	                 (?(1).+)?                                                # if we found the ID, everything can follow
-	                 $"""
-	_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
-	_LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
-	_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
-	_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
-	_NETRC_MACHINE = 'youtube'
-	# Listed in order of quality
-	_available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
-	_available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
-	_video_extensions = {
-		'13': '3gp',
-		'17': 'mp4',
-		'18': 'mp4',
-		'22': 'mp4',
-		'37': 'mp4',
-		'38': 'video', # You actually don't know if this will be MOV, AVI or whatever
-		'43': 'webm',
-		'44': 'webm',
-		'45': 'webm',
-		'46': 'webm',
-	}
-	_video_dimensions = {
-		'5': '240x400',
-		'6': '???',
-		'13': '???',
-		'17': '144x176',
-		'18': '360x640',
-		'22': '720x1280',
-		'34': '360x640',
-		'35': '480x854',
-		'37': '1080x1920',
-		'38': '3072x4096',
-		'43': '360x640',
-		'44': '480x854',
-		'45': '720x1280',
-		'46': '1080x1920',
-	}	
-	IE_NAME = u'youtube'
-
-	def suitable(self, url):
-		"""Receives a URL and returns True if suitable for this IE."""
-		return re.match(self._VALID_URL, url, re.VERBOSE) is not None
-
-	def report_lang(self):
-		"""Report attempt to set language."""
-		self._downloader.to_screen(u'[youtube] Setting language')
-
-	def report_login(self):
-		"""Report attempt to log in."""
-		self._downloader.to_screen(u'[youtube] Logging in')
-
-	def report_age_confirmation(self):
-		"""Report attempt to confirm age."""
-		self._downloader.to_screen(u'[youtube] Confirming age')
-
-	def report_video_webpage_download(self, video_id):
-		"""Report attempt to download video webpage."""
-		self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)
-
-	def report_video_info_webpage_download(self, video_id):
-		"""Report attempt to download video info webpage."""
-		self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)
-
-	def report_video_subtitles_download(self, video_id):
-		"""Report attempt to download video info webpage."""
-		self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id)
-
-	def report_information_extraction(self, video_id):
-		"""Report attempt to extract video information."""
-		self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)
-
-	def report_unavailable_format(self, video_id, format):
-		"""Report extracted video URL."""
-		self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))
-
-	def report_rtmp_download(self):
-		"""Indicate the download will use the RTMP protocol."""
-		self._downloader.to_screen(u'[youtube] RTMP download detected')
-
-	def _closed_captions_xml_to_srt(self, xml_string):
-		srt = ''
-		texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE)
-		# TODO parse xml instead of regex
-		for n, (start, dur_tag, dur, caption) in enumerate(texts):
-			if not dur: dur = '4'
-			start = float(start)
-			end = start + float(dur)
-			start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000)
-			end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
-			caption = unescapeHTML(caption)
-			caption = unescapeHTML(caption) # double cycle, intentional
-			srt += str(n+1) + '\n'
-			srt += start + ' --> ' + end + '\n'
-			srt += caption + '\n\n'
-		return srt
-
-	def _print_formats(self, formats):
-		print 'Available formats:'
-		for x in formats:
-			print '%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???'))
-
-	def _real_initialize(self):
-		if self._downloader is None:
-			return
-
-		username = None
-		password = None
-		downloader_params = self._downloader.params
-
-		# Attempt to use provided username and password or .netrc data
-		if downloader_params.get('username', None) is not None:
-			username = downloader_params['username']
-			password = downloader_params['password']
-		elif downloader_params.get('usenetrc', False):
-			try:
-				info = netrc.netrc().authenticators(self._NETRC_MACHINE)
-				if info is not None:
-					username = info[0]
-					password = info[2]
-				else:
-					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
-			except (IOError, netrc.NetrcParseError), err:
-				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
-				return
-
-		# Set language
-		request = urllib2.Request(self._LANG_URL)
-		try:
-			self.report_lang()
-			urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err))
-			return
-
-		# No authentication to be performed
-		if username is None:
-			return
-
-		# Log in
-		login_form = {
-				'current_form': 'loginForm',
-				'next':		'/',
-				'action_login':	'Log In',
-				'username':	username,
-				'password':	password,
-				}
-		request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
-		try:
-			self.report_login()
-			login_results = urllib2.urlopen(request).read()
-			if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
-				self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
-				return
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
-			return
-
-		# Confirm age
-		age_form = {
-				'next_url':		'/',
-				'action_confirm':	'Confirm',
-				}
-		request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form))
-		try:
-			self.report_age_confirmation()
-			age_results = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
-			return
-
-	def _real_extract(self, url):
-		# Extract original video URL from URL with redirection, like age verification, using next_url parameter
-		mobj = re.search(self._NEXT_URL_RE, url)
-		if mobj:
-			url = 'http://www.youtube.com/' + urllib.unquote(mobj.group(1)).lstrip('/')
-
-		# Extract video id from URL
-		mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-		video_id = mobj.group(2)
-
-		# Get video webpage
-		self.report_video_webpage_download(video_id)
-		request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
-		try:
-			video_webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
-			return
-
-		# Attempt to extract SWF player URL
-		mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
-		if mobj is not None:
-			player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
-		else:
-			player_url = None
-
-		# Get video info
-		self.report_video_info_webpage_download(video_id)
-		for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
-			video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
-					% (video_id, el_type))
-			request = urllib2.Request(video_info_url)
-			try:
-				video_info_webpage = urllib2.urlopen(request).read()
-				video_info = parse_qs(video_info_webpage)
-				if 'token' in video_info:
-					break
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
-				return
-		if 'token' not in video_info:
-			if 'reason' in video_info:
-				self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8'))
-			else:
-				self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
-			return
-
-		# Check for "rental" videos
-		if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
-			self._downloader.trouble(u'ERROR: "rental" videos not supported')
-			return
-
-		# Start extracting information
-		self.report_information_extraction(video_id)
-
-		# uploader
-		if 'author' not in video_info:
-			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
-			return
-		video_uploader = urllib.unquote_plus(video_info['author'][0])
-
-		# title
-		if 'title' not in video_info:
-			self._downloader.trouble(u'ERROR: unable to extract video title')
-			return
-		video_title = urllib.unquote_plus(video_info['title'][0])
-		video_title = video_title.decode('utf-8')
-
-		# thumbnail image
-		if 'thumbnail_url' not in video_info:
-			self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
-			video_thumbnail = ''
-		else:	# don't panic if we can't find it
-			video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])
-
-		# upload date
-		upload_date = u'NA'
-		mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
-		if mobj is not None:
-			upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
-			format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
-			for expression in format_expressions:
-				try:
-					upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
-				except:
-					pass
-
-		# description
-		video_description = get_element_by_id("eow-description", video_webpage.decode('utf8'))
-		if video_description: video_description = clean_html(video_description)
-		else: video_description = ''
-			
-		# closed captions
-		video_subtitles = None
-		if self._downloader.params.get('writesubtitles', False):
-			try:
-				self.report_video_subtitles_download(video_id)
-				request = urllib2.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
-				try:
-					srt_list = urllib2.urlopen(request).read()
-				except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-					raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
-				srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
-				srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
-				if not srt_lang_list:
-					raise Trouble(u'WARNING: video has no closed captions')
-				if self._downloader.params.get('subtitleslang', False):
-					srt_lang = self._downloader.params.get('subtitleslang')
-				elif 'en' in srt_lang_list:
-					srt_lang = 'en'
-				else:
-					srt_lang = srt_lang_list.keys()[0]
-				if not srt_lang in srt_lang_list:
-					raise Trouble(u'WARNING: no closed captions found in the specified language')
-				request = urllib2.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
-				try:
-					srt_xml = urllib2.urlopen(request).read()
-				except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-					raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
-				if not srt_xml:
-					raise Trouble(u'WARNING: unable to download video subtitles')
-				video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8'))
-			except Trouble as trouble:
-				self._downloader.trouble(trouble[0])
-
-		if 'length_seconds' not in video_info:
-			self._downloader.trouble(u'WARNING: unable to extract video duration')
-			video_duration = ''
-		else:
-			video_duration = urllib.unquote_plus(video_info['length_seconds'][0])
-
-		# token
-		video_token = urllib.unquote_plus(video_info['token'][0])
-
-		# Decide which formats to download
-		req_format = self._downloader.params.get('format', None)
-
-		if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
-			self.report_rtmp_download()
-			video_url_list = [(None, video_info['conn'][0])]
-		elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
-			url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
-			url_data = [parse_qs(uds) for uds in url_data_strs]
-			url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
-			url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
-
-			format_limit = self._downloader.params.get('format_limit', None)
-			available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
-			if format_limit is not None and format_limit in available_formats:
-				format_list = available_formats[available_formats.index(format_limit):]
-			else:
-				format_list = available_formats
-			existing_formats = [x for x in format_list if x in url_map]
-			if len(existing_formats) == 0:
-				self._downloader.trouble(u'ERROR: no known formats available for video')
-				return
-			if self._downloader.params.get('listformats', None):
-				self._print_formats(existing_formats)
-				return
-			if req_format is None or req_format == 'best':
-				video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
-			elif req_format == 'worst':
-				video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
-			elif req_format in ('-1', 'all'):
-				video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
-			else:
-				# Specific formats. We pick the first in a slash-delimeted sequence.
-				# For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
-				req_formats = req_format.split('/')
-				video_url_list = None
-				for rf in req_formats:
-					if rf in url_map:
-						video_url_list = [(rf, url_map[rf])]
-						break
-				if video_url_list is None:
-					self._downloader.trouble(u'ERROR: requested format not available')
-					return
-		else:
-			self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
-			return
-
-		results = []
-		for format_param, video_real_url in video_url_list:
-			# Extension
-			video_extension = self._video_extensions.get(format_param, 'flv')
-
-			results.append({
-				'id':		video_id.decode('utf-8'),
-				'url':		video_real_url.decode('utf-8'),
-				'uploader':	video_uploader.decode('utf-8'),
-				'upload_date':	upload_date,
-				'title':	video_title,
-				'ext':		video_extension.decode('utf-8'),
-				'format':	(format_param is None and u'NA' or format_param.decode('utf-8')),
-				'thumbnail':	video_thumbnail.decode('utf-8'),
-				'description':	video_description,
-				'player_url':	player_url,
-				'subtitles':	video_subtitles,
-				'duration':		video_duration
-			})
-		return results
+    """Information extractor for youtube.com."""
+
+    _VALID_URL = r"""^
+                     (
+                         (?:https?://)?                                       # http(s):// (optional)
+                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
+                            tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
+                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
+                         (?!view_play_list|my_playlists|artist|playlist)      # ignore playlist URLs
+                         (?:                                                  # the various things that can precede the ID:
+                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
+                             |(?:                                             # or the v= param in all its forms
+                                 (?:watch(?:_popup)?(?:\.php)?)?              # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
+                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
+                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
+                                 v=
+                             )
+                         )?                                                   # optional -> youtube.com/xxxx is OK
+                     )?                                                       # all until now is optional -> you can pass the naked ID
+                     ([0-9A-Za-z_-]+)                                         # here it is! the YouTube video ID
+                     (?(1).+)?                                                # if we found the ID, everything can follow
+                     $"""
+    _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
+    _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
+    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
+    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
+    _NETRC_MACHINE = 'youtube'
+    # Listed in order of quality
+    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
+    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
+    _video_extensions = {
+        '13': '3gp',
+        '17': 'mp4',
+        '18': 'mp4',
+        '22': 'mp4',
+        '37': 'mp4',
+        '38': 'video', # You actually don't know if this will be MOV, AVI or whatever
+        '43': 'webm',
+        '44': 'webm',
+        '45': 'webm',
+        '46': 'webm',
+    }
+    _video_dimensions = {
+        '5': '240x400',
+        '6': '???',
+        '13': '???',
+        '17': '144x176',
+        '18': '360x640',
+        '22': '720x1280',
+        '34': '360x640',
+        '35': '480x854',
+        '37': '1080x1920',
+        '38': '3072x4096',
+        '43': '360x640',
+        '44': '480x854',
+        '45': '720x1280',
+        '46': '1080x1920',
+    }
+    IE_NAME = u'youtube'
+
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
+    def report_lang(self):
+        """Report attempt to set language."""
+        self._downloader.to_screen(u'[youtube] Setting language')
+
+    def report_login(self):
+        """Report attempt to log in."""
+        self._downloader.to_screen(u'[youtube] Logging in')
+
+    def report_age_confirmation(self):
+        """Report attempt to confirm age."""
+        self._downloader.to_screen(u'[youtube] Confirming age')
+
+    def report_video_webpage_download(self, video_id):
+        """Report attempt to download video webpage."""
+        self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)
+
+    def report_video_info_webpage_download(self, video_id):
+        """Report attempt to download video info webpage."""
+        self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)
+
+    def report_video_subtitles_download(self, video_id):
+        """Report attempt to download video info webpage."""
+        self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id)
+
+    def report_information_extraction(self, video_id):
+        """Report attempt to extract video information."""
+        self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)
+
+    def report_unavailable_format(self, video_id, format):
+        """Report extracted video URL."""
+        self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))
+
+    def report_rtmp_download(self):
+        """Indicate the download will use the RTMP protocol."""
+        self._downloader.to_screen(u'[youtube] RTMP download detected')
+
+    def _closed_captions_xml_to_srt(self, xml_string):
+        srt = ''
+        texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE)
+        # TODO parse xml instead of regex
+        for n, (start, dur_tag, dur, caption) in enumerate(texts):
+            if not dur: dur = '4'
+            start = float(start)
+            end = start + float(dur)
+            start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000)
+            end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
+            caption = unescapeHTML(caption)
+            caption = unescapeHTML(caption) # double cycle, intentional
+            srt += str(n+1) + '\n'
+            srt += start + ' --> ' + end + '\n'
+            srt += caption + '\n\n'
+        return srt
+
+    def _extract_subtitles(self, video_id):
+        self.report_video_subtitles_download(video_id)
+        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
+        try:
+            srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+        srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
+        srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
+        if not srt_lang_list:
+            return (u'WARNING: video has no closed captions', None)
+        if self._downloader.params.get('subtitleslang', False):
+            srt_lang = self._downloader.params.get('subtitleslang')
+        elif 'en' in srt_lang_list:
+            srt_lang = 'en'
+        else:
+            srt_lang = list(srt_lang_list.keys())[0]
+        if srt_lang not in srt_lang_list:
+            return (u'WARNING: no closed captions found in the specified language', None)
+        request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
+        try:
+            srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+        if not srt_xml:
+            return (u'WARNING: unable to download video subtitles', None)
+        return (None, self._closed_captions_xml_to_srt(srt_xml))
+
+    def _print_formats(self, formats):
+        print('Available formats:')
+        for x in formats:
+            print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))
+
+    def _real_initialize(self):
+        if self._downloader is None:
+            return
+
+        username = None
+        password = None
+        downloader_params = self._downloader.params
+
+        # Attempt to use provided username and password or .netrc data
+        if downloader_params.get('username', None) is not None:
+            username = downloader_params['username']
+            password = downloader_params['password']
+        elif downloader_params.get('usenetrc', False):
+            try:
+                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+                if info is not None:
+                    username = info[0]
+                    password = info[2]
+                else:
+                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+            except (IOError, netrc.NetrcParseError) as err:
+                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+                return
+
+        # Set language
+        request = compat_urllib_request.Request(self._LANG_URL)
+        try:
+            self.report_lang()
+            compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
+            return
+
+        # No authentication to be performed
+        if username is None:
+            return
+
+        # Log in
+        login_form = {
+                'current_form': 'loginForm',
+                'next':     '/',
+                'action_login': 'Log In',
+                'username': username,
+                'password': password,
+                }
+        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        try:
+            self.report_login()
+            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
+                self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
+                return
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
+            return
+
+        # Confirm age
+        age_form = {
+                'next_url':     '/',
+                'action_confirm':   'Confirm',
+                }
+        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
+        try:
+            self.report_age_confirmation()
+            age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+            return
+
+    def _extract_id(self, url):
+        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        video_id = mobj.group(2)
+        return video_id
+
+    def _real_extract(self, url):
+        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
+        mobj = re.search(self._NEXT_URL_RE, url)
+        if mobj:
+            url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
+        video_id = self._extract_id(url)
+
+        # Get video webpage
+        self.report_video_webpage_download(video_id)
+        url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
+        request = compat_urllib_request.Request(url)
+        try:
+            video_webpage_bytes = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            return
+
+        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
+
+        # Attempt to extract SWF player URL
+        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
+        if mobj is not None:
+            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
+        else:
+            player_url = None
+
+        # Get video info
+        self.report_video_info_webpage_download(video_id)
+        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
+            video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
+                    % (video_id, el_type))
+            request = compat_urllib_request.Request(video_info_url)
+            try:
+                video_info_webpage_bytes = compat_urllib_request.urlopen(request).read()
+                video_info_webpage = video_info_webpage_bytes.decode('utf-8', 'ignore')
+                video_info = compat_parse_qs(video_info_webpage)
+                if 'token' in video_info:
+                    break
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
+                return
+        if 'token' not in video_info:
+            if 'reason' in video_info:
+                self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0])
+            else:
+                self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
+            return
+
+        # Check for "rental" videos
+        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
+            self._downloader.trouble(u'ERROR: "rental" videos not supported')
+            return
+
+        # Start extracting information
+        self.report_information_extraction(video_id)
+
+        # uploader
+        if 'author' not in video_info:
+            self._downloader.trouble(u'ERROR: unable to extract uploader name')
+            return
+        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
+
+        # uploader_id
+        video_uploader_id = None
+        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
+        if mobj is not None:
+            video_uploader_id = mobj.group(1)
+        else:
+            self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+
+        # title
+        if 'title' not in video_info:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
+
+        # thumbnail image
+        if 'thumbnail_url' not in video_info:
+            self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+            video_thumbnail = ''
+        else:   # don't panic if we can't find it
+            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
+
+        # upload date
+        upload_date = None
+        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
+        if mobj is not None:
+            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
+            format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
+            for expression in format_expressions:
+                try:
+                    upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
+                except ValueError:
+                    pass
+
+        # description
+        video_description = get_element_by_id("eow-description", video_webpage)
+        if video_description:
+            video_description = clean_html(video_description)
+        else:
+            video_description = ''
+
+        # closed captions
+        video_subtitles = None
+        if self._downloader.params.get('writesubtitles', False):
+            (srt_error, video_subtitles) = self._extract_subtitles(video_id)
+            if srt_error:
+                self._downloader.trouble(srt_error)
+
+        if 'length_seconds' not in video_info:
+            self._downloader.trouble(u'WARNING: unable to extract video duration')
+            video_duration = ''
+        else:
+            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
+
+        # token
+        video_token = compat_urllib_parse.unquote_plus(video_info['token'][0])
+
+        # Decide which formats to download
+        req_format = self._downloader.params.get('format', None)
+
+        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
+            self.report_rtmp_download()
+            video_url_list = [(None, video_info['conn'][0])]
+        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
+            url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
+            url_data = [compat_parse_qs(uds) for uds in url_data_strs]
+            url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
+            url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
+
+            format_limit = self._downloader.params.get('format_limit', None)
+            available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
+            if format_limit is not None and format_limit in available_formats:
+                format_list = available_formats[available_formats.index(format_limit):]
+            else:
+                format_list = available_formats
+            existing_formats = [x for x in format_list if x in url_map]
+            if len(existing_formats) == 0:
+                self._downloader.trouble(u'ERROR: no known formats available for video')
+                return
+            if self._downloader.params.get('listformats', None):
+                self._print_formats(existing_formats)
+                return
+            if req_format is None or req_format == 'best':
+                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+            elif req_format == 'worst':
+                video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality
+            elif req_format in ('-1', 'all'):
+                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+            else:
+                # Specific formats. We pick the first in a slash-delimited sequence.
+                # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
+                req_formats = req_format.split('/')
+                video_url_list = None
+                for rf in req_formats:
+                    if rf in url_map:
+                        video_url_list = [(rf, url_map[rf])]
+                        break
+                if video_url_list is None:
+                    self._downloader.trouble(u'ERROR: requested format not available')
+                    return
+        else:
+            self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
+            return
+
+        results = []
+        for format_param, video_real_url in video_url_list:
+            # Extension
+            video_extension = self._video_extensions.get(format_param, 'flv')
+
+            video_format = '{0} - {1}'.format(format_param if format_param else video_extension,
+                                              self._video_dimensions.get(format_param, '???'))
+
+            results.append({
+                'id':       video_id,
+                'url':      video_real_url,
+                'uploader': video_uploader,
+                'uploader_id': video_uploader_id,
+                'upload_date':  upload_date,
+                'title':    video_title,
+                'ext':      video_extension,
+                'format':   video_format,
+                'thumbnail':    video_thumbnail,
+                'description':  video_description,
+                'player_url':   player_url,
+                'subtitles':    video_subtitles,
+                'duration':     video_duration
+            })
+        return results
 
 
 
 
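One detail of the format selection above that is easy to misread: a request such as '22/35/18' is a preference list, not a set. The slash-delimited itags are tried left to right and the first one present in url_map wins; only if none matches does the downloader report that the requested format is not available. A standalone sketch of that rule (pick_format, url_map and the sample itags are illustrative names, not part of this commit):

def pick_format(req_format, url_map):
    # Try the requested itags left to right; the first available one wins.
    for rf in req_format.split('/'):
        if rf in url_map:
            return (rf, url_map[rf])
    return None  # caller reports 'requested format not available'

# With url_map = {'35': u'http://...', '18': u'http://...'}:
# pick_format('22/35/18', url_map) returns ('35', u'http://...'),
# because itag 22 is not offered for this particular video.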
 class MetacafeIE(InfoExtractor):
-	"""Information Extractor for metacafe.com."""
-
-	_VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
-	_DISCLAIMER = 'http://www.metacafe.com/family_filter/'
-	_FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
-	IE_NAME = u'metacafe'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_disclaimer(self):
-		"""Report disclaimer retrieval."""
-		self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
-
-	def report_age_confirmation(self):
-		"""Report attempt to confirm age."""
-		self._downloader.to_screen(u'[metacafe] Confirming age')
-
-	def report_download_webpage(self, video_id):
-		"""Report webpage download."""
-		self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)
-
-	def _real_initialize(self):
-		# Retrieve disclaimer
-		request = urllib2.Request(self._DISCLAIMER)
-		try:
-			self.report_disclaimer()
-			disclaimer = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
-			return
-
-		# Confirm age
-		disclaimer_form = {
-			'filters': '0',
-			'submit': "Continue - I'm over 18",
-			}
-		request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form))
-		try:
-			self.report_age_confirmation()
-			disclaimer = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
-			return
-
-	def _real_extract(self, url):
-		# Extract id and simplified title from URL
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-
-		video_id = mobj.group(1)
-
-		# Check if video comes from YouTube
-		mobj2 = re.match(r'^yt-(.*)$', video_id)
-		if mobj2 is not None:
-			self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
-			return
-
-		# Retrieve video webpage to extract further information
-		request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
-		try:
-			self.report_download_webpage(video_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % str(err))
-			return
-
-		# Extract URL, uploader and title from webpage
-		self.report_extraction(video_id)
-		mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
-		if mobj is not None:
-			mediaURL = urllib.unquote(mobj.group(1))
-			video_extension = mediaURL[-3:]
-
-			# Extract gdaKey if available
-			mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
-			if mobj is None:
-				video_url = mediaURL
-			else:
-				gdaKey = mobj.group(1)
-				video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
-		else:
-			mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
-			if mobj is None:
-				self._downloader.trouble(u'ERROR: unable to extract media URL')
-				return
-			vardict = parse_qs(mobj.group(1))
-			if 'mediaData' not in vardict:
-				self._downloader.trouble(u'ERROR: unable to extract media URL')
-				return
-			mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
-			if mobj is None:
-				self._downloader.trouble(u'ERROR: unable to extract media URL')
-				return
-			mediaURL = mobj.group(1).replace('\\/', '/')
-			video_extension = mediaURL[-3:]
-			video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
-
-		mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract title')
-			return
-		video_title = mobj.group(1).decode('utf-8')
-
-		mobj = re.search(r'submitter=(.*?);', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
-			return
-		video_uploader = mobj.group(1)
-
-		return [{
-			'id':		video_id.decode('utf-8'),
-			'url':		video_url.decode('utf-8'),
-			'uploader':	video_uploader.decode('utf-8'),
-			'upload_date':	u'NA',
-			'title':	video_title,
-			'ext':		video_extension.decode('utf-8'),
-			'format':	u'NA',
-			'player_url':	None,
-		}]
+    """Information Extractor for metacafe.com."""
+
+    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
+    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
+    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
+    IE_NAME = u'metacafe'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_disclaimer(self):
+        """Report disclaimer retrieval."""
+        self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
+
+    def report_age_confirmation(self):
+        """Report attempt to confirm age."""
+        self._downloader.to_screen(u'[metacafe] Confirming age')
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)
+
+    def _real_initialize(self):
+        # Retrieve disclaimer
+        request = compat_urllib_request.Request(self._DISCLAIMER)
+        try:
+            self.report_disclaimer()
+            disclaimer = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
+            return
+
+        # Confirm age
+        disclaimer_form = {
+            'filters': '0',
+            'submit': "Continue - I'm over 18",
+            }
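+        # POSTing this form emulates clicking "Continue - I'm over 18" on the
+        # family-filter page, so subsequent requests are not blocked by the filter.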
+        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
+        try:
+            self.report_age_confirmation()
+            disclaimer = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+            return
+
+    def _real_extract(self, url):
+        # Extract id and simplified title from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(1)
+
+        # Check if video comes from YouTube
+        mobj2 = re.match(r'^yt-(.*)$', video_id)
+        if mobj2 is not None:
+            self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
+            return
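+        # e.g. a Metacafe video id of 'yt-abc123' (illustrative) is delegated
+        # to http://www.youtube.com/watch?v=abc123 rather than extracted here.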
+
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
+        try:
+            self.report_download_webpage(video_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % compat_str(err))
+            return
+
+        # Extract URL, uploader and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
+        if mobj is not None:
+            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
+            video_extension = mediaURL[-3:]
+
+            # Extract gdaKey if available
+            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
+            if mobj is None:
+                video_url = mediaURL
+            else:
+                gdaKey = mobj.group(1)
+                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
+        else:
+            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
+            if mobj is None:
+                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                return
+            vardict = compat_parse_qs(mobj.group(1))
+            if 'mediaData' not in vardict:
+                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                return
+            mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
+            if mobj is None:
+                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                return
+            mediaURL = mobj.group(1).replace('\\/', '/')
+            video_extension = mediaURL[-3:]
+            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
+
+        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+        video_title = mobj.group(1).decode('utf-8')
+
+        mobj = re.search(r'submitter=(.*?);', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+            return
+        video_uploader = mobj.group(1)
+
+        return [{
+            'id':       video_id.decode('utf-8'),
+            'url':      video_url.decode('utf-8'),
+            'uploader': video_uploader.decode('utf-8'),
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      video_extension.decode('utf-8'),
+        }]
 
 
 
 
 class DailymotionIE(InfoExtractor):
-	"""Information Extractor for Dailymotion"""
-
-	_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
-	IE_NAME = u'dailymotion'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_webpage(self, video_id):
-		"""Report webpage download."""
-		self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
-
-	def _real_extract(self, url):
-		# Extract id and simplified title from URL
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-
-		video_id = mobj.group(1).split('_')[0].split('?')[0]
-
-		video_extension = 'mp4'
-
-		# Retrieve video webpage to extract further information
-		request = urllib2.Request(url)
-		request.add_header('Cookie', 'family_filter=off')
-		try:
-			self.report_download_webpage(video_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % str(err))
-			return
-
-		# Extract URL, uploader and title from webpage
-		self.report_extraction(video_id)
-		mobj = re.search(r'\s*var flashvars = (.*)', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract media URL')
-			return
-		flashvars = urllib.unquote(mobj.group(1))
-
-		for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
-			if key in flashvars:
-				max_quality = key
-				self._downloader.to_screen(u'[dailymotion] Using %s' % key)
-				break
-		else:
-			self._downloader.trouble(u'ERROR: unable to extract video URL')
-			return
-
-		mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video URL')
-			return
-
-		video_url = urllib.unquote(mobj.group(1)).replace('\\/', '/')
-
-		# TODO: support choosing qualities
-
-		mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract title')
-			return
-		video_title = unescapeHTML(mobj.group('title').decode('utf-8'))
-
-		video_uploader = u'NA'
-		mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>', webpage)
-		if mobj is None:
-			# lookin for official user
-			mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
-			if mobj_official is None:
-				self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
-			else:
-				video_uploader = mobj_official.group(1)
-		else:
-			video_uploader = mobj.group(1)
-
-		video_upload_date = u'NA'
-		mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
-		if mobj is not None:
-			video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
-
-		return [{
-			'id':		video_id.decode('utf-8'),
-			'url':		video_url.decode('utf-8'),
-			'uploader':	video_uploader.decode('utf-8'),
-			'upload_date':	video_upload_date,
-			'title':	video_title,
-			'ext':		video_extension.decode('utf-8'),
-			'format':	u'NA',
-			'player_url':	None,
-		}]
-
-
-class GoogleIE(InfoExtractor):
-	"""Information extractor for video.google.com."""
-
-	_VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'
-	IE_NAME = u'video.google'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_webpage(self, video_id):
-		"""Report webpage download."""
-		self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)
-
-	def _real_extract(self, url):
-		# Extract id from URL
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
-			return
-
-		video_id = mobj.group(1)
-
-		video_extension = 'mp4'
-
-		# Retrieve video webpage to extract further information
-		request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
-		try:
-			self.report_download_webpage(video_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-			return
-
-		# Extract URL, uploader, and title from webpage
-		self.report_extraction(video_id)
-		mobj = re.search(r"download_url:'([^']+)'", webpage)
-		if mobj is None:
-			video_extension = 'flv'
-			mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract media URL')
-			return
-		mediaURL = urllib.unquote(mobj.group(1))
-		mediaURL = mediaURL.replace('\\x3d', '\x3d')
-		mediaURL = mediaURL.replace('\\x26', '\x26')
-
-		video_url = mediaURL
-
-		mobj = re.search(r'<title>(.*)</title>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract title')
-			return
-		video_title = mobj.group(1).decode('utf-8')
-
-		# Extract video description
-		mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video description')
-			return
-		video_description = mobj.group(1).decode('utf-8')
-		if not video_description:
-			video_description = 'No description available.'
-
-		# Extract video thumbnail
-		if self._downloader.params.get('forcethumbnail', False):
-			request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
-			try:
-				webpage = urllib2.urlopen(request).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-				return
-			mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
-			if mobj is None:
-				self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
-				return
-			video_thumbnail = mobj.group(1)
-		else:	# we need something to pass to process_info
-			video_thumbnail = ''
-
-		return [{
-			'id':		video_id.decode('utf-8'),
-			'url':		video_url.decode('utf-8'),
-			'uploader':	u'NA',
-			'upload_date':	u'NA',
-			'title':	video_title,
-			'ext':		video_extension.decode('utf-8'),
-			'format':	u'NA',
-			'player_url':	None,
-		}]
+    """Information Extractor for Dailymotion"""
+
+    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
+    IE_NAME = u'dailymotion'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
+
+    def _real_extract(self, url):
+        # Extract id and simplified title from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(1).split('_')[0].split('?')[0]
+
+        video_extension = 'mp4'
+
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request(url)
+        request.add_header('Cookie', 'family_filter=off')
+        webpage = self._download_webpage(request, video_id)
+
+        # Extract URL, uploader and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'\s*var flashvars = (.*)', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract media URL')
+            return
+        flashvars = compat_urllib_parse.unquote(mobj.group(1))
+
+        for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
+            if key in flashvars:
+                max_quality = key
+                self._downloader.to_screen(u'[dailymotion] Using %s' % key)
+                break
+        else:
+            self._downloader.trouble(u'ERROR: unable to extract video URL')
+            return
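+        # (The else branch of the for loop above runs only if no break occurred,
+        # i.e. none of the preferred quality keys was present in flashvars.)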
+
+        mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video URL')
+            return
+
+        video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
+
+        # TODO: support choosing qualities
+
+        mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+        video_title = unescapeHTML(mobj.group('title'))
+
+        video_uploader = None
+        mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>', webpage)
+        if mobj is None:
+            # looking for the official user
+            mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
+            if mobj_official is None:
+                self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+            else:
+                video_uploader = mobj_official.group(1)
+        else:
+            video_uploader = mobj.group(1)
+
+        video_upload_date = None
+        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
+        if mobj is not None:
+            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
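+        # The page lists the date as DD-MM-YYYY, so e.g. '21-12-2012' becomes
+        # the YYYYMMDD string '20121221'.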
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': video_uploader,
+            'upload_date':  video_upload_date,
+            'title':    video_title,
+            'ext':      video_extension,
+        }]
 
 
 
 
 class PhotobucketIE(InfoExtractor):
-	"""Information extractor for photobucket.com."""
-
-	_VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
-	IE_NAME = u'photobucket'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_webpage(self, video_id):
-		"""Report webpage download."""
-		self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
-
-	def _real_extract(self, url):
-		# Extract id from URL
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
-			return
-
-		video_id = mobj.group(1)
-
-		video_extension = 'flv'
-
-		# Retrieve video webpage to extract further information
-		request = urllib2.Request(url)
-		try:
-			self.report_download_webpage(video_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-			return
-
-		# Extract URL, uploader, and title from webpage
-		self.report_extraction(video_id)
-		mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract media URL')
-			return
-		mediaURL = urllib.unquote(mobj.group(1))
-
-		video_url = mediaURL
-
-		mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract title')
-			return
-		video_title = mobj.group(1).decode('utf-8')
-
-		video_uploader = mobj.group(2).decode('utf-8')
-
-		return [{
-			'id':		video_id.decode('utf-8'),
-			'url':		video_url.decode('utf-8'),
-			'uploader':	video_uploader,
-			'upload_date':	u'NA',
-			'title':	video_title,
-			'ext':		video_extension.decode('utf-8'),
-			'format':	u'NA',
-			'player_url':	None,
-		}]
+    """Information extractor for photobucket.com."""
+
+    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+    IE_NAME = u'photobucket'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
+
+    def _real_extract(self, url):
+        # Extract id from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(1)
+
+        video_extension = 'flv'
+
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request(url)
+        try:
+            self.report_download_webpage(video_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+
+        # Extract URL, uploader, and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract media URL')
+            return
+        mediaURL = compat_urllib_parse.unquote(mobj.group(1))
+
+        video_url = mediaURL
+
+        mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+        video_title = mobj.group(1).decode('utf-8')
+
+        video_uploader = mobj.group(2).decode('utf-8')
+
+        return [{
+            'id':       video_id.decode('utf-8'),
+            'url':      video_url.decode('utf-8'),
+            'uploader': video_uploader,
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      video_extension.decode('utf-8'),
+        }]
 
 
 
 
 class YahooIE(InfoExtractor):
-	"""Information extractor for video.yahoo.com."""
-
-	# _VALID_URL matches all Yahoo! Video URLs
-	# _VPAGE_URL matches only the extractable '/watch/' URLs
-	_VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
-	_VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
-	IE_NAME = u'video.yahoo'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_webpage(self, video_id):
-		"""Report webpage download."""
-		self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
-
-	def _real_extract(self, url, new_video=True):
-		# Extract ID from URL
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
-			return
-
-		video_id = mobj.group(2)
-		video_extension = 'flv'
-
-		# Rewrite valid but non-extractable URLs as
-		# extractable English language /watch/ URLs
-		if re.match(self._VPAGE_URL, url) is None:
-			request = urllib2.Request(url)
-			try:
-				webpage = urllib2.urlopen(request).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-				return
-
-			mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
-			if mobj is None:
-				self._downloader.trouble(u'ERROR: Unable to extract id field')
-				return
-			yahoo_id = mobj.group(1)
-
-			mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
-			if mobj is None:
-				self._downloader.trouble(u'ERROR: Unable to extract vid field')
-				return
-			yahoo_vid = mobj.group(1)
-
-			url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
-			return self._real_extract(url, new_video=False)
-
-		# Retrieve video webpage to extract further information
-		request = urllib2.Request(url)
-		try:
-			self.report_download_webpage(video_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-			return
-
-		# Extract uploader and title from webpage
-		self.report_extraction(video_id)
-		mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video title')
-			return
-		video_title = mobj.group(1).decode('utf-8')
-
-		mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video uploader')
-			return
-		video_uploader = mobj.group(1).decode('utf-8')
-
-		# Extract video thumbnail
-		mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
-			return
-		video_thumbnail = mobj.group(1).decode('utf-8')
-
-		# Extract video description
-		mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video description')
-			return
-		video_description = mobj.group(1).decode('utf-8')
-		if not video_description:
-			video_description = 'No description available.'
-
-		# Extract video height and width
-		mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video height')
-			return
-		yv_video_height = mobj.group(1)
-
-		mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video width')
-			return
-		yv_video_width = mobj.group(1)
-
-		# Retrieve video playlist to extract media URL
-		# I'm not completely sure what all these options are, but we
-		# seem to need most of them, otherwise the server sends a 401.
-		yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
-		yv_bitrate = '700'  # according to Wikipedia this is hard-coded
-		request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
-				'&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
-				'&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
-		try:
-			self.report_download_webpage(video_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-			return
-
-		# Extract media URL from playlist XML
-		mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: Unable to extract media URL')
-			return
-		video_url = urllib.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
-		video_url = unescapeHTML(video_url)
-
-		return [{
-			'id':		video_id.decode('utf-8'),
-			'url':		video_url,
-			'uploader':	video_uploader,
-			'upload_date':	u'NA',
-			'title':	video_title,
-			'ext':		video_extension.decode('utf-8'),
-			'thumbnail':	video_thumbnail.decode('utf-8'),
-			'description':	video_description,
-			'thumbnail':	video_thumbnail,
-			'player_url':	None,
-		}]
+    """Information extractor for video.yahoo.com."""
+
+    _WORKING = False
+    # _VALID_URL matches all Yahoo! Video URLs
+    # _VPAGE_URL matches only the extractable '/watch/' URLs
+    _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
+    _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
+    IE_NAME = u'video.yahoo'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
+
+    def _real_extract(self, url, new_video=True):
+        # Extract ID from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(2)
+        video_extension = 'flv'
+
+        # Rewrite valid but non-extractable URLs as
+        # extractable English language /watch/ URLs
+        if re.match(self._VPAGE_URL, url) is None:
+            request = compat_urllib_request.Request(url)
+            try:
+                webpage = compat_urllib_request.urlopen(request).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+                return
+
+            mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
+            if mobj is None:
+                self._downloader.trouble(u'ERROR: Unable to extract id field')
+                return
+            yahoo_id = mobj.group(1)
+
+            mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
+            if mobj is None:
+                self._downloader.trouble(u'ERROR: Unable to extract vid field')
+                return
+            yahoo_vid = mobj.group(1)
+
+            url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
+            return self._real_extract(url, new_video=False)
+
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request(url)
+        try:
+            self.report_download_webpage(video_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+
+        # Extract uploader and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = mobj.group(1).decode('utf-8')
+
+        mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video uploader')
+            return
+        video_uploader = mobj.group(2).decode('utf-8')
+
+        # Extract video thumbnail
+        mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            return
+        video_thumbnail = mobj.group(1).decode('utf-8')
+
+        # Extract video description
+        mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video description')
+            return
+        video_description = mobj.group(1).decode('utf-8')
+        if not video_description:
+            video_description = 'No description available.'
+
+        # Extract video height and width
+        mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video height')
+            return
+        yv_video_height = mobj.group(1)
+
+        mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video width')
+            return
+        yv_video_width = mobj.group(1)
+
+        # Retrieve video playlist to extract media URL
+        # I'm not completely sure what all these options are, but we
+        # seem to need most of them, otherwise the server sends a 401.
+        yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
+        yv_bitrate = '700'  # according to Wikipedia this is hard-coded
+        request = compat_urllib_request.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
+                '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
+                '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
+        try:
+            self.report_download_webpage(video_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+
+        # Extract media URL from playlist XML
+        mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Unable to extract media URL')
+            return
+        video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
+        video_url = unescapeHTML(video_url)
+
+        return [{
+            'id':       video_id.decode('utf-8'),
+            'url':      video_url,
+            'uploader': video_uploader,
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      video_extension.decode('utf-8'),
+            'thumbnail':    video_thumbnail,
+            'description':  video_description,
+        }]
 
 
 
 
 class VimeoIE(InfoExtractor):
-	"""Information extractor for vimeo.com."""
-
-	# _VALID_URL matches Vimeo URLs
-	_VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:groups/[^/]+/)?(?:videos?/)?([0-9]+)'
-	IE_NAME = u'vimeo'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_webpage(self, video_id):
-		"""Report webpage download."""
-		self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)
-
-	def _real_extract(self, url, new_video=True):
-		# Extract ID from URL
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
-			return
-
-		video_id = mobj.group(1)
-
-		# Retrieve video webpage to extract further information
-		request = urllib2.Request(url, None, std_headers)
-		try:
-			self.report_download_webpage(video_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-			return
-
-		# Now we begin extracting as much information as we can from what we
-		# retrieved. First we extract the information common to all extractors,
-		# and latter we extract those that are Vimeo specific.
-		self.report_extraction(video_id)
-
-		# Extract the config JSON
-		config = webpage.split(' = {config:')[1].split(',assets:')[0]
-		try:
-			config = json.loads(config)
-		except:
-			self._downloader.trouble(u'ERROR: unable to extract info section')
-			return
-		
-		# Extract title
-		video_title = config["video"]["title"]
-
-		# Extract uploader
-		video_uploader = config["video"]["owner"]["name"]
-
-		# Extract video thumbnail
-		video_thumbnail = config["video"]["thumbnail"]
-
-		# Extract video description
-		video_description = get_element_by_id("description", webpage.decode('utf8'))
-		if video_description: video_description = clean_html(video_description)
-		else: video_description = ''
-
-		# Extract upload date
-		video_upload_date = u'NA'
-		mobj = re.search(r'<span id="clip-date" style="display:none">[^:]*: (.*?)( \([^\(]*\))?</span>', webpage)
-		if mobj is not None:
-			video_upload_date = mobj.group(1)
-
-		# Vimeo specific: extract request signature and timestamp
-		sig = config['request']['signature']
-		timestamp = config['request']['timestamp']
-
-		# Vimeo specific: extract video codec and quality information
-		# TODO bind to format param
-		codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
-		for codec in codecs:
-			if codec[0] in config["video"]["files"]:
-				video_codec = codec[0]
-				video_extension = codec[1]
-				if 'hd' in config["video"]["files"][codec[0]]: quality = 'hd'
-				else: quality = 'sd'
-				break
-		else:
-			self._downloader.trouble(u'ERROR: no known codec found')
-			return
-
-		video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
-					%(video_id, sig, timestamp, quality, video_codec.upper())
-
-		return [{
-			'id':		video_id,
-			'url':		video_url,
-			'uploader':	video_uploader,
-			'upload_date':	video_upload_date,
-			'title':	video_title,
-			'ext':		video_extension,
-			'thumbnail':	video_thumbnail,
-			'description':	video_description,
-			'player_url':	None,
-		}]
+    """Information extractor for vimeo.com."""
+
+    # _VALID_URL matches Vimeo URLs
+    _VALID_URL = r'(?:https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?:videos?/)?([0-9]+)'
+    IE_NAME = u'vimeo'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)
+
+    def _real_extract(self, url, new_video=True):
+        # Extract ID from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(1)
+
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request(url, None, std_headers)
+        try:
+            self.report_download_webpage(video_id)
+            webpage_bytes = compat_urllib_request.urlopen(request).read()
+            webpage = webpage_bytes.decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+
+        # Now we begin extracting as much information as we can from what we
+        # retrieved. First we extract the information common to all extractors,
+        # and later we extract those that are Vimeo specific.
+        self.report_extraction(video_id)
+
+        # Extract the config JSON
+        try:
+            config = webpage.split(' = {config:')[1].split(',assets:')[0]
+            config = json.loads(config)
+        except (IndexError, ValueError):
+            self._downloader.trouble(u'ERROR: unable to extract info section')
+            return
+
+        # Extract title
+        video_title = config["video"]["title"]
+
+        # Extract uploader and uploader_id
+        video_uploader = config["video"]["owner"]["name"]
+        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1]
+
+        # Extract video thumbnail
+        video_thumbnail = config["video"]["thumbnail"]
+
+        # Extract video description
+        video_description = get_element_by_attribute("itemprop", "description", webpage)
+        if video_description:
+            video_description = clean_html(video_description)
+        else:
+            video_description = ''
+
+        # Extract upload date
+        video_upload_date = None
+        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
+        if mobj is not None:
+            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
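+        # e.g. content="2012-10-09T..." yields the YYYYMMDD string '20121009'.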
+
+        # Vimeo specific: extract request signature and timestamp
+        sig = config['request']['signature']
+        timestamp = config['request']['timestamp']
+
+        # Vimeo specific: extract video codec and quality information
+        # First consider quality, then codecs, then take everything
+        # TODO bind to format param
+        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
+        files = {'hd': [], 'sd': [], 'other': []}
+        for codec_name, codec_extension in codecs:
+            if codec_name in config["video"]["files"]:
+                if 'hd' in config["video"]["files"][codec_name]:
+                    files['hd'].append((codec_name, codec_extension, 'hd'))
+                elif 'sd' in config["video"]["files"][codec_name]:
+                    files['sd'].append((codec_name, codec_extension, 'sd'))
+                else:
+                    files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0]))
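+        # e.g. a video offering only an HD H.264 file ends up as
+        # {'hd': [('h264', 'mp4', 'hd')], 'sd': [], 'other': []}.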
+
+        for quality in ('hd', 'sd', 'other'):
+            if len(files[quality]) > 0:
+                video_quality = files[quality][0][2]
+                video_codec = files[quality][0][0]
+                video_extension = files[quality][0][1]
+                self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
+                break
+        else:
+            self._downloader.trouble(u'ERROR: no known codec found')
+            return
+
+        video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
+                    % (video_id, sig, timestamp, video_quality, video_codec.upper())
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
+            'upload_date':  video_upload_date,
+            'title':    video_title,
+            'ext':      video_extension,
+            'thumbnail':    video_thumbnail,
+            'description':  video_description,
+        }]
+
+
+class ArteTvIE(InfoExtractor):
+    """arte.tv information extractor."""
+
+    _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
+    _LIVE_URL = r'index-[0-9]+\.html$'
+
+    IE_NAME = u'arte.tv'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[arte.tv] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[arte.tv] %s: Extracting information' % video_id)
+
+    def fetch_webpage(self, url):
+        request = compat_urllib_request.Request(url)
+        try:
+            self.report_download_webpage(url)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+        except ValueError as err:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+        return webpage
+
+    def grep_webpage(self, url, regex, regexFlags, matchTuples):
+        page = self.fetch_webpage(url)
+        mobj = re.search(regex, page, regexFlags)
+        info = {}
+
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        for (i, key, err) in matchTuples:
+            if mobj.group(i) is None:
+                self._downloader.trouble(err)
+                return
+            else:
+                info[key] = mobj.group(i)
+
+        return info
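+    # Illustrative call (hypothetical regex and key): grep_webpage(url,
+    # r'vid=(\d+)', 0, [(1, 'id', u'ERROR: no id: %s' % url)]) -> {'id': '42'}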
+
+    def extractLiveStream(self, url):
+        video_lang = url.split('/')[-4]
+        info = self.grep_webpage(
+            url,
+            r'src="(.*?/videothek_js.*?\.js)',
+            0,
+            [
+                (1, 'url', u'ERROR: Invalid URL: %s' % url)
+            ]
+        )
+        http_host = url.split('/')[2]
+        next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
+        info = self.grep_webpage(
+            next_url,
+            r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
+                '(http://.*?\.swf).*?' +
+                '(rtmp://.*?)\'',
+            re.DOTALL,
+            [
+                (1, 'path',   u'ERROR: could not extract video path: %s' % url),
+                (2, 'player', u'ERROR: could not extract video player: %s' % url),
+                (3, 'url',    u'ERROR: could not extract video url: %s' % url)
+            ]
+        )
+        video_url = u'%s/%s' % (info.get('url'), info.get('path'))
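+        # Note: video_url is computed here but never returned, so live streams
+        # are effectively not downloadable through this extractor yet.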
+
+    def extractPlus7Stream(self, url):
+        video_lang = url.split('/')[-3]
+        info = self.grep_webpage(
+            url,
+            r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
+            0,
+            [
+                (1, 'url', u'ERROR: Invalid URL: %s' % url)
+            ]
+        )
+        next_url = compat_urllib_parse.unquote(info.get('url'))
+        info = self.grep_webpage(
+            next_url,
+            r'<video lang="%s" ref="(http[^\'"&]*)' % video_lang,
+            0,
+            [
+                (1, 'url', u'ERROR: Could not find <video> tag: %s' % url)
+            ]
+        )
+        next_url = compat_urllib_parse.unquote(info.get('url'))
+
+        info = self.grep_webpage(
+            next_url,
+            r'<video id="(.*?)".*?>.*?' +
+                '<name>(.*?)</name>.*?' +
+                '<dateVideo>(.*?)</dateVideo>.*?' +
+                '<url quality="hd">(.*?)</url>',
+            re.DOTALL,
+            [
+                (1, 'id',    u'ERROR: could not extract video id: %s' % url),
+                (2, 'title', u'ERROR: could not extract video title: %s' % url),
+                (3, 'date',  u'ERROR: could not extract video date: %s' % url),
+                (4, 'url',   u'ERROR: could not extract video url: %s' % url)
+            ]
+        )
+
+        return {
+            'id':           info.get('id'),
+            'url':          compat_urllib_parse.unquote(info.get('url')),
+            'uploader':     u'arte.tv',
+            'upload_date':  info.get('date'),
+            'title':        info.get('title').decode('utf-8'),
+            'ext':          u'mp4',
+            'format':       u'NA',
+            'player_url':   None,
+        }
+
+    def _real_extract(self, url):
+        video_id = url.split('/')[-1]
+        self.report_extraction(video_id)
+
+        if re.search(self._LIVE_URL, video_id) is not None:
+            self.extractLiveStream(url)
+            return
+        else:
+            info = self.extractPlus7Stream(url)
+
+        return [info]
 
 
 
 
 class GenericIE(InfoExtractor):
-	"""Generic last-resort information extractor."""
-
-	_VALID_URL = r'.*'
-	IE_NAME = u'generic'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_webpage(self, video_id):
-		"""Report webpage download."""
-		self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
-		self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)
-
-	def report_following_redirect(self, new_url):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
-		
-	def _test_redirect(self, url):
-		"""Check if it is a redirect, like url shorteners, in case restart chain."""
-		class HeadRequest(urllib2.Request):
-			def get_method(self):
-				return "HEAD"
-
-		class HEADRedirectHandler(urllib2.HTTPRedirectHandler):
-			"""
-			Subclass the HTTPRedirectHandler to make it use our 
-			HeadRequest also on the redirected URL
-			"""
-			def redirect_request(self, req, fp, code, msg, headers, newurl): 
-				if code in (301, 302, 303, 307):
-					newurl = newurl.replace(' ', '%20') 
-					newheaders = dict((k,v) for k,v in req.headers.items()
-									  if k.lower() not in ("content-length", "content-type"))
-					return HeadRequest(newurl, 
-									   headers=newheaders,
-									   origin_req_host=req.get_origin_req_host(), 
-									   unverifiable=True) 
-				else: 
-					raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp) 
-
-		class HTTPMethodFallback(urllib2.BaseHandler):
-			"""
-			Fallback to GET if HEAD is not allowed (405 HTTP error)
-			"""
-			def http_error_405(self, req, fp, code, msg, headers): 
-				fp.read()
-				fp.close()
-
-				newheaders = dict((k,v) for k,v in req.headers.items()
-								  if k.lower() not in ("content-length", "content-type"))
-				return self.parent.open(urllib2.Request(req.get_full_url(), 
-												 headers=newheaders, 
-												 origin_req_host=req.get_origin_req_host(), 
-												 unverifiable=True))
-
-		# Build our opener
-		opener = urllib2.OpenerDirector() 
-		for handler in [urllib2.HTTPHandler, urllib2.HTTPDefaultErrorHandler,
-						HTTPMethodFallback, HEADRedirectHandler,
-						urllib2.HTTPErrorProcessor, urllib2.HTTPSHandler]:
-			opener.add_handler(handler())
-
-		response = opener.open(HeadRequest(url))
-		new_url = response.geturl()
-		
-		if url == new_url: return False
-		
-		self.report_following_redirect(new_url)
-		self._downloader.download([new_url])
-		return True
-
-	def _real_extract(self, url):
-		if self._test_redirect(url): return
-
-		video_id = url.split('/')[-1]
-		request = urllib2.Request(url)
-		try:
-			self.report_download_webpage(video_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-			return
-		except ValueError, err:
-			# since this is the last-resort InfoExtractor, if
-			# this error is thrown, it'll be thrown here
-			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
-			return
-
-		self.report_extraction(video_id)
-		# Start with something easy: JW Player in SWFObject
-		mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
-		if mobj is None:
-			# Broaden the search a little bit
-			mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
-			return
-
-		# It's possible that one of the regexes
-		# matched, but returned an empty group:
-		if mobj.group(1) is None:
-			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
-			return
-
-		video_url = urllib.unquote(mobj.group(1))
-		video_id = os.path.basename(video_url)
-
-		# here's a fun little line of code for you:
-		video_extension = os.path.splitext(video_id)[1][1:]
-		video_id = os.path.splitext(video_id)[0]
-
-		# it's tempting to parse this further, but you would
-		# have to take into account all the variations like
-		#   Video Title - Site Name
-		#   Site Name | Video Title
-		#   Video Title - Tagline | Site Name
-		# and so on and so forth; it's just not practical
-		mobj = re.search(r'<title>(.*)</title>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract title')
-			return
-		video_title = mobj.group(1).decode('utf-8')
-
-		# video uploader is domain name
-		mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract title')
-			return
-		video_uploader = mobj.group(1).decode('utf-8')
-
-		return [{
-			'id':		video_id.decode('utf-8'),
-			'url':		video_url.decode('utf-8'),
-			'uploader':	video_uploader,
-			'upload_date':	u'NA',
-			'title':	video_title,
-			'ext':		video_extension.decode('utf-8'),
-			'format':	u'NA',
-			'player_url':	None,
-		}]
+    """Generic last-resort information extractor."""
+
+    _VALID_URL = r'.*'
+    IE_NAME = u'generic'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
+        self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)
+
+    def report_following_redirect(self, new_url):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
+
+    def _test_redirect(self, url):
+        """Check if it is a redirect, like url shorteners, in case restart chain."""
+        class HeadRequest(compat_urllib_request.Request):
+            def get_method(self):
+                return "HEAD"
+
+        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
+            """
+            Subclass the HTTPRedirectHandler to make it use our
+            HeadRequest also on the redirected URL
+            """
+            def redirect_request(self, req, fp, code, msg, headers, newurl):
+                if code in (301, 302, 303, 307):
+                    newurl = newurl.replace(' ', '%20')
+                    newheaders = dict((k,v) for k,v in req.headers.items()
+                                      if k.lower() not in ("content-length", "content-type"))
+                    return HeadRequest(newurl,
+                                       headers=newheaders,
+                                       origin_req_host=req.get_origin_req_host(),
+                                       unverifiable=True)
+                else:
+                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
+
+        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
+            """
+            Fallback to GET if HEAD is not allowed (405 HTTP error)
+            """
+            def http_error_405(self, req, fp, code, msg, headers):
+                fp.read()
+                fp.close()
+
+                newheaders = dict((k,v) for k,v in req.headers.items()
+                                  if k.lower() not in ("content-length", "content-type"))
+                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
+                                                 headers=newheaders,
+                                                 origin_req_host=req.get_origin_req_host(),
+                                                 unverifiable=True))
+
+        # Build our opener
+        opener = compat_urllib_request.OpenerDirector()
+        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
+                        HTTPMethodFallback, HEADRedirectHandler,
+                        compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
+            opener.add_handler(handler())
+
+        response = opener.open(HeadRequest(url))
+        new_url = response.geturl()
+
+        if url == new_url:
+            return False
+
+        self.report_following_redirect(new_url)
+        self._downloader.download([new_url])
+        return True
+
+    def _real_extract(self, url):
+        if self._test_redirect(url): return
+
+        video_id = url.split('/')[-1]
+        request = compat_urllib_request.Request(url)
+        try:
+            self.report_download_webpage(video_id)
+            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+        except ValueError as err:
+            # since this is the last-resort InfoExtractor, if
+            # this error is thrown, it'll be thrown here
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        self.report_extraction(video_id)
+        # Start with something easy: JW Player in SWFObject
+        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
+        if mobj is None:
+            # Broaden the search a little bit
+            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        # It's possible that one of the regexes
+        # matched, but returned an empty group:
+        if mobj.group(1) is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        video_url = compat_urllib_parse.unquote(mobj.group(1))
+        video_id = os.path.basename(video_url)
+
+        # here's a fun little line of code for you:
+        video_extension = os.path.splitext(video_id)[1][1:]
+        video_id = os.path.splitext(video_id)[0]
+
+        # it's tempting to parse this further, but you would
+        # have to take into account all the variations like
+        #   Video Title - Site Name
+        #   Site Name | Video Title
+        #   Video Title - Tagline | Site Name
+        # and so on and so forth; it's just not practical
+        mobj = re.search(r'<title>(.*)</title>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+        video_title = mobj.group(1)
+
+        # video uploader is domain name
+        mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video uploader')
+            return
+        video_uploader = mobj.group(1)
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': video_uploader,
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      video_extension,
+        }]
 
 
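The _test_redirect helper above resolves the final URL of a shortened link with a HEAD request, swapping in a GET when a server answers 405. A minimal Python 3 sketch of the same idea, using the stdlib urllib.request that the compat_* aliases point to (names here are illustrative; note the stdlib redirect handler reissues redirected requests as GET, which is exactly what the HEADRedirectHandler subclass avoids):

    import urllib.error
    import urllib.request

    def resolve_redirect(url):
        """Return the final URL after following any HTTP redirects."""
        req = urllib.request.Request(url, method='HEAD')  # Python 3.3+
        try:
            with urllib.request.urlopen(req) as resp:
                return resp.geturl()
        except urllib.error.HTTPError as err:
            if err.code == 405:
                # Server refuses HEAD; retry with a plain GET,
                # mirroring HTTPMethodFallback above.
                with urllib.request.urlopen(url) as resp:
                    return resp.geturl()
            raise
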
 class YoutubeSearchIE(InfoExtractor):
-	"""Information Extractor for YouTube search queries."""
-	_VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
-	_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
-	_max_youtube_results = 1000
-	IE_NAME = u'youtube:search'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_page(self, query, pagenum):
-		"""Report attempt to download search page with given number."""
-		query = query.decode(preferredencoding())
-		self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
-
-	def _real_extract(self, query):
-		mobj = re.match(self._VALID_URL, query)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
-			return
-
-		prefix, query = query.split(':')
-		prefix = prefix[8:]
-		query = query.encode('utf-8')
-		if prefix == '':
-			self._download_n_results(query, 1)
-			return
-		elif prefix == 'all':
-			self._download_n_results(query, self._max_youtube_results)
-			return
-		else:
-			try:
-				n = long(prefix)
-				if n <= 0:
-					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
-					return
-				elif n > self._max_youtube_results:
-					self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
-					n = self._max_youtube_results
-				self._download_n_results(query, n)
-				return
-			except ValueError: # parsing prefix as integer fails
-				self._download_n_results(query, 1)
-				return
-
-	def _download_n_results(self, query, n):
-		"""Downloads a specified number of results for a query"""
-
-		video_ids = []
-		pagenum = 0
-		limit = n
-
-		while (50 * pagenum) < limit:
-			self.report_download_page(query, pagenum+1)
-			result_url = self._API_URL % (urllib.quote_plus(query), (50*pagenum)+1)
-			request = urllib2.Request(result_url)
-			try:
-				data = urllib2.urlopen(request).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download API page: %s' % str(err))
-				return
-			api_response = json.loads(data)['data']
-
-			new_ids = list(video['id'] for video in api_response['items'])
-			video_ids += new_ids
-
-			limit = min(n, api_response['totalItems'])
-			pagenum += 1
-
-		if len(video_ids) > n:
-			video_ids = video_ids[:n]
-		for id in video_ids:
-			self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
-		return
+    """Information Extractor for YouTube search queries."""
+    _VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
+    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
+    _max_youtube_results = 1000
+    IE_NAME = u'youtube:search'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, query, pagenum):
+        """Report attempt to download search page with given number."""
+        query = query.decode(preferredencoding())
+        self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
+
+    def _real_extract(self, query):
+        mobj = re.match(self._VALID_URL, query)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            return
+
+        prefix, query = query.split(':', 1)
+        prefix = prefix[8:]
+        query = query.encode('utf-8')
+        if prefix == '':
+            self._download_n_results(query, 1)
+            return
+        elif prefix == 'all':
+            self._download_n_results(query, self._max_youtube_results)
+            return
+        else:
+            try:
+                n = int(prefix)
+                if n <= 0:
+                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    return
+                elif n > self._max_youtube_results:
+                    self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
+                    n = self._max_youtube_results
+                self._download_n_results(query, n)
+                return
+            except ValueError: # parsing prefix as integer fails
+                self._download_n_results(query, 1)
+                return
+
+    def _download_n_results(self, query, n):
+        """Downloads a specified number of results for a query"""
+
+        video_ids = []
+        pagenum = 0
+        limit = n
+
+        while (50 * pagenum) < limit:
+            self.report_download_page(query, pagenum+1)
+            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
+            request = compat_urllib_request.Request(result_url)
+            try:
+                data = compat_urllib_request.urlopen(request).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
+                return
+            api_response = json.loads(data)['data']
+
+            new_ids = list(video['id'] for video in api_response['items'])
+            video_ids += new_ids
+
+            limit = min(n, api_response['totalItems'])
+            pagenum += 1
+
+        if len(video_ids) > n:
+            video_ids = video_ids[:n]
+        for id in video_ids:
+            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
+        return
 
 
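The prefix handling above (ytsearch:, ytsearchall:, ytsearchN:) reappears, with different maxima, in the Google and Yahoo search extractors below. A hypothetical standalone helper (the name and signature are illustrative, not part of the module) showing the same parsing rules:

    def parse_search_query(query, prefix_len=8, maximum=1000):
        """Split e.g. 'ytsearch25:some terms' into (25, 'some terms')."""
        count, terms = query.split(':', 1)  # the terms may contain ':'
        count = count[prefix_len:]          # strip 'ytsearch'
        if count == '':
            return 1, terms                 # no count: single result
        if count == 'all':
            return maximum, terms           # 'all': API maximum
        return min(int(count), maximum), terms

    # parse_search_query('ytsearch25:free software') -> (25, 'free software')
    # parse_search_query('ytsearchall:foo')          -> (1000, 'foo')
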
 class GoogleSearchIE(InfoExtractor):
-	"""Information Extractor for Google Video search queries."""
-	_VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+'
-	_TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
-	_VIDEO_INDICATOR = r'<a href="http://video\.google\.com/videoplay\?docid=([^"\&]+)'
-	_MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"'
-	_max_google_results = 1000
-	IE_NAME = u'video.google:search'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_page(self, query, pagenum):
-		"""Report attempt to download playlist page with given number."""
-		query = query.decode(preferredencoding())
-		self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
-
-	def _real_extract(self, query):
-		mobj = re.match(self._VALID_URL, query)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
-			return
-
-		prefix, query = query.split(':')
-		prefix = prefix[8:]
-		query = query.encode('utf-8')
-		if prefix == '':
-			self._download_n_results(query, 1)
-			return
-		elif prefix == 'all':
-			self._download_n_results(query, self._max_google_results)
-			return
-		else:
-			try:
-				n = long(prefix)
-				if n <= 0:
-					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
-					return
-				elif n > self._max_google_results:
-					self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
-					n = self._max_google_results
-				self._download_n_results(query, n)
-				return
-			except ValueError: # parsing prefix as integer fails
-				self._download_n_results(query, 1)
-				return
-
-	def _download_n_results(self, query, n):
-		"""Downloads a specified number of results for a query"""
-
-		video_ids = []
-		pagenum = 0
-
-		while True:
-			self.report_download_page(query, pagenum)
-			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum*10)
-			request = urllib2.Request(result_url)
-			try:
-				page = urllib2.urlopen(request).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
-				return
-
-			# Extract video identifiers
-			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-				video_id = mobj.group(1)
-				if video_id not in video_ids:
-					video_ids.append(video_id)
-					if len(video_ids) == n:
-						# Specified n videos reached
-						for id in video_ids:
-							self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
-						return
-
-			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
-				for id in video_ids:
-					self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
-				return
-
-			pagenum = pagenum + 1
+    """Information Extractor for Google Video search queries."""
+    _VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+'
+    _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
+    _VIDEO_INDICATOR = r'<a href="http://video\.google\.com/videoplay\?docid=([^"\&]+)'
+    _MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"'
+    _max_google_results = 1000
+    IE_NAME = u'video.google:search'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, query, pagenum):
+        """Report attempt to download search page with given number."""
+        query = query.decode(preferredencoding())
+        self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
+
+    def _real_extract(self, query):
+        mobj = re.match(self._VALID_URL, query)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            return
+
+        prefix, query = query.split(':', 1)
+        prefix = prefix[8:]
+        query = query.encode('utf-8')
+        if prefix == '':
+            self._download_n_results(query, 1)
+            return
+        elif prefix == 'all':
+            self._download_n_results(query, self._max_google_results)
+            return
+        else:
+            try:
+                n = int(prefix)
+                if n <= 0:
+                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    return
+                elif n > self._max_google_results:
+                    self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
+                    n = self._max_google_results
+                self._download_n_results(query, n)
+                return
+            except ValueError: # parsing prefix as integer fails
+                self._download_n_results(query, 1)
+                return
+
+    def _download_n_results(self, query, n):
+        """Downloads a specified number of results for a query"""
+
+        video_ids = []
+        pagenum = 0
+
+        while True:
+            self.report_download_page(query, pagenum)
+            result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum*10)
+            request = compat_urllib_request.Request(result_url)
+            try:
+                page = compat_urllib_request.urlopen(request).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
+
+            # Extract video identifiers
+            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+                video_id = mobj.group(1)
+                if video_id not in video_ids:
+                    video_ids.append(video_id)
+                    if len(video_ids) == n:
+                        # Specified n videos reached
+                        for id in video_ids:
+                            self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
+                        return
+
+            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+                for id in video_ids:
+                    self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
+                return
+
+            pagenum = pagenum + 1
 
 
 class YahooSearchIE(InfoExtractor):
-	"""Information Extractor for Yahoo! Video search queries."""
-	_VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
-	_TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
-	_VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
-	_MORE_PAGES_INDICATOR = r'\s*Next'
-	_max_yahoo_results = 1000
-	IE_NAME = u'video.yahoo:search'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_page(self, query, pagenum):
-		"""Report attempt to download playlist page with given number."""
-		query = query.decode(preferredencoding())
-		self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
-
-	def _real_extract(self, query):
-		mobj = re.match(self._VALID_URL, query)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
-			return
-
-		prefix, query = query.split(':')
-		prefix = prefix[8:]
-		query = query.encode('utf-8')
-		if prefix == '':
-			self._download_n_results(query, 1)
-			return
-		elif prefix == 'all':
-			self._download_n_results(query, self._max_yahoo_results)
-			return
-		else:
-			try:
-				n = long(prefix)
-				if n <= 0:
-					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
-					return
-				elif n > self._max_yahoo_results:
-					self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
-					n = self._max_yahoo_results
-				self._download_n_results(query, n)
-				return
-			except ValueError: # parsing prefix as integer fails
-				self._download_n_results(query, 1)
-				return
-
-	def _download_n_results(self, query, n):
-		"""Downloads a specified number of results for a query"""
-
-		video_ids = []
-		already_seen = set()
-		pagenum = 1
-
-		while True:
-			self.report_download_page(query, pagenum)
-			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
-			request = urllib2.Request(result_url)
-			try:
-				page = urllib2.urlopen(request).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
-				return
-
-			# Extract video identifiers
-			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-				video_id = mobj.group(1)
-				if video_id not in already_seen:
-					video_ids.append(video_id)
-					already_seen.add(video_id)
-					if len(video_ids) == n:
-						# Specified n videos reached
-						for id in video_ids:
-							self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
-						return
-
-			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
-				for id in video_ids:
-					self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
-				return
-
-			pagenum = pagenum + 1
+    """Information Extractor for Yahoo! Video search queries."""
+
+    _WORKING = False
+    _VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
+    _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
+    _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
+    _MORE_PAGES_INDICATOR = r'\s*Next'
+    _max_yahoo_results = 1000
+    IE_NAME = u'video.yahoo:search'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, query, pagenum):
+        """Report attempt to download search page with given number."""
+        query = query.decode(preferredencoding())
+        self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
+
+    def _real_extract(self, query):
+        mobj = re.match(self._VALID_URL, query)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            return
+
+        prefix, query = query.split(':', 1)
+        prefix = prefix[8:]
+        query = query.encode('utf-8')
+        if prefix == '':
+            self._download_n_results(query, 1)
+            return
+        elif prefix == 'all':
+            self._download_n_results(query, self._max_yahoo_results)
+            return
+        else:
+            try:
+                n = int(prefix)
+                if n <= 0:
+                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    return
+                elif n > self._max_yahoo_results:
+                    self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
+                    n = self._max_yahoo_results
+                self._download_n_results(query, n)
+                return
+            except ValueError: # parsing prefix as integer fails
+                self._download_n_results(query, 1)
+                return
+
+    def _download_n_results(self, query, n):
+        """Downloads a specified number of results for a query"""
+
+        video_ids = []
+        already_seen = set()
+        pagenum = 1
+
+        while True:
+            self.report_download_page(query, pagenum)
+            result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum)
+            request = compat_urllib_request.Request(result_url)
+            try:
+                page = compat_urllib_request.urlopen(request).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
+
+            # Extract video identifiers
+            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+                video_id = mobj.group(1)
+                if video_id not in already_seen:
+                    video_ids.append(video_id)
+                    already_seen.add(video_id)
+                    if len(video_ids) == n:
+                        # Specified n videos reached
+                        for id in video_ids:
+                            self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
+                        return
+
+            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+                for id in video_ids:
+                    self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
+                return
+
+            pagenum = pagenum + 1
 
 
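GoogleSearchIE checks `video_id not in video_ids` against a plain list (a linear scan per lookup), while YahooSearchIE pairs the list with an already_seen set; both keep ids in first-seen page order. The set-plus-list idiom on its own, as a sketch:

    def unique_in_order(ids):
        """Drop duplicate ids while preserving first-seen order."""
        seen = set()
        ordered = []
        for video_id in ids:
            if video_id not in seen:  # O(1) membership test
                seen.add(video_id)
                ordered.append(video_id)
        return ordered

    # unique_in_order(['a', 'b', 'a', 'c', 'b']) -> ['a', 'b', 'c']
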
 class YoutubePlaylistIE(InfoExtractor):
-	"""Information Extractor for YouTube playlists."""
-
-	_VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
-	_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
-	_VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;([^&"]+&amp;)*list=.*?%s'
-	_MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
-	IE_NAME = u'youtube:playlist'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_page(self, playlist_id, pagenum):
-		"""Report attempt to download playlist page with given number."""
-		self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
-
-	def _real_extract(self, url):
-		# Extract playlist id
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
-			return
-
-		# Single video case
-		if mobj.group(3) is not None:
-			self._downloader.download([mobj.group(3)])
-			return
-
-		# Download playlist pages
-		# prefix is 'p' as default for playlists but there are other types that need extra care
-		playlist_prefix = mobj.group(1)
-		if playlist_prefix == 'a':
-			playlist_access = 'artist'
-		else:
-			playlist_prefix = 'p'
-			playlist_access = 'view_play_list'
-		playlist_id = mobj.group(2)
-		video_ids = []
-		pagenum = 1
-
-		while True:
-			self.report_download_page(playlist_id, pagenum)
-			url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
-			request = urllib2.Request(url)
-			try:
-				page = urllib2.urlopen(request).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
-				return
-
-			# Extract video identifiers
-			ids_in_page = []
-			for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
-				if mobj.group(1) not in ids_in_page:
-					ids_in_page.append(mobj.group(1))
-			video_ids.extend(ids_in_page)
-
-			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
-				break
-			pagenum = pagenum + 1
-
-		playliststart = self._downloader.params.get('playliststart', 1) - 1
-		playlistend = self._downloader.params.get('playlistend', -1)
-		if playlistend == -1:
-			video_ids = video_ids[playliststart:]
-		else:
-			video_ids = video_ids[playliststart:playlistend]
-
-		for id in video_ids:
-			self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
-		return
+    """Information Extractor for YouTube playlists."""
+
+    _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
+    _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
+    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;([^&"]+&amp;)*list=.*?%s'
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+    IE_NAME = u'youtube:playlist'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, playlist_id, pagenum):
+        """Report attempt to download playlist page with given number."""
+        self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
+
+    def _real_extract(self, url):
+        # Extract playlist id
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            return
+
+        # Single video case
+        if mobj.group(3) is not None:
+            self._downloader.download([mobj.group(3)])
+            return
+
+        # Download playlist pages
+        # the prefix defaults to 'p' for ordinary playlists, but other list types need extra care
+        playlist_prefix = mobj.group(1)
+        if playlist_prefix == 'a':
+            playlist_access = 'artist'
+        else:
+            playlist_prefix = 'p'
+            playlist_access = 'view_play_list'
+        playlist_id = mobj.group(2)
+        video_ids = []
+        pagenum = 1
+
+        while True:
+            self.report_download_page(playlist_id, pagenum)
+            url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
+            request = compat_urllib_request.Request(url)
+            try:
+                page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
+
+            # Extract video identifiers
+            ids_in_page = []
+            for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(mobj.group(1))
+            video_ids.extend(ids_in_page)
+
+            if self._MORE_PAGES_INDICATOR not in page:
+                break
+            pagenum = pagenum + 1
+
+        total = len(video_ids)
+
+        playliststart = self._downloader.params.get('playliststart', 1) - 1
+        playlistend = self._downloader.params.get('playlistend', -1)
+        if playlistend == -1:
+            video_ids = video_ids[playliststart:]
+        else:
+            video_ids = video_ids[playliststart:playlistend]
+
+        if len(video_ids) == total:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
+        else:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
+
+        for id in video_ids:
+            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
+        return
 
 
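The playliststart/playlistend handling above turns the 1-based --playlist-start option into a 0-based index and treats -1 as an open upper bound. The same slicing as a standalone sketch:

    def select_playlist_items(video_ids, playliststart=1, playlistend=-1):
        """Apply the 1-based start / -1-means-open-end slicing."""
        start = playliststart - 1     # the option counts from 1
        if playlistend == -1:
            return video_ids[start:]  # no upper bound
        return video_ids[start:playlistend]

    # select_playlist_items(['a', 'b', 'c', 'd'], 2)    -> ['b', 'c', 'd']
    # select_playlist_items(['a', 'b', 'c', 'd'], 2, 3) -> ['b', 'c']
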
 class YoutubeChannelIE(InfoExtractor):
-	"""Information Extractor for YouTube channels."""
-
-	_VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$"
-	_TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
-	_MORE_PAGES_INDICATOR = r'yt-uix-button-content">Next' # TODO
-	IE_NAME = u'youtube:channel'
-
-	def report_download_page(self, channel_id, pagenum):
-		"""Report attempt to download channel page with given number."""
-		self._downloader.to_screen(u'[youtube] Channel %s: Downloading page #%s' % (channel_id, pagenum))
-
-	def _real_extract(self, url):
-		# Extract channel id
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
-			return
-
-		# Download channel pages
-		channel_id = mobj.group(1)
-		video_ids = []
-		pagenum = 1
-
-		while True:
-			self.report_download_page(channel_id, pagenum)
-			url = self._TEMPLATE_URL % (channel_id, pagenum)
-			request = urllib2.Request(url)
-			try:
-				page = urllib2.urlopen(request).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
-				return
-
-			# Extract video identifiers
-			ids_in_page = []
-			for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&', page):
-				if mobj.group(1) not in ids_in_page:
-					ids_in_page.append(mobj.group(1))
-			video_ids.extend(ids_in_page)
-
-			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
-				break
-			pagenum = pagenum + 1
-
-		for id in video_ids:
-			self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
-		return
+    """Information Extractor for YouTube channels."""
+
+    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$"
+    _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+    IE_NAME = u'youtube:channel'
+
+    def report_download_page(self, channel_id, pagenum):
+        """Report attempt to download channel page with given number."""
+        self._downloader.to_screen(u'[youtube] Channel %s: Downloading page #%s' % (channel_id, pagenum))
+
+    def _real_extract(self, url):
+        # Extract channel id
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            return
+
+        # Download channel pages
+        channel_id = mobj.group(1)
+        video_ids = []
+        pagenum = 1
+
+        while True:
+            self.report_download_page(channel_id, pagenum)
+            url = self._TEMPLATE_URL % (channel_id, pagenum)
+            request = compat_urllib_request.Request(url)
+            try:
+                page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
+
+            # Extract video identifiers
+            ids_in_page = []
+            for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&', page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(mobj.group(1))
+            video_ids.extend(ids_in_page)
+
+            if self._MORE_PAGES_INDICATOR not in page:
+                break
+            pagenum = pagenum + 1
+
+        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
+
+        for id in video_ids:
+            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
+        return
 
 
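Both the playlist and channel paging loops above trade the old regex for a plain substring test, and the indicator uses a named Unicode escape so the source file stays ASCII. The escape resolves to U+00BB, as this quick check shows:

    # \N{...} is resolved at compile time in unicode literals
    indicator = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
    assert indicator == u"Next \u00bb" == u"Next »"
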
 class YoutubeUserIE(InfoExtractor):
-	"""Information Extractor for YouTube users."""
+    """Information Extractor for YouTube users."""
 
-	_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
-	_TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
-	_GDATA_PAGE_SIZE = 50
-	_GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
-	_VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
-	IE_NAME = u'youtube:user'
+    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
+    _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
+    _GDATA_PAGE_SIZE = 50
+    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
+    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
+    IE_NAME = u'youtube:user'
 
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
 
-	def report_download_page(self, username, start_index):
-		"""Report attempt to download user page."""
-		self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
-				(username, start_index, start_index + self._GDATA_PAGE_SIZE))
+    def report_download_page(self, username, start_index):
+        """Report attempt to download user page."""
+        self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
+                (username, start_index, start_index + self._GDATA_PAGE_SIZE))
 
-	def _real_extract(self, url):
-		# Extract username
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
-			return
+    def _real_extract(self, url):
+        # Extract username
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            return
 
-		username = mobj.group(1)
+        username = mobj.group(1)
 
-		# Download video ids using YouTube Data API. Result size per
-		# query is limited (currently to 50 videos) so we need to query
-		# page by page until there are no video ids - it means we got
-		# all of them.
+        # Download video ids using the YouTube Data API. The result size
+        # per query is limited (currently to 50 videos), so we query
+        # page by page until a page comes back without video ids, which
+        # means we have got all of them.
 
-		video_ids = []
-		pagenum = 0
+        video_ids = []
+        pagenum = 0
 
-		while True:
-			start_index = pagenum * self._GDATA_PAGE_SIZE + 1
-			self.report_download_page(username, start_index)
+        while True:
+            start_index = pagenum * self._GDATA_PAGE_SIZE + 1
+            self.report_download_page(username, start_index)
 
-			request = urllib2.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
+            request = compat_urllib_request.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
 
-			try:
-				page = urllib2.urlopen(request).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
-				return
+            try:
+                page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
 
-			# Extract video identifiers
-			ids_in_page = []
+            # Extract video identifiers
+            ids_in_page = []
 
-			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-				if mobj.group(1) not in ids_in_page:
-					ids_in_page.append(mobj.group(1))
+            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(mobj.group(1))
 
-			video_ids.extend(ids_in_page)
+            video_ids.extend(ids_in_page)
 
-			# A little optimization - if current page is not
-			# "full", ie. does not contain PAGE_SIZE video ids then
-			# we can assume that this page is the last one - there
-			# are no more ids on further pages - no need to query
-			# again.
+            # A little optimization: if the current page is not
+            # "full", i.e. does not contain PAGE_SIZE video ids,
+            # we can assume it is the last one - there are no more
+            # ids on further pages, so there is no need to query
+            # again.
 
-			if len(ids_in_page) < self._GDATA_PAGE_SIZE:
-				break
+            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
+                break
 
-			pagenum += 1
+            pagenum += 1
 
-		all_ids_count = len(video_ids)
-		playliststart = self._downloader.params.get('playliststart', 1) - 1
-		playlistend = self._downloader.params.get('playlistend', -1)
+        all_ids_count = len(video_ids)
+        playliststart = self._downloader.params.get('playliststart', 1) - 1
+        playlistend = self._downloader.params.get('playlistend', -1)
 
-		if playlistend == -1:
-			video_ids = video_ids[playliststart:]
-		else:
-			video_ids = video_ids[playliststart:playlistend]
+        if playlistend == -1:
+            video_ids = video_ids[playliststart:]
+        else:
+            video_ids = video_ids[playliststart:playlistend]
 
-		self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" %
-				(username, all_ids_count, len(video_ids)))
+        self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" %
+                (username, all_ids_count, len(video_ids)))
 
-		for video_id in video_ids:
-			self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
+        for video_id in video_ids:
+            self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
 
 
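The "page not full" optimization above is a general pagination pattern: keep fetching while every page returns the full page size, and stop at the first short page. A self-contained sketch, with a stub standing in for the GData request (the stub and its 123-item catalogue are made up for illustration):

    PAGE_SIZE = 50

    def fetch_page(start_index):
        # Stand-in for the GData request; returns at most PAGE_SIZE ids.
        catalogue = ['video%d' % i for i in range(1, 124)]
        return catalogue[start_index - 1:start_index - 1 + PAGE_SIZE]

    def collect_all_ids():
        video_ids = []
        pagenum = 0
        while True:
            start_index = pagenum * PAGE_SIZE + 1  # GData indices are 1-based
            ids_in_page = fetch_page(start_index)
            video_ids.extend(ids_in_page)
            if len(ids_in_page) < PAGE_SIZE:       # short page: nothing follows
                break
            pagenum += 1
        return video_ids

    # collect_all_ids() gathers all 123 ids in three requests
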
 class BlipTVUserIE(InfoExtractor):
-	"""Information Extractor for blip.tv users."""
+    """Information Extractor for blip.tv users."""
 
-	_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
-	_PAGE_SIZE = 12
-	IE_NAME = u'blip.tv:user'
+    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
+    _PAGE_SIZE = 12
+    IE_NAME = u'blip.tv:user'
 
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
 
-	def report_download_page(self, username, pagenum):
-		"""Report attempt to download user page."""
-		self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' %
-				(self.IE_NAME, username, pagenum))
+    def report_download_page(self, username, pagenum):
+        """Report attempt to download user page."""
+        self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' %
+                (self.IE_NAME, username, pagenum))
 
-	def _real_extract(self, url):
-		# Extract username
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
-			return
+    def _real_extract(self, url):
+        # Extract username
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            return
 
-		username = mobj.group(1)
+        username = mobj.group(1)
 
-		page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
+        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
 
-		request = urllib2.Request(url)
+        request = compat_urllib_request.Request(url)
 
-		try:
-			page = urllib2.urlopen(request).read().decode('utf-8')
-			mobj = re.search(r'data-users-id="([^"]+)"', page)
-			page_base = page_base % mobj.group(1)
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
-			return
+        try:
+            page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            mobj = re.search(r'data-users-id="([^"]+)"', page)
+            page_base = page_base % mobj.group(1)
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            return
 
 
-		# Download video ids using BlipTV Ajax calls. Result size per
-		# query is limited (currently to 12 videos) so we need to query
-		# page by page until there are no video ids - it means we got
-		# all of them.
+        # Download video ids using BlipTV Ajax calls. The result size
+        # per query is limited (currently to 12 videos), so we query
+        # page by page until a page comes back without video ids, which
+        # means we have got all of them.
 
-		video_ids = []
-		pagenum = 1
+        video_ids = []
+        pagenum = 1
 
-		while True:
-			self.report_download_page(username, pagenum)
+        while True:
+            self.report_download_page(username, pagenum)
 
-			request = urllib2.Request( page_base + "&page=" + str(pagenum) )
+            request = compat_urllib_request.Request(page_base + "&page=" + str(pagenum))
 
-			try:
-				page = urllib2.urlopen(request).read().decode('utf-8')
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
-				return
+            try:
+                page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
 
-			# Extract video identifiers
-			ids_in_page = []
+            # Extract video identifiers
+            ids_in_page = []
 
-			for mobj in re.finditer(r'href="/([^"]+)"', page):
-				if mobj.group(1) not in ids_in_page:
-					ids_in_page.append(unescapeHTML(mobj.group(1)))
+            for mobj in re.finditer(r'href="/([^"]+)"', page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(unescapeHTML(mobj.group(1)))
 
-			video_ids.extend(ids_in_page)
+            video_ids.extend(ids_in_page)
 
-			# A little optimization - if current page is not
-			# "full", ie. does not contain PAGE_SIZE video ids then
-			# we can assume that this page is the last one - there
-			# are no more ids on further pages - no need to query
-			# again.
+            # A little optimization: if the current page is not
+            # "full", i.e. does not contain PAGE_SIZE video ids,
+            # we can assume it is the last one - there are no more
+            # ids on further pages, so there is no need to query
+            # again.
 
-			if len(ids_in_page) < self._PAGE_SIZE:
-				break
+            if len(ids_in_page) < self._PAGE_SIZE:
+                break
 
-			pagenum += 1
+            pagenum += 1
 
-		all_ids_count = len(video_ids)
-		playliststart = self._downloader.params.get('playliststart', 1) - 1
-		playlistend = self._downloader.params.get('playlistend', -1)
+        all_ids_count = len(video_ids)
+        playliststart = self._downloader.params.get('playliststart', 1) - 1
+        playlistend = self._downloader.params.get('playlistend', -1)
 
-		if playlistend == -1:
-			video_ids = video_ids[playliststart:]
-		else:
-			video_ids = video_ids[playliststart:playlistend]
+        if playlistend == -1:
+            video_ids = video_ids[playliststart:]
+        else:
+            video_ids = video_ids[playliststart:playlistend]
 
-		self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
-				(self.IE_NAME, username, all_ids_count, len(video_ids)))
+        self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
+                (self.IE_NAME, username, all_ids_count, len(video_ids)))
 
-		for video_id in video_ids:
-			self._downloader.download([u'http://blip.tv/'+video_id])
+        for video_id in video_ids:
+            self._downloader.download([u'http://blip.tv/'+video_id])
 
 
 class DepositFilesIE(InfoExtractor):
-	"""Information extractor for depositfiles.com"""
-
-	_VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
-	IE_NAME = u'DepositFiles'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_webpage(self, file_id):
-		"""Report webpage download."""
-		self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
-
-	def report_extraction(self, file_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)
-
-	def _real_extract(self, url):
-		file_id = url.split('/')[-1]
-		# Rebuild url in english locale
-		url = 'http://depositfiles.com/en/files/' + file_id
-
-		# Retrieve file webpage with 'Free download' button pressed
-		free_download_indication = { 'gateway_result' : '1' }
-		request = urllib2.Request(url, urllib.urlencode(free_download_indication))
-		try:
-			self.report_download_webpage(file_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err))
-			return
-
-		# Search for the real file URL
-		mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
-		if (mobj is None) or (mobj.group(1) is None):
-			# Try to figure out reason of the error.
-			mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
-			if (mobj is not None) and (mobj.group(1) is not None):
-				restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
-				self._downloader.trouble(u'ERROR: %s' % restriction_message)
-			else:
-				self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
-			return
-
-		file_url = mobj.group(1)
-		file_extension = os.path.splitext(file_url)[1][1:]
-
-		# Search for file title
-		mobj = re.search(r'<b title="(.*?)">', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract title')
-			return
-		file_title = mobj.group(1).decode('utf-8')
-
-		return [{
-			'id':		file_id.decode('utf-8'),
-			'url':		file_url.decode('utf-8'),
-			'uploader':	u'NA',
-			'upload_date':	u'NA',
-			'title':	file_title,
-			'ext':		file_extension.decode('utf-8'),
-			'format':	u'NA',
-			'player_url':	None,
-		}]
+    """Information extractor for depositfiles.com"""
+
+    _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
+
+    def report_download_webpage(self, file_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
+
+    def report_extraction(self, file_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)
+
+    def _real_extract(self, url):
+        file_id = url.split('/')[-1]
+        # Rebuild the URL with the English locale
+        url = 'http://depositfiles.com/en/files/' + file_id
+
+        # Retrieve file webpage with 'Free download' button pressed
+        free_download_indication = { 'gateway_result' : '1' }
+        request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication))
+        try:
+            self.report_download_webpage(file_id)
+            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
+            return
+
+        # Search for the real file URL
+        mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
+        if (mobj is None) or (mobj.group(1) is None):
+            # Try to figure out the reason for the error.
+            mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
+            if (mobj is not None) and (mobj.group(1) is not None):
+                restriction_message = re.sub(r'\s+', ' ', mobj.group(1)).strip()
+                self._downloader.trouble(u'ERROR: %s' % restriction_message)
+            else:
+                self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
+            return
+
+        file_url = mobj.group(1)
+        file_extension = os.path.splitext(file_url)[1][1:]
+
+        # Search for file title
+        mobj = re.search(r'<b title="(.*?)">', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+        file_title = mobj.group(1)
+
+        return [{
+            'id':       file_id,
+            'url':      file_url,
+            'uploader': None,
+            'upload_date':  None,
+            'title':    file_title,
+            'ext':      file_extension,
+        }]
 
 
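DepositFilesIE presses the "Free download" button by POSTing gateway_result=1 before scraping the page. A minimal Python 3 sketch of that request, using the stdlib modules behind the compat_* aliases (the function name is illustrative; on Python 3 the URL-encoded body must be bytes):

    import urllib.parse
    import urllib.request

    def fetch_free_download_page(file_id):
        url = 'http://depositfiles.com/en/files/' + file_id
        form = urllib.parse.urlencode({'gateway_result': '1'}).encode('ascii')
        request = urllib.request.Request(url, data=form)  # data= makes it a POST
        with urllib.request.urlopen(request) as resp:
            return resp.read().decode('utf-8')
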
 class FacebookIE(InfoExtractor):
-	"""Information Extractor for Facebook"""
-
-	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
-	_LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
-	_NETRC_MACHINE = 'facebook'
-	_available_formats = ['video', 'highqual', 'lowqual']
-	_video_extensions = {
-		'video': 'mp4',
-		'highqual': 'mp4',
-		'lowqual': 'mp4',
-	}
-	IE_NAME = u'facebook'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def _reporter(self, message):
-		"""Add header and report message."""
-		self._downloader.to_screen(u'[facebook] %s' % message)
-
-	def report_login(self):
-		"""Report attempt to log in."""
-		self._reporter(u'Logging in')
-
-	def report_video_webpage_download(self, video_id):
-		"""Report attempt to download video webpage."""
-		self._reporter(u'%s: Downloading video webpage' % video_id)
-
-	def report_information_extraction(self, video_id):
-		"""Report attempt to extract video information."""
-		self._reporter(u'%s: Extracting video information' % video_id)
-
-	def _parse_page(self, video_webpage):
-		"""Extract video information from page"""
-		# General data
-		data = {'title': r'\("video_title", "(.*?)"\)',
-			'description': r'<div class="datawrap">(.*?)</div>',
-			'owner': r'\("video_owner_name", "(.*?)"\)',
-			'thumbnail':  r'\("thumb_url", "(?P<THUMB>.*?)"\)',
-			}
-		video_info = {}
-		for piece in data.keys():
-			mobj = re.search(data[piece], video_webpage)
-			if mobj is not None:
-				video_info[piece] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
-
-		# Video urls
-		video_urls = {}
-		for fmt in self._available_formats:
-			mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
-			if mobj is not None:
-				# URL is in a Javascript segment inside an escaped Unicode format within
-				# the generally utf-8 page
-				video_urls[fmt] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
-		video_info['video_urls'] = video_urls
-
-		return video_info
-
-	def _real_initialize(self):
-		if self._downloader is None:
-			return
-
-		useremail = None
-		password = None
-		downloader_params = self._downloader.params
-
-		# Attempt to use provided username and password or .netrc data
-		if downloader_params.get('username', None) is not None:
-			useremail = downloader_params['username']
-			password = downloader_params['password']
-		elif downloader_params.get('usenetrc', False):
-			try:
-				info = netrc.netrc().authenticators(self._NETRC_MACHINE)
-				if info is not None:
-					useremail = info[0]
-					password = info[2]
-				else:
-					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
-			except (IOError, netrc.NetrcParseError), err:
-				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
-				return
-
-		if useremail is None:
-			return
-
-		# Log in
-		login_form = {
-			'email': useremail,
-			'pass': password,
-			'login': 'Log+In'
-			}
-		request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
-		try:
-			self.report_login()
-			login_results = urllib2.urlopen(request).read()
-			if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
-				self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
-				return
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
-			return
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-		video_id = mobj.group('ID')
-
-		# Get video webpage
-		self.report_video_webpage_download(video_id)
-		request = urllib2.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
-		try:
-			page = urllib2.urlopen(request)
-			video_webpage = page.read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
-			return
-
-		# Start extracting information
-		self.report_information_extraction(video_id)
-
-		# Extract information
-		video_info = self._parse_page(video_webpage)
-
-		# uploader
-		if 'owner' not in video_info:
-			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
-			return
-		video_uploader = video_info['owner']
-
-		# title
-		if 'title' not in video_info:
-			self._downloader.trouble(u'ERROR: unable to extract video title')
-			return
-		video_title = video_info['title']
-		video_title = video_title.decode('utf-8')
-
-		# thumbnail image
-		if 'thumbnail' not in video_info:
-			self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
-			video_thumbnail = ''
-		else:
-			video_thumbnail = video_info['thumbnail']
-
-		# upload date
-		upload_date = u'NA'
-		if 'upload_date' in video_info:
-			upload_time = video_info['upload_date']
-			timetuple = email.utils.parsedate_tz(upload_time)
-			if timetuple is not None:
-				try:
-					upload_date = time.strftime('%Y%m%d', timetuple[0:9])
-				except:
-					pass
-
-		# description
-		video_description = video_info.get('description', 'No description available.')
-
-		url_map = video_info['video_urls']
-		if len(url_map.keys()) > 0:
-			# Decide which formats to download
-			req_format = self._downloader.params.get('format', None)
-			format_limit = self._downloader.params.get('format_limit', None)
-
-			if format_limit is not None and format_limit in self._available_formats:
-				format_list = self._available_formats[self._available_formats.index(format_limit):]
-			else:
-				format_list = self._available_formats
-			existing_formats = [x for x in format_list if x in url_map]
-			if len(existing_formats) == 0:
-				self._downloader.trouble(u'ERROR: no known formats available for video')
-				return
-			if req_format is None:
-				video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
-			elif req_format == 'worst':
-				video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
-			elif req_format == '-1':
-				video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
-			else:
-				# Specific format
-				if req_format not in url_map:
-					self._downloader.trouble(u'ERROR: requested format not available')
-					return
-				video_url_list = [(req_format, url_map[req_format])] # Specific format
-
-		results = []
-		for format_param, video_real_url in video_url_list:
-			# Extension
-			video_extension = self._video_extensions.get(format_param, 'mp4')
-
-			results.append({
-				'id':		video_id.decode('utf-8'),
-				'url':		video_real_url.decode('utf-8'),
-				'uploader':	video_uploader.decode('utf-8'),
-				'upload_date':	upload_date,
-				'title':	video_title,
-				'ext':		video_extension.decode('utf-8'),
-				'format':	(format_param is None and u'NA' or format_param.decode('utf-8')),
-				'thumbnail':	video_thumbnail.decode('utf-8'),
-				'description':	video_description.decode('utf-8'),
-				'player_url':	None,
-			})
-		return results
+    """Information Extractor for Facebook"""
+
+    _WORKING = False
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+    _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
+    _NETRC_MACHINE = 'facebook'
+    _available_formats = ['video', 'highqual', 'lowqual']
+    _video_extensions = {
+        'video': 'mp4',
+        'highqual': 'mp4',
+        'lowqual': 'mp4',
+    }
+    IE_NAME = u'facebook'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def _reporter(self, message):
+        """Add header and report message."""
+        self._downloader.to_screen(u'[facebook] %s' % message)
+
+    def report_login(self):
+        """Report attempt to log in."""
+        self._reporter(u'Logging in')
+
+    def report_video_webpage_download(self, video_id):
+        """Report attempt to download video webpage."""
+        self._reporter(u'%s: Downloading video webpage' % video_id)
+
+    def report_information_extraction(self, video_id):
+        """Report attempt to extract video information."""
+        self._reporter(u'%s: Extracting video information' % video_id)
+
+    def _parse_page(self, video_webpage):
+        """Extract video information from page"""
+        # General data
+        data = {'title': r'\("video_title", "(.*?)"\)',
+            'description': r'<div class="datawrap">(.*?)</div>',
+            'owner': r'\("video_owner_name", "(.*?)"\)',
+            'thumbnail':  r'\("thumb_url", "(?P<THUMB>.*?)"\)',
+            }
+        video_info = {}
+        for piece in data.keys():
+            mobj = re.search(data[piece], video_webpage)
+            if mobj is not None:
+                video_info[piece] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
+
+        # Video urls
+        video_urls = {}
+        for fmt in self._available_formats:
+            mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
+            if mobj is not None:
+                # URL is in a Javascript segment inside an escaped Unicode format within
+                # the generally utf-8 page
+                video_urls[fmt] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
+        video_info['video_urls'] = video_urls
+
+        return video_info
+
+    def _real_initialize(self):
+        if self._downloader is None:
+            return
+
+        useremail = None
+        password = None
+        downloader_params = self._downloader.params
+
+        # Attempt to use provided username and password or .netrc data
+        if downloader_params.get('username', None) is not None:
+            useremail = downloader_params['username']
+            password = downloader_params['password']
+        elif downloader_params.get('usenetrc', False):
+            try:
+                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+                if info is not None:
+                    useremail = info[0]
+                    password = info[2]
+                else:
+                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+            except (IOError, netrc.NetrcParseError) as err:
+                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+                return
+
+        if useremail is None:
+            return
+
+        # Log in
+        login_form = {
+            'email': useremail,
+            'pass': password,
+            'login': 'Log+In'
+            }
+        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        try:
+            self.report_login()
+            login_results = compat_urllib_request.urlopen(request).read()
+            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
+                self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
+                return
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
+            return
+
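The credential lookup in _real_initialize follows the standard netrc convention, keyed by _NETRC_MACHINE. A minimal standalone sketch of that API, assuming a 'facebook' entry exists in the default ~/.netrc:

    import netrc

    def facebook_credentials():
        # Reads ~/.netrc; IOError/NetrcParseError are exactly the two
        # failure modes the extractor catches above.
        info = netrc.netrc().authenticators('facebook')
        if info is None:
            raise netrc.NetrcParseError('No authenticators for facebook')
        login, _account, password = info  # netrc triple: (login, account, password)
        return login, password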
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        video_id = mobj.group('ID')
+
+        # Get video webpage
+        self.report_video_webpage_download(video_id)
+        request = compat_urllib_request.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
+        try:
+            page = compat_urllib_request.urlopen(request)
+            video_webpage = page.read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            return
+
+        # Start extracting information
+        self.report_information_extraction(video_id)
+
+        # Extract information
+        video_info = self._parse_page(video_webpage)
+
+        # uploader
+        if 'owner' not in video_info:
+            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+            return
+        video_uploader = video_info['owner']
+
+        # title
+        if 'title' not in video_info:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = video_info['title']
+        video_title = video_title.decode('utf-8')
+
+        # thumbnail image
+        if 'thumbnail' not in video_info:
+            self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+            video_thumbnail = ''
+        else:
+            video_thumbnail = video_info['thumbnail']
+
+        # upload date
+        upload_date = None
+        if 'upload_date' in video_info:
+            upload_time = video_info['upload_date']
+            timetuple = email.utils.parsedate_tz(upload_time)
+            if timetuple is not None:
+                try:
+                    upload_date = time.strftime('%Y%m%d', timetuple[0:9])
+                except Exception:
+                    pass
+
+        # description
+        video_description = video_info.get('description', 'No description available.')
+
+        url_map = video_info['video_urls']
+        if url_map:
+            # Decide which formats to download
+            req_format = self._downloader.params.get('format', None)
+            format_limit = self._downloader.params.get('format_limit', None)
+
+            if format_limit is not None and format_limit in self._available_formats:
+                format_list = self._available_formats[self._available_formats.index(format_limit):]
+            else:
+                format_list = self._available_formats
+            existing_formats = [x for x in format_list if x in url_map]
+            if len(existing_formats) == 0:
+                self._downloader.trouble(u'ERROR: no known formats available for video')
+                return
+            if req_format is None:
+                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+            elif req_format == 'worst':
+                video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
+            elif req_format == '-1':
+                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+            else:
+                # Specific format
+                if req_format not in url_map:
+                    self._downloader.trouble(u'ERROR: requested format not available')
+                    return
+                video_url_list = [(req_format, url_map[req_format])] # Specific format
+
+        results = []
+        for format_param, video_real_url in video_url_list:
+            # Extension
+            video_extension = self._video_extensions.get(format_param, 'mp4')
+
+            results.append({
+                'id':       video_id.decode('utf-8'),
+                'url':      video_real_url.decode('utf-8'),
+                'uploader': video_uploader.decode('utf-8'),
+                'upload_date':  upload_date,
+                'title':    video_title,
+                'ext':      video_extension.decode('utf-8'),
+                'format':   (format_param is None and u'NA' or format_param.decode('utf-8')),
+                'thumbnail':    video_thumbnail.decode('utf-8'),
+                'description':  video_description.decode('utf-8'),
+            })
+        return results
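The parsing strategy above is table-driven: one regex per field, run over the raw page, with percent-unquoting applied to every hit. A minimal Python 3 sketch of the same idea; the page fragment is invented for illustration and the unicode_escape step is left out:

    import re
    from urllib.parse import unquote_plus

    # Invented excerpt of a video page, shaped like the snippets the regexes expect.
    PAGE = ('("video_title", "My+Clip")("video_owner_name", "Alice")'
            '("lowqual_src", "http%3A%2F%2Fexample.com%2Fv.mp4")')

    FIELD_PATTERNS = {
        'title': r'\("video_title", "(.*?)"\)',
        'owner': r'\("video_owner_name", "(.*?)"\)',
    }

    def parse_page(page):
        info = {'video_urls': {}}
        for field, pattern in FIELD_PATTERNS.items():
            mobj = re.search(pattern, page)
            if mobj is not None:
                info[field] = unquote_plus(mobj.group(1))
        # Format URLs follow the same ("<fmt>_src", "<url>") convention.
        for fmt in ('video', 'highqual', 'lowqual'):
            mobj = re.search(r'\("%s_src", "(.+?)"\)' % fmt, page)
            if mobj is not None:
                info['video_urls'][fmt] = unquote_plus(mobj.group(1))
        return info

    print(parse_page(PAGE))
    # {'video_urls': {'lowqual': 'http://example.com/v.mp4'}, 'title': 'My Clip', 'owner': 'Alice'}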
 
 class BlipTVIE(InfoExtractor):
-	"""Information extractor for blip.tv"""
-
-	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
-	_URL_EXT = r'^.*\.([a-z0-9]+)$'
-	IE_NAME = u'blip.tv'
-
-	def report_extraction(self, file_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
-
-	def report_direct_download(self, title):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title))
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-
-		if '?' in url:
-			cchar = '&'
-		else:
-			cchar = '?'
-		json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
-		request = urllib2.Request(json_url.encode('utf-8'))
-		self.report_extraction(mobj.group(1))
-		info = None
-		try:
-			urlh = urllib2.urlopen(request)
-			if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
-				basename = url.split('/')[-1]
-				title,ext = os.path.splitext(basename)
-				title = title.decode('UTF-8')
-				ext = ext.replace('.', '')
-				self.report_direct_download(title)
-				info = {
-					'id': title,
-					'url': url,
-					'title': title,
-					'ext': ext,
-					'urlhandle': urlh
-				}
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
-			return
-		if info is None: # Regular URL
-			try:
-				json_code = urlh.read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % str(err))
-				return
-
-			try:
-				json_data = json.loads(json_code)
-				if 'Post' in json_data:
-					data = json_data['Post']
-				else:
-					data = json_data
-
-				upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
-				video_url = data['media']['url']
-				umobj = re.match(self._URL_EXT, video_url)
-				if umobj is None:
-					raise ValueError('Can not determine filename extension')
-				ext = umobj.group(1)
-
-				info = {
-					'id': data['item_id'],
-					'url': video_url,
-					'uploader': data['display_name'],
-					'upload_date': upload_date,
-					'title': data['title'],
-					'ext': ext,
-					'format': data['media']['mimeType'],
-					'thumbnail': data['thumbnailUrl'],
-					'description': data['description'],
-					'player_url': data['embedUrl']
-				}
-			except (ValueError,KeyError), err:
-				self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
-				return
-
-		std_headers['User-Agent'] = 'iTunes/10.6.1'
-		return [info]
+    """Information extractor for blip.tv"""
+
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
+    _URL_EXT = r'^.*\.([a-z0-9]+)$'
+    IE_NAME = u'blip.tv'
+
+    def report_extraction(self, file_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+
+    def report_direct_download(self, title):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        if '?' in url:
+            cchar = '&'
+        else:
+            cchar = '?'
+        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
+        request = compat_urllib_request.Request(json_url)
+        self.report_extraction(mobj.group(1))
+        info = None
+        try:
+            urlh = compat_urllib_request.urlopen(request)
+            if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
+                basename = url.split('/')[-1]
+                title,ext = os.path.splitext(basename)
+                title = title.decode('UTF-8')
+                ext = ext.replace('.', '')
+                self.report_direct_download(title)
+                info = {
+                    'id': title,
+                    'url': url,
+                    'uploader': None,
+                    'upload_date': None,
+                    'title': title,
+                    'ext': ext,
+                    'urlhandle': urlh
+                }
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
+            return
+        if info is None: # Regular URL
+            try:
+                json_code_bytes = urlh.read()
+                json_code = json_code_bytes.decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
+                return
+
+            try:
+                json_data = json.loads(json_code)
+                if 'Post' in json_data:
+                    data = json_data['Post']
+                else:
+                    data = json_data
+
+                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
+                video_url = data['media']['url']
+                umobj = re.match(self._URL_EXT, video_url)
+                if umobj is None:
+                    raise ValueError('Can not determine filename extension')
+                ext = umobj.group(1)
+
+                info = {
+                    'id': data['item_id'],
+                    'url': video_url,
+                    'uploader': data['display_name'],
+                    'upload_date': upload_date,
+                    'title': data['title'],
+                    'ext': ext,
+                    'format': data['media']['mimeType'],
+                    'thumbnail': data['thumbnailUrl'],
+                    'description': data['description'],
+                    'player_url': data['embedUrl']
+                }
+            except (ValueError,KeyError) as err:
+                self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
+                return
+
+        std_headers['User-Agent'] = 'iTunes/10.6.1'
+        return [info]
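The heart of this extractor is that appending skin=json&version=2&no_wrap=1 to any blip.tv URL yields JSON metadata, unless the server answers with the media bytes themselves, which the Content-Type check detects. A rough Python 3 sketch of that decision, with a placeholder URL:

    import json
    from urllib.request import urlopen

    def blip_json_url(url):
        # Reuse the existing query string if the URL already has one.
        cchar = '&' if '?' in url else '?'
        return url + cchar + 'skin=json&version=2&no_wrap=1'

    def fetch_info(url):
        urlh = urlopen(blip_json_url(url))
        if urlh.headers.get('Content-Type', '').startswith('video/'):
            return {'direct': True, 'url': url}  # direct download, no metadata to parse
        data = json.loads(urlh.read().decode('utf-8'))
        return data.get('Post', data)  # the payload may be wrapped in a 'Post' object

    # fetch_info('http://blip.tv/some-show/some-episode-1234567')  # placeholder URL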
 
 
 class MyVideoIE(InfoExtractor):
-	"""Information Extractor for myvideo.de."""
-
-	_VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
-	IE_NAME = u'myvideo'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-	
-	def report_download_webpage(self, video_id):
-		"""Report webpage download."""
-		self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
-
-	def _real_extract(self,url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._download.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-
-		video_id = mobj.group(1)
-
-		# Get video webpage
-		request = urllib2.Request('http://www.myvideo.de/watch/%s' % video_id)
-		try:
-			self.report_download_webpage(video_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-			return
-
-		self.report_extraction(video_id)
-		mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
-				 webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract media URL')
-			return
-		video_url = mobj.group(1) + ('/%s.flv' % video_id)
-
-		mobj = re.search('<title>([^<]+)</title>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract title')
-			return
-
-		video_title = mobj.group(1)
-
-		return [{
-			'id':		video_id,
-			'url':		video_url,
-			'uploader':	u'NA',
-			'upload_date':  u'NA',
-			'title':	video_title,
-			'ext':		u'flv',
-			'format':	u'NA',
-			'player_url':	None,
-		}]
+    """Information Extractor for myvideo.de."""
+
+    _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
+    IE_NAME = u'myvideo'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
+
+    def _real_extract(self,url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(1)
+
+        # Get video webpage
+        webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
+        webpage = self._download_webpage(webpage_url, video_id)
+
+        self.report_extraction(video_id)
+        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
+                 webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract media URL')
+            return
+        video_url = mobj.group(1) + ('/%s.flv' % video_id)
+
+        mobj = re.search('<title>([^<]+)</title>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+
+        video_title = mobj.group(1)
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': None,
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      u'flv',
+        }]
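Worth noting: myvideo.de exposes no direct media link, so the extractor recovers the movie directory from the image_src thumbnail URL and appends <video_id>.flv. A toy illustration with a fabricated page fragment and ID:

    import re

    # Fabricated fragment; real pages carry far more markup around this tag.
    WEBPAGE = "<link rel='image_src' href='http://is9.myvideo.de/de/movie17/abc123def456/thumbs/cover.jpg' />"
    VIDEO_ID = '8520035'  # made-up ID

    mobj = re.search(r"<link rel='image_src' href='(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg' />", WEBPAGE)
    if mobj is not None:
        video_url = mobj.group(1) + '/%s.flv' % VIDEO_ID
        print(video_url)  # http://is9.myvideo.de/de/movie17/abc123def456/8520035.flv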
 
 class ComedyCentralIE(InfoExtractor):
-	"""Information extractor for The Daily Show and Colbert Report """
-
-	_VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)?(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$'
-	IE_NAME = u'comedycentral'
-
-	def report_extraction(self, episode_id):
-		self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
-
-	def report_config_download(self, episode_id):
-		self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
-
-	def report_index_download(self, episode_id):
-		self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)
-
-	def report_player_url(self, episode_id):
-		self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-
-		if mobj.group('shortname'):
-			if mobj.group('shortname') in ('tds', 'thedailyshow'):
-				url = u'http://www.thedailyshow.com/full-episodes/'
-			else:
-				url = u'http://www.colbertnation.com/full-episodes/'
-			mobj = re.match(self._VALID_URL, url)
-			assert mobj is not None
-
-		dlNewest = not mobj.group('episode')
-		if dlNewest:
-			epTitle = mobj.group('showname')
-		else:
-			epTitle = mobj.group('episode')
-
-		req = urllib2.Request(url)
-		self.report_extraction(epTitle)
-		try:
-			htmlHandle = urllib2.urlopen(req)
-			html = htmlHandle.read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
-			return
-		if dlNewest:
-			url = htmlHandle.geturl()
-			mobj = re.match(self._VALID_URL, url)
-			if mobj is None:
-				self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
-				return
-			if mobj.group('episode') == '':
-				self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
-				return
-			epTitle = mobj.group('episode')
-
-		mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*episode.*?:.*?))"', html)
-		if len(mMovieParams) == 0:
-			self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
-			return
-
-		playerUrl_raw = mMovieParams[0][0]
-		self.report_player_url(epTitle)
-		try:
-			urlHandle = urllib2.urlopen(playerUrl_raw)
-			playerUrl = urlHandle.geturl()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to find out player URL: ' + unicode(err))
-			return
-
-		uri = mMovieParams[0][1]
-		indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + urllib.urlencode({'uri': uri})
-		self.report_index_download(epTitle)
-		try:
-			indexXml = urllib2.urlopen(indexUrl).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download episode index: ' + unicode(err))
-			return
-
-		results = []
-
-		idoc = xml.etree.ElementTree.fromstring(indexXml)
-		itemEls = idoc.findall('.//item')
-		for itemEl in itemEls:
-			mediaId = itemEl.findall('./guid')[0].text
-			shortMediaId = mediaId.split(':')[-1]
-			showId = mediaId.split(':')[-2].replace('.com', '')
-			officialTitle = itemEl.findall('./title')[0].text
-			officialDate = itemEl.findall('./pubDate')[0].text
-
-			configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
-						urllib.urlencode({'uri': mediaId}))
-			configReq = urllib2.Request(configUrl)
-			self.report_config_download(epTitle)
-			try:
-				configXml = urllib2.urlopen(configReq).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
-				return
-
-			cdoc = xml.etree.ElementTree.fromstring(configXml)
-			turls = []
-			for rendition in cdoc.findall('.//rendition'):
-				finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
-				turls.append(finfo)
-
-			if len(turls) == 0:
-				self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
-				continue
-
-			# For now, just pick the highest bitrate
-			format,video_url = turls[-1]
-
-			effTitle = showId + u'-' + epTitle
-			info = {
-				'id': shortMediaId,
-				'url': video_url,
-				'uploader': showId,
-				'upload_date': officialDate,
-				'title': effTitle,
-				'ext': 'mp4',
-				'format': format,
-				'thumbnail': None,
-				'description': officialTitle,
-				'player_url': playerUrl
-			}
-
-			results.append(info)
-			
-		return results
+    """Information extractor for The Daily Show and Colbert Report """
+
+    # urls can be abbreviations like :thedailyshow or :colbert
+    # urls for episodes like:
+    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
+    #                     or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
+    #                     or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
+    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
+                      |(https?://)?(www\.)?
+                          (?P<showname>thedailyshow|colbertnation)\.com/
+                         (full-episodes/(?P<episode>.*)|
+                          (?P<clip>
+                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
+                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
+                     $"""
+    IE_NAME = u'comedycentral'
+
+    _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
+
+    _video_extensions = {
+        '3500': 'mp4',
+        '2200': 'mp4',
+        '1700': 'mp4',
+        '1200': 'mp4',
+        '750': 'mp4',
+        '400': 'mp4',
+    }
+    _video_dimensions = {
+        '3500': '1280x720',
+        '2200': '960x540',
+        '1700': '768x432',
+        '1200': '640x360',
+        '750': '512x288',
+        '400': '384x216',
+    }
+
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
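The re.VERBOSE pattern above does the routing: named groups distinguish a :shortname abbreviation from a full-episode URL or a clip URL. A quick demonstration of how the groups come out, the second URL being the Daily Show clip example from the comment above:

    import re

    VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
                      |(https?://)?(www\.)?
                          (?P<showname>thedailyshow|colbertnation)\.com/
                         (full-episodes/(?P<episode>.*)|
                          (?P<clip>
                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
                     $"""

    for url in (':tds',
                'http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day'):
        m = re.match(VALID_URL, url, re.VERBOSE)
        print(m.group('shortname'), m.group('episode'), m.group('tdstitle'))
    # tds None None
    # None None any-given-gun-day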
+
+    def report_extraction(self, episode_id):
+        self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
+
+    def report_config_download(self, episode_id):
+        self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
+
+    def report_index_download(self, episode_id):
+        self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)
+
+    def report_player_url(self, episode_id):
+        self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)
+
+
+    def _print_formats(self, formats):
+        print('Available formats:')
+        for x in formats:
+            print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'mp4'), self._video_dimensions.get(x, '???')))
+
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        if mobj.group('shortname'):
+            if mobj.group('shortname') in ('tds', 'thedailyshow'):
+                url = u'http://www.thedailyshow.com/full-episodes/'
+            else:
+                url = u'http://www.colbertnation.com/full-episodes/'
+            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
+            assert mobj is not None
+
+        if mobj.group('clip'):
+            if mobj.group('showname') == 'thedailyshow':
+                epTitle = mobj.group('tdstitle')
+            else:
+                epTitle = mobj.group('cntitle')
+            dlNewest = False
+        else:
+            dlNewest = not mobj.group('episode')
+            if dlNewest:
+                epTitle = mobj.group('showname')
+            else:
+                epTitle = mobj.group('episode')
+
+        req = compat_urllib_request.Request(url)
+        self.report_extraction(epTitle)
+        try:
+            htmlHandle = compat_urllib_request.urlopen(req)
+            html = htmlHandle.read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            return
+        if dlNewest:
+            url = htmlHandle.geturl()
+            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
+            if mobj is None:
+                self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
+                return
+            if mobj.group('episode') == '':
+                self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
+                return
+            epTitle = mobj.group('episode')
+
+        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', html)
+
+        if len(mMovieParams) == 0:
+            # The Colbert Report embeds the information in a data-mgid
+            # attribute without a URL prefix; so extract the alternate
+            # reference and then add the URL prefix manually.
+
+            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', html)
+            if len(altMovieParams) == 0:
+                self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
+                return
+            else:
+                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
+
+        playerUrl_raw = mMovieParams[0][0]
+        self.report_player_url(epTitle)
+        try:
+            urlHandle = compat_urllib_request.urlopen(playerUrl_raw)
+            playerUrl = urlHandle.geturl()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to find out player URL: ' + compat_str(err))
+            return
+
+        uri = mMovieParams[0][1]
+        indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
+        self.report_index_download(epTitle)
+        try:
+            indexXml = compat_urllib_request.urlopen(indexUrl).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err))
+            return
+
+        results = []
+
+        idoc = xml.etree.ElementTree.fromstring(indexXml)
+        itemEls = idoc.findall('.//item')
+        for itemEl in itemEls:
+            mediaId = itemEl.findall('./guid')[0].text
+            shortMediaId = mediaId.split(':')[-1]
+            showId = mediaId.split(':')[-2].replace('.com', '')
+            officialTitle = itemEl.findall('./title')[0].text
+            officialDate = itemEl.findall('./pubDate')[0].text
+
+            configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
+                        compat_urllib_parse.urlencode({'uri': mediaId}))
+            configReq = compat_urllib_request.Request(configUrl)
+            self.report_config_download(epTitle)
+            try:
+                configXml = compat_urllib_request.urlopen(configReq).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
+
+            cdoc = xml.etree.ElementTree.fromstring(configXml)
+            turls = []
+            for rendition in cdoc.findall('.//rendition'):
+                finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
+                turls.append(finfo)
+
+            if len(turls) == 0:
+                self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
+                continue
+
+            if self._downloader.params.get('listformats', None):
+                self._print_formats([i[0] for i in turls])
+                return
+
+            # For now, just pick the highest bitrate
+            format,video_url = turls[-1]
+
+            # Get the format arg from the arg stream
+            req_format = self._downloader.params.get('format', None)
+
+            # Select format if we can find one
+            for f,v in turls:
+                if f == req_format:
+                    format, video_url = f, v
+                    break
+
+            # Patch to download from alternative CDN, which does not
+            # break on current RTMPDump builds
+            broken_cdn = "rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/"
+            better_cdn = "rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/"
+
+            if video_url.startswith(broken_cdn):
+                video_url = video_url.replace(broken_cdn, better_cdn)
+
+            effTitle = showId + u'-' + epTitle
+            info = {
+                'id': shortMediaId,
+                'url': video_url,
+                'uploader': showId,
+                'upload_date': officialDate,
+                'title': effTitle,
+                'ext': 'mp4',
+                'format': format,
+                'thumbnail': None,
+                'description': officialTitle,
+                'player_url': None #playerUrl
+            }
+
+            results.append(info)
+
+        return results
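For reference, the index parsing hinges on the shape of the guid values in the mrss feed: the last colon-separated field is the short media id and the second-to-last names the show. A short sketch against a fabricated item; the element layout is inferred from the code above, not from a live feed:

    import xml.etree.ElementTree as ET

    INDEX_XML = """<rss><channel>
      <item>
        <guid>mgid:cms:video:thedailyshow.com:423551</guid>
        <title>November 29, 2012</title>
        <pubDate>Thu, 29 Nov 2012 22:00:00 EST</pubDate>
      </item>
    </channel></rss>"""

    idoc = ET.fromstring(INDEX_XML)
    for item in idoc.findall('.//item'):
        media_id = item.findall('./guid')[0].text
        print(media_id.split(':')[-1],                      # 423551
              media_id.split(':')[-2].replace('.com', ''),  # thedailyshow
              item.findall('./title')[0].text)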
 
 
 class EscapistIE(InfoExtractor):
-	"""Information extractor for The Escapist """
-
-	_VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
-	IE_NAME = u'escapist'
-
-	def report_extraction(self, showName):
-		self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName)
-
-	def report_config_download(self, showName):
-		self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName)
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-		showName = mobj.group('showname')
-		videoId = mobj.group('episode')
-
-		self.report_extraction(showName)
-		try:
-			webPage = urllib2.urlopen(url)
-			webPageBytes = webPage.read()
-			m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
-			webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err))
-			return
-
-		descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
-		description = unescapeHTML(descMatch.group(1))
-		imgMatch = re.search('<meta property="og:image" content="([^"]*)"', webPage)
-		imgUrl = unescapeHTML(imgMatch.group(1))
-		playerUrlMatch = re.search('<meta property="og:video" content="([^"]*)"', webPage)
-		playerUrl = unescapeHTML(playerUrlMatch.group(1))
-		configUrlMatch = re.search('config=(.*)$', playerUrl)
-		configUrl = urllib2.unquote(configUrlMatch.group(1))
-
-		self.report_config_download(showName)
-		try:
-			configJSON = urllib2.urlopen(configUrl).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download configuration: ' + unicode(err))
-			return
-
-		# Technically, it's JavaScript, not JSON
-		configJSON = configJSON.replace("'", '"')
-
-		try:
-			config = json.loads(configJSON)
-		except (ValueError,), err:
-			self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + unicode(err))
-			return
-
-		playlist = config['playlist']
-		videoUrl = playlist[1]['url']
-
-		info = {
-			'id': videoId,
-			'url': videoUrl,
-			'uploader': showName,
-			'upload_date': None,
-			'title': showName,
-			'ext': 'flv',
-			'format': 'flv',
-			'thumbnail': imgUrl,
-			'description': description,
-			'player_url': playerUrl,
-		}
-
-		return [info]
+    """Information extractor for The Escapist """
+
+    _VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
+    IE_NAME = u'escapist'
+
+    def report_extraction(self, showName):
+        self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName)
+
+    def report_config_download(self, showName):
+        self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        showName = mobj.group('showname')
+        videoId = mobj.group('episode')
+
+        self.report_extraction(showName)
+        try:
+            webPage = compat_urllib_request.urlopen(url)
+            webPageBytes = webPage.read()
+            m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
+            webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err))
+            return
+
+        descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
+        description = unescapeHTML(descMatch.group(1))
+        imgMatch = re.search('<meta property="og:image" content="([^"]*)"', webPage)
+        imgUrl = unescapeHTML(imgMatch.group(1))
+        playerUrlMatch = re.search('<meta property="og:video" content="([^"]*)"', webPage)
+        playerUrl = unescapeHTML(playerUrlMatch.group(1))
+        configUrlMatch = re.search('config=(.*)$', playerUrl)
+        configUrl = compat_urllib_parse.unquote(configUrlMatch.group(1))
+
+        self.report_config_download(showName)
+        try:
+            configJSON = compat_urllib_request.urlopen(configUrl)
+            m = re.match(r'text/html; charset="?([^"]+)"?', configJSON.headers['Content-Type'])
+            configJSON = configJSON.read().decode(m.group(1) if m else 'utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err))
+            return
+
+        # Technically, it's JavaScript, not JSON
+        configJSON = configJSON.replace("'", '"')
+
+        try:
+            config = json.loads(configJSON)
+        except (ValueError,) as err:
+            self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err))
+            return
+
+        playlist = config['playlist']
+        videoUrl = playlist[1]['url']
+
+        info = {
+            'id': videoId,
+            'url': videoUrl,
+            'uploader': showName,
+            'upload_date': None,
+            'title': showName,
+            'ext': 'flv',
+            'thumbnail': imgUrl,
+            'description': description,
+            'player_url': playerUrl,
+        }
+
+        return [info]
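The charset handling deserves a note: both downloads sniff the encoding out of the Content-Type header and fall back to UTF-8 only when no charset is declared. A self-contained sketch of that header parsing, with invented header and body values:

    import re

    def decode_body(body_bytes, content_type):
        # e.g. 'text/html; charset="iso-8859-1"' -> 'iso-8859-1'
        m = re.match(r'text/html; charset="?([^"]+)"?', content_type)
        return body_bytes.decode(m.group(1) if m else 'utf-8')

    print(decode_body(b'caf\xe9', 'text/html; charset="iso-8859-1"'))  # café
    print(decode_body(b'caf\xc3\xa9', 'text/html'))                    # café via the utf-8 fallback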
 
 
 class CollegeHumorIE(InfoExtractor):
-	"""Information extractor for collegehumor.com"""
-
-	_VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$'
-	IE_NAME = u'collegehumor'
-
-	def report_webpage(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-		video_id = mobj.group('videoid')
-
-		self.report_webpage(video_id)
-		request = urllib2.Request(url)
-		try:
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
-			return
-
-		m = re.search(r'id="video:(?P<internalvideoid>[0-9]+)"', webpage)
-		if m is None:
-			self._downloader.trouble(u'ERROR: Cannot extract internal video ID')
-			return
-		internal_video_id = m.group('internalvideoid')
-
-		info = {
-			'id': video_id,
-			'internal_id': internal_video_id,
-		}
-
-		self.report_extraction(video_id)
-		xmlUrl = 'http://www.collegehumor.com/moogaloop/video:' + internal_video_id
-		try:
-			metaXml = urllib2.urlopen(xmlUrl).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % str(err))
-			return
-
-		mdoc = xml.etree.ElementTree.fromstring(metaXml)
-		try:
-			videoNode = mdoc.findall('./video')[0]
-			info['description'] = videoNode.findall('./description')[0].text
-			info['title'] = videoNode.findall('./caption')[0].text
-			info['url'] = videoNode.findall('./file')[0].text
-			info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
-			info['ext'] = info['url'].rpartition('.')[2]
-			info['format'] = info['ext']
-		except IndexError:
-			self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
-			return
-
-		return [info]
+    """Information extractor for collegehumor.com"""
+
+    _WORKING = False
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$'
+    IE_NAME = u'collegehumor'
+
+    def report_manifest(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Downloading XML manifest' % (self.IE_NAME, video_id))
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        video_id = mobj.group('videoid')
+
+        info = {
+            'id': video_id,
+            'uploader': None,
+            'upload_date': None,
+        }
+
+        self.report_extraction(video_id)
+        xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id
+        try:
+            metaXml = compat_urllib_request.urlopen(xmlUrl).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+            return
+
+        mdoc = xml.etree.ElementTree.fromstring(metaXml)
+        try:
+            videoNode = mdoc.findall('./video')[0]
+            info['description'] = videoNode.findall('./description')[0].text
+            info['title'] = videoNode.findall('./caption')[0].text
+            info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
+            manifest_url = videoNode.findall('./file')[0].text
+        except IndexError:
+            self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+            return
+
+        manifest_url += '?hdcore=2.10.3'
+        self.report_manifest(video_id)
+        try:
+            manifestXml = compat_urllib_request.urlopen(manifest_url).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+            return
+
+        adoc = xml.etree.ElementTree.fromstring(manifestXml)
+        try:
+            media_node = adoc.findall('./{http://ns.adobe.com/f4m/1.0}media')[0]
+            node_id = media_node.attrib['url']
+            video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
+        except IndexError as err:
+            self._downloader.trouble(u'\nERROR: Invalid manifest file')
+            return
+
+        url_pr = compat_urllib_parse_urlparse(manifest_url)
+        url = url_pr.scheme + '://' + url_pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1'
+
+        info['url'] = url
+        info['ext'] = 'f4f'
+        return [info]
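The new flow takes two hops: the metadata XML points at an Adobe HDS (f4m) manifest, and the fragment URL is assembled from the manifest's namespaced media and id elements. A hedged sketch of the second hop; the manifest content and URL below are fabricated:

    import xml.etree.ElementTree as ET
    from urllib.parse import urlparse

    F4M_NS = '{http://ns.adobe.com/f4m/1.0}'

    MANIFEST = '''<manifest xmlns="http://ns.adobe.com/f4m/1.0">
      <id>collegehumor/video12345mp4</id>
      <media url="video12345_" bitrate="1200"/>
    </manifest>'''
    manifest_url = 'http://example.invalid/path/manifest.f4m?hdcore=2.10.3'  # placeholder

    adoc = ET.fromstring(MANIFEST)
    node_id = adoc.findall('./' + F4M_NS + 'media')[0].attrib['url']
    video_id = adoc.findall('./' + F4M_NS + 'id')[0].text

    pr = urlparse(manifest_url)
    # Same recipe as above: scheme://host/z<id minus two chars>/<media url>Seg1-Frag1
    print(pr.scheme + '://' + pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1')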
 
 
 class XVideosIE(InfoExtractor):
-	"""Information extractor for xvideos.com"""
-
-	_VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
-	IE_NAME = u'xvideos'
-
-	def report_webpage(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+    """Information extractor for xvideos.com"""
 
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
+    IE_NAME = u'xvideos'
 
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-		video_id = mobj.group(1).decode('utf-8')
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
 
-		self.report_webpage(video_id)
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        video_id = mobj.group(1)
 
-		request = urllib2.Request(r'http://www.xvideos.com/video' + video_id)
-		try:
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
-			return
+        webpage = self._download_webpage(url, video_id)
 
-		self.report_extraction(video_id)
+        self.report_extraction(video_id)
 
 
-		# Extract video URL
-		mobj = re.search(r'flv_url=(.+?)&', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video url')
-			return
-		video_url = urllib2.unquote(mobj.group(1).decode('utf-8'))
+        # Extract video URL
+        mobj = re.search(r'flv_url=(.+?)&', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video url')
+            return
+        video_url = compat_urllib_parse.unquote(mobj.group(1))
 
 
-		# Extract title
-		mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video title')
-			return
-		video_title = mobj.group(1).decode('utf-8')
+        # Extract title
+        mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = mobj.group(1)
 
 
-		# Extract video thumbnail
-		mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
-			return
-		video_thumbnail = mobj.group(0).decode('utf-8')
+        # Extract video thumbnail
+        mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            return
+        video_thumbnail = mobj.group(0)
 
-		info = {
-			'id': video_id,
-			'url': video_url,
-			'uploader': None,
-			'upload_date': None,
-			'title': video_title,
-			'ext': 'flv',
-			'format': 'flv',
-			'thumbnail': video_thumbnail,
-			'description': None,
-			'player_url': None,
-		}
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'uploader': None,
+            'upload_date': None,
+            'title': video_title,
+            'ext': 'flv',
+            'thumbnail': video_thumbnail,
+            'description': None,
+        }
 
-		return [info]
+        return [info]
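All of the metadata here comes from regexes over the watch page; the media URL in particular hides in a percent-encoded flv_url parameter. A minimal illustration with an invented page fragment:

    import re
    from urllib.parse import unquote

    # Invented fragment; the real page embeds this in the player setup.
    WEBPAGE = 'flashvars="flv_url=http%3A%2F%2Fcdn.example%2Fclip.flv&amp;other=1"'

    mobj = re.search(r'flv_url=(.+?)&', WEBPAGE)
    if mobj is not None:
        print(unquote(mobj.group(1)))  # http://cdn.example/clip.flv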
 
 
 class SoundcloudIE(InfoExtractor):
-	"""Information extractor for soundcloud.com
-	   To access the media, the uid of the song and a stream token
-	   must be extracted from the page source and the script must make
-	   a request to media.soundcloud.com/crossdomain.xml. Then
-	   the media can be grabbed by requesting from an url composed
-	   of the stream token and uid
-	 """
-
-	_VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
-	IE_NAME = u'soundcloud'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_webpage(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-
-		# extract uploader (which is in the url)
-		uploader = mobj.group(1).decode('utf-8')
-		# extract simple title (uploader + slug of song title)
-		slug_title =  mobj.group(2).decode('utf-8')
-		simple_title = uploader + u'-' + slug_title
-
-		self.report_webpage('%s/%s' % (uploader, slug_title))
-
-		request = urllib2.Request('http://soundcloud.com/%s/%s' % (uploader, slug_title))
-		try:
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
-			return
-
-		self.report_extraction('%s/%s' % (uploader, slug_title))
-
-		# extract uid and stream token that soundcloud hands out for access
-		mobj = re.search('"uid":"([\w\d]+?)".*?stream_token=([\w\d]+)', webpage)
-		if mobj:
-			video_id = mobj.group(1)
-			stream_token = mobj.group(2)
-
-		# extract unsimplified title
-		mobj = re.search('"title":"(.*?)",', webpage)
-		if mobj:
-			title = mobj.group(1).decode('utf-8')
-		else:
-			title = simple_title
-
-		# construct media url (with uid/token)
-		mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s"
-		mediaURL = mediaURL % (video_id, stream_token)
-
-		# description
-		description = u'No description available'
-		mobj = re.search('track-description-value"><p>(.*?)</p>', webpage)
-		if mobj:
-			description = mobj.group(1)
-		
-		# upload date
-		upload_date = None
-		mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
-		if mobj:
-			try:
-				upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
-			except Exception, e:
-				self._downloader.to_stderr(str(e))
-
-		# for soundcloud, a request to a cross domain is required for cookies
-		request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
-
-		return [{
-			'id':		video_id.decode('utf-8'),
-			'url':		mediaURL,
-			'uploader':	uploader.decode('utf-8'),
-			'upload_date':  upload_date,
-			'title':	title,
-			'ext':		u'mp3',
-			'format':	u'NA',
-			'player_url':	None,
-			'description': description.decode('utf-8')
-		}]
+    """Information extractor for soundcloud.com
+       To access the media, the uid of the song and a stream token
+       must be extracted from the page source and the script must make
+       a request to media.soundcloud.com/crossdomain.xml. Then
+       the media can be grabbed by requesting from an url composed
+       of the stream token and uid
+     """
+
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
+    IE_NAME = u'soundcloud'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_resolve(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id))
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        # extract uploader (which is in the url)
+        uploader = mobj.group(1)
+        # extract simple title (uploader + slug of song title)
+        slug_title = mobj.group(2)
+        simple_title = uploader + u'-' + slug_title
+
+        self.report_resolve('%s/%s' % (uploader, slug_title))
+
+        url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title)
+        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+        request = compat_urllib_request.Request(resolv_url)
+        try:
+            info_json_bytes = compat_urllib_request.urlopen(request).read()
+            info_json = info_json_bytes.decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            return
+
+        info = json.loads(info_json)
+        video_id = info['id']
+        self.report_extraction('%s/%s' % (uploader, slug_title))
+
+        streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+        request = compat_urllib_request.Request(streams_url)
+        try:
+            stream_json_bytes = compat_urllib_request.urlopen(request).read()
+            stream_json = stream_json_bytes.decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err))
+            return
+
+        streams = json.loads(stream_json)
+        mediaURL = streams['http_mp3_128_url']
+
+        return [{
+            'id':       info['id'],
+            'url':      mediaURL,
+            'uploader': info['user']['username'],
+            'upload_date':  info['created_at'],
+            'title':    info['title'],
+            'ext':      u'mp3',
+            'description': info['description'],
+        }]
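The rewrite drops page scraping in favor of two API calls: resolve.json maps the public page URL to a track id, and the streams endpoint then returns direct media URLs. A condensed sketch of that flow, reusing the client_id from the diff; the uploader and track names in the usage line are placeholders:

    import json
    from urllib.request import urlopen

    CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28'  # as used in the code above

    def soundcloud_mp3_url(uploader, slug_title):
        page_url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title)
        resolve_url = ('http://api.soundcloud.com/resolve.json?url='
                       + page_url + '&client_id=' + CLIENT_ID)
        info = json.loads(urlopen(resolve_url).read().decode('utf-8'))
        streams_url = ('https://api.sndcdn.com/i1/tracks/%s/streams?client_id=%s'
                       % (info['id'], CLIENT_ID))
        streams = json.loads(urlopen(streams_url).read().decode('utf-8'))
        return streams['http_mp3_128_url']

    # soundcloud_mp3_url('some-artist', 'some-track')  # placeholder names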
 
 
 class InfoQIE(InfoExtractor):
-	"""Information extractor for infoq.com"""
-
-	_VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
-	IE_NAME = u'infoq'
-
-	def report_webpage(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-
-		self.report_webpage(url)
-
-		request = urllib2.Request(url)
-		try:
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
-			return
-
-		self.report_extraction(url)
-
-
-		# Extract video URL
-		mobj = re.search(r"jsclassref='([^']*)'", webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video url')
-			return
-		video_url = 'rtmpe://video.infoq.com/cfx/st/' + urllib2.unquote(mobj.group(1).decode('base64'))
-
-
-		# Extract title
-		mobj = re.search(r'contentTitle = "(.*?)";', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video title')
-			return
-		video_title = mobj.group(1).decode('utf-8')
-
-		# Extract description
-		video_description = u'No description available.'
-		mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage)
-		if mobj is not None:
-			video_description = mobj.group(1).decode('utf-8')
-
-		video_filename = video_url.split('/')[-1]
-		video_id, extension = video_filename.split('.')
-
-		info = {
-			'id': video_id,
-			'url': video_url,
-			'uploader': None,
-			'upload_date': None,
-			'title': video_title,
-			'ext': extension,
-			'format': extension, # Extension is always(?) mp4, but seems to be flv
-			'thumbnail': None,
-			'description': video_description,
-			'player_url': None,
-		}
-
-		return [info]
+    """Information extractor for infoq.com"""
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        webpage = self._download_webpage(url, video_id=url)
+        self.report_extraction(url)
+
+        # Extract video URL
+        mobj = re.search(r"jsclassref='([^']*)'", webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video url')
+            return
+        real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
+        video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
+
+        # Extract title
+        mobj = re.search(r'contentTitle = "(.*?)";', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = mobj.group(1)
+
+        # Extract description
+        video_description = u'No description available.'
+        mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage)
+        if mobj is not None:
+            video_description = mobj.group(1)
+
+        video_filename = video_url.split('/')[-1]
+        video_id, extension = video_filename.split('.')
+
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'uploader': None,
+            'upload_date': None,
+            'title': video_title,
+            'ext': extension, # the filename says mp4, but the stream seems to be flv
+            'thumbnail': None,
+            'description': video_description,
+        }
+
+        return [info]
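
The jsclassref attribute holds a base64-wrapped, percent-encoded RTMP path fragment, which is why the extraction above chains b64decode and unquote. A self-contained round-trip in Python 3 (the sample value is fabricated for illustration):

    import base64
    from urllib.parse import unquote

    def decode_jsclassref(jsclassref):
        # base64 -> percent-encoded UTF-8 text -> plain path fragment
        return unquote(base64.b64decode(jsclassref.encode('ascii')).decode('utf-8'))

    # Fabricated sample encoding 'presentations/demo.mp4'
    sample = base64.b64encode(b'presentations%2Fdemo.mp4').decode('ascii')
    assert decode_jsclassref(sample) == 'presentations/demo.mp4'
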
 
 
 class MixcloudIE(InfoExtractor):
-	"""Information extractor for www.mixcloud.com"""
-	_VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)'
-	IE_NAME = u'mixcloud'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_json(self, file_id):
-		"""Report JSON download."""
-		self._downloader.to_screen(u'[%s] Downloading json' % self.IE_NAME)
-
-	def report_extraction(self, file_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
-
-	def get_urls(self, jsonData, fmt, bitrate='best'):
-		"""Get urls from 'audio_formats' section in json"""
-		file_url = None
-		try:
-			bitrate_list = jsonData[fmt]
-			if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list:
-				bitrate = max(bitrate_list) # select highest
-
-			url_list = jsonData[fmt][bitrate]
-		except TypeError: # we have no bitrate info.
-			url_list = jsonData[fmt]
-		return url_list
-
-	def check_urls(self, url_list):
-		"""Returns 1st active url from list"""
-		for url in url_list:
-			try:
-				urllib2.urlopen(url)
-				return url
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				url = None
-
-		return None
-
-	def _print_formats(self, formats):
-		print 'Available formats:'
-		for fmt in formats.keys():
-			for b in formats[fmt]:
-				try:
-					ext = formats[fmt][b][0]
-					print '%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1])
-				except TypeError: # we have no bitrate info
-					ext = formats[fmt][0]
-					print '%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1])
-					break
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-		# extract uploader & filename from url
-		uploader = mobj.group(1).decode('utf-8')
-		file_id = uploader + "-" + mobj.group(2).decode('utf-8')
-
-		# construct API request
-		file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json'
-		# retrieve .json file with links to files
-		request = urllib2.Request(file_url)
-		try:
-			self.report_download_json(file_url)
-			jsonData = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % str(err))
-			return
-
-		# parse JSON
-		json_data = json.loads(jsonData)
-		player_url = json_data['player_swf_url']
-		formats = dict(json_data['audio_formats'])
-
-		req_format = self._downloader.params.get('format', None)
-		bitrate = None
-
-		if self._downloader.params.get('listformats', None):
-			self._print_formats(formats)
-			return
-
-		if req_format is None or req_format == 'best':
-			for format_param in formats.keys():
-				url_list = self.get_urls(formats, format_param)
-				# check urls
-				file_url = self.check_urls(url_list)
-				if file_url is not None:
-					break # got it!
-		else:
-			if req_format not in formats.keys():
-				self._downloader.trouble(u'ERROR: format is not available')
-				return
-
-			url_list = self.get_urls(formats, req_format)
-			file_url = self.check_urls(url_list)
-			format_param = req_format
-
-		return [{
-			'id': file_id.decode('utf-8'),
-			'url': file_url.decode('utf-8'),
-			'uploader':	uploader.decode('utf-8'),
-			'upload_date': u'NA',
-			'title': json_data['name'],
-			'ext': file_url.split('.')[-1].decode('utf-8'),
-			'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
-			'thumbnail': json_data['thumbnail_url'],
-			'description': json_data['description'],
-			'player_url': player_url.decode('utf-8'),
-		}]
+    """Information extractor for www.mixcloud.com"""
+
+    _WORKING = False # The site has switched to a new API; see http://www.mixcloud.com/developers/documentation/
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)'
+    IE_NAME = u'mixcloud'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_json(self, file_id):
+        """Report JSON download."""
+        self._downloader.to_screen(u'[%s] Downloading json' % self.IE_NAME)
+
+    def report_extraction(self, file_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+
+    def get_urls(self, jsonData, fmt, bitrate='best'):
+        """Get urls from 'audio_formats' section in json"""
+        file_url = None
+        try:
+            bitrate_list = jsonData[fmt]
+            if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list:
+                bitrate = max(bitrate_list) # select highest
+
+            url_list = jsonData[fmt][bitrate]
+        except TypeError: # we have no bitrate info.
+            url_list = jsonData[fmt]
+        return url_list
+
+    def check_urls(self, url_list):
+        """Returns 1st active url from list"""
+        for url in url_list:
+            try:
+                compat_urllib_request.urlopen(url)
+                return url
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                url = None
+
+        return None
+
+    def _print_formats(self, formats):
+        print('Available formats:')
+        for fmt in formats.keys():
+            for b in formats[fmt]:
+                try:
+                    ext = formats[fmt][b][0]
+                    print('%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1]))
+                except TypeError: # we have no bitrate info
+                    ext = formats[fmt][0]
+                    print('%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1]))
+                    break
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        # extract uploader & filename from url
+        uploader = mobj.group(1)
+        file_id = uploader + "-" + mobj.group(2)
+
+        # construct API request
+        file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json'
+        # retrieve .json file with links to files
+        request = compat_urllib_request.Request(file_url)
+        try:
+            self.report_download_json(file_url)
+            jsonData = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
+            return
+
+        # parse JSON
+        json_data = json.loads(jsonData.decode('utf-8'))
+        player_url = json_data['player_swf_url']
+        formats = dict(json_data['audio_formats'])
+
+        req_format = self._downloader.params.get('format', None)
+        bitrate = None
+
+        if self._downloader.params.get('listformats', None):
+            self._print_formats(formats)
+            return
+
+        if req_format is None or req_format == 'best':
+            for format_param in formats.keys():
+                url_list = self.get_urls(formats, format_param)
+                # check urls
+                file_url = self.check_urls(url_list)
+                if file_url is not None:
+                    break # got it!
+        else:
+            if req_format not in formats:
+                self._downloader.trouble(u'ERROR: format is not available')
+                return
+
+            url_list = self.get_urls(formats, req_format)
+            file_url = self.check_urls(url_list)
+            format_param = req_format
+
+        return [{
+            'id': file_id,
+            'url': file_url,
+            'uploader': uploader,
+            'upload_date': None,
+            'title': json_data['name'],
+            'ext': file_url.split('.')[-1],
+            'format': format_param if format_param is not None else u'NA',
+            'thumbnail': json_data['thumbnail_url'],
+            'description': json_data['description'],
+            'player_url': player_url,
+        }]
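
get_urls above tolerates both shapes the audio_formats section can take: a dict keyed by bitrate, or a bare list of URLs when no bitrate info exists (the TypeError branch). A quick standalone illustration with invented data (bitrate keys shown as ints for simplicity):

    def get_urls(jsonData, fmt, bitrate='best'):
        try:
            bitrate_list = jsonData[fmt]
            if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list:
                bitrate = max(bitrate_list)  # select highest bitrate key
            url_list = jsonData[fmt][bitrate]
        except TypeError:  # entry is a bare list: no bitrate info
            url_list = jsonData[fmt]
        return url_list

    formats = {
        'mp3': {64: ['http://cdn.example/64.mp3'], 320: ['http://cdn.example/320.mp3']},
        'ogg': ['http://cdn.example/only.ogg'],  # no per-bitrate structure
    }
    assert get_urls(formats, 'mp3') == ['http://cdn.example/320.mp3']
    assert get_urls(formats, 'ogg') == ['http://cdn.example/only.ogg']
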
 
 
 class StanfordOpenClassroomIE(InfoExtractor):
-	"""Information extractor for Stanford's Open ClassRoom"""
-
-	_VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
-	IE_NAME = u'stanfordoc'
-
-	def report_download_webpage(self, objid):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, objid))
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-
-		if mobj.group('course') and mobj.group('video'): # A specific video
-			course = mobj.group('course')
-			video = mobj.group('video')
-			info = {
-				'id': course + '_' + video,
-			}
-
-			self.report_extraction(info['id'])
-			baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
-			xmlUrl = baseUrl + video + '.xml'
-			try:
-				metaXml = urllib2.urlopen(xmlUrl).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % unicode(err))
-				return
-			mdoc = xml.etree.ElementTree.fromstring(metaXml)
-			try:
-				info['title'] = mdoc.findall('./title')[0].text
-				info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
-			except IndexError:
-				self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
-				return
-			info['ext'] = info['url'].rpartition('.')[2]
-			info['format'] = info['ext']
-			return [info]
-		elif mobj.group('course'): # A course page
-			course = mobj.group('course')
-			info = {
-				'id': course,
-				'type': 'playlist',
-			}
-
-			self.report_download_webpage(info['id'])
-			try:
-				coursepage = urllib2.urlopen(url).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download course info page: ' + unicode(err))
-				return
-
-			m = re.search('<h1>([^<]+)</h1>', coursepage)
-			if m:
-				info['title'] = unescapeHTML(m.group(1))
-			else:
-				info['title'] = info['id']
-
-			m = re.search('<description>([^<]+)</description>', coursepage)
-			if m:
-				info['description'] = unescapeHTML(m.group(1))
-
-			links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
-			info['list'] = [
-				{
-					'type': 'reference',
-					'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage),
-				}
-					for vpage in links]
-			results = []
-			for entry in info['list']:
-				assert entry['type'] == 'reference'
-				results += self.extract(entry['url'])
-			return results
-			
-		else: # Root page
-			info = {
-				'id': 'Stanford OpenClassroom',
-				'type': 'playlist',
-			}
-
-			self.report_download_webpage(info['id'])
-			rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
-			try:
-				rootpage = urllib2.urlopen(rootURL).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download course info page: ' + unicode(err))
-				return
-
-			info['title'] = info['id']
-
-			links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
-			info['list'] = [
-				{
-					'type': 'reference',
-					'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage),
-				}
-					for cpage in links]
-
-			results = []
-			for entry in info['list']:
-				assert entry['type'] == 'reference'
-				results += self.extract(entry['url'])
-			return results
+    """Information extractor for Stanford's Open ClassRoom"""
+
+    _VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
+    IE_NAME = u'stanfordoc'
+
+    def report_download_webpage(self, objid):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, objid))
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        if mobj.group('course') and mobj.group('video'): # A specific video
+            course = mobj.group('course')
+            video = mobj.group('video')
+            info = {
+                'id': course + '_' + video,
+                'uploader': None,
+                'upload_date': None,
+            }
+
+            self.report_extraction(info['id'])
+            baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
+            xmlUrl = baseUrl + video + '.xml'
+            try:
+                metaXml = compat_urllib_request.urlopen(xmlUrl).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+                return
+            mdoc = xml.etree.ElementTree.fromstring(metaXml)
+            try:
+                info['title'] = mdoc.findall('./title')[0].text
+                info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
+            except IndexError:
+                self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+                return
+            info['ext'] = info['url'].rpartition('.')[2]
+            return [info]
+        elif mobj.group('course'): # A course page
+            course = mobj.group('course')
+            info = {
+                'id': course,
+                'type': 'playlist',
+                'uploader': None,
+                'upload_date': None,
+            }
+
+            self.report_download_webpage(info['id'])
+            try:
+                coursepage = compat_urllib_request.urlopen(url).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
+                return
+
+            m = re.search('<h1>([^<]+)</h1>', coursepage)
+            if m:
+                info['title'] = unescapeHTML(m.group(1))
+            else:
+                info['title'] = info['id']
+
+            m = re.search('<description>([^<]+)</description>', coursepage)
+            if m:
+                info['description'] = unescapeHTML(m.group(1))
+
+            links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
+            info['list'] = [
+                {
+                    'type': 'reference',
+                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage),
+                }
+                for vpage in links]
+            results = []
+            for entry in info['list']:
+                assert entry['type'] == 'reference'
+                results += self.extract(entry['url'])
+            return results
+
+        else: # Root page
+            info = {
+                'id': 'Stanford OpenClassroom',
+                'type': 'playlist',
+                'uploader': None,
+                'upload_date': None,
+            }
+
+            self.report_download_webpage(info['id'])
+            rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
+            try:
+                rootpage = compat_urllib_request.urlopen(rootURL).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download root page: ' + compat_str(err))
+                return
+
+            info['title'] = info['id']
+
+            links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
+            info['list'] = [
+                {
+                    'type': 'reference',
+                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage),
+                }
+                for cpage in links]
+
+            results = []
+            for entry in info['list']:
+                assert entry['type'] == 'reference'
+                results += self.extract(entry['url'])
+            return results
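
Both playlist branches above follow the same pattern: scrape anchor targets, de-duplicate them while preserving page order, and re-enter the extractor for each reference. A minimal order-preserving de-dup matching the semantics the orderedSet helper is assumed to have (page fragment invented):

    import re

    def ordered_set(seq):
        # keep the first occurrence of each item, preserving page order
        seen = set()
        return [x for x in seq if not (x in seen or seen.add(x))]

    page = ('<a href="VideoPage.php?course=ml&video=01">one</a>'
            '<a href="VideoPage.php?course=ml&video=02">two</a>'
            '<a href="VideoPage.php?course=ml&video=01">dup</a>')
    links = ordered_set(re.findall(r'<a href="(VideoPage\.php\?[^"]+)">', page))
    assert links == ['VideoPage.php?course=ml&video=01',
                     'VideoPage.php?course=ml&video=02']
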
 
 
 class MTVIE(InfoExtractor):
-	"""Information extractor for MTV.com"""
-
-	_VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$'
-	IE_NAME = u'mtv'
-
-	def report_webpage(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
-
-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-		if not mobj.group('proto'):
-			url = 'http://' + url
-		video_id = mobj.group('videoid')
-		self.report_webpage(video_id)
-
-		request = urllib2.Request(url)
-		try:
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
-			return
-
-		mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract song name')
-			return
-		song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
-		mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract performer')
-			return
-		performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
-		video_title = performer + ' - ' + song_name 
-
-		mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to mtvn_uri')
-			return
-		mtvn_uri = mobj.group(1)
-
-		mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract content id')
-			return
-		content_id = mobj.group(1)
-
-		videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri
-		self.report_extraction(video_id)
-		request = urllib2.Request(videogen_url)
-		try:
-			metadataXml = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % str(err))
-			return
-
-		mdoc = xml.etree.ElementTree.fromstring(metadataXml)
-		renditions = mdoc.findall('.//rendition')
-
-		# For now, always pick the highest quality.
-		rendition = renditions[-1]
-
-		try:
-			_,_,ext = rendition.attrib['type'].partition('/')
-			format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate']
-			video_url = rendition.find('./src').text
-		except KeyError:
-			self._downloader.trouble('Invalid rendition field.')
-			return
-
-		info = {
-			'id': video_id,
-			'url': video_url,
-			'uploader': performer,
-			'title': video_title,
-			'ext': ext,
-			'format': format,
-		}
-
-		return [info]
+    """Information extractor for MTV.com"""
+
+    _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$'
+    IE_NAME = u'mtv'
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        if not mobj.group('proto'):
+            url = 'http://' + url
+        video_id = mobj.group('videoid')
+
+        webpage = self._download_webpage(url, video_id)
+
+        mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract song name')
+            return
+        song_name = unescapeHTML(mobj.group(1))
+        mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract performer')
+            return
+        performer = unescapeHTML(mobj.group(1))
+        video_title = performer + ' - ' + song_name
+
+        mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract mtvn_uri')
+            return
+        mtvn_uri = mobj.group(1)
+
+        mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract content id')
+            return
+        content_id = mobj.group(1)
+
+        videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri
+        self.report_extraction(video_id)
+        request = compat_urllib_request.Request(videogen_url)
+        try:
+            metadataXml = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
+            return
+
+        mdoc = xml.etree.ElementTree.fromstring(metadataXml)
+        renditions = mdoc.findall('.//rendition')
+
+        # For now, always pick the highest quality.
+        rendition = renditions[-1]
+
+        try:
+            _, _, ext = rendition.attrib['type'].partition('/')
+            format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate']
+            video_url = rendition.find('./src').text
+        except KeyError:
+            self._downloader.trouble('Invalid rendition field.')
+            return
+
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'uploader': performer,
+            'upload_date': None,
+            'title': video_title,
+            'ext': ext,
+            'format': format,
+        }
+
+        return [info]
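
The mediaGen response is plain XML, so the rendition handling above reduces to standard ElementTree calls; picking renditions[-1] relies on the document listing qualities in ascending order. A sketch against a fabricated snippet:

    import xml.etree.ElementTree as ET

    metadataXml = '''<package><video>
      <rendition type="video/mp4" width="640" height="360" bitrate="400">
        <src>rtmp://cdn.example/low.mp4</src>
      </rendition>
      <rendition type="video/mp4" width="1280" height="720" bitrate="1200">
        <src>rtmp://cdn.example/high.mp4</src>
      </rendition>
    </video></package>'''

    mdoc = ET.fromstring(metadataXml)
    rendition = mdoc.findall('.//rendition')[-1]  # last listed = highest quality
    _, _, ext = rendition.attrib['type'].partition('/')
    fmt = '%s-%sx%s_%s' % (ext, rendition.attrib['width'],
                           rendition.attrib['height'], rendition.attrib['bitrate'])
    assert fmt == 'mp4-1280x720_1200'
    assert rendition.find('./src').text == 'rtmp://cdn.example/high.mp4'
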
 
 
 
 
 class YoukuIE(InfoExtractor):
-
-	_VALID_URL =  r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
-	IE_NAME = u'Youku'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_download_webpage(self, file_id):
-		"""Report webpage download."""
-		self._downloader.to_screen(u'[Youku] %s: Downloading webpage' % file_id)
-
-	def report_extraction(self, file_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[Youku] %s: Extracting information' % file_id)
-
-	def _gen_sid(self):
-		nowTime = int(time.time() * 1000)
-		random1 = random.randint(1000,1998)
-		random2 = random.randint(1000,9999)
-
-		return "%d%d%d" %(nowTime,random1,random2)
-
-	def _get_file_ID_mix_string(self, seed):
-		mixed = []
-		source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
-		seed = float(seed)
-		for i in range(len(source)):
-			seed  =  (seed * 211 + 30031 ) % 65536
-			index  =  math.floor(seed / 65536 * len(source) )
-			mixed.append(source[int(index)])
-			source.remove(source[int(index)])
-		#return ''.join(mixed)
-		return mixed
-
-	def _get_file_id(self, fileId, seed):
-		mixed = self._get_file_ID_mix_string(seed)
-		ids = fileId.split('*')
-		realId = []
-		for ch in ids:
-			if ch:
-				realId.append(mixed[int(ch)])
-		return ''.join(realId)
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-		video_id = mobj.group('ID')
-
-		info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
-
-		request = urllib2.Request(info_url, None, std_headers)
-		try:
-			self.report_download_webpage(video_id)
-			jsondata = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-			return
-
-		self.report_extraction(video_id)
-		try:
-			config = json.loads(jsondata)
-
-			video_title =  config['data'][0]['title']
-			seed = config['data'][0]['seed']
-
-			format = self._downloader.params.get('format', None)
-			supported_format = config['data'][0]['streamfileids'].keys()
-
-			if format is None or format == 'best':
-				if 'hd2' in supported_format:
-					format = 'hd2'
-				else:
-					format = 'flv'
-				ext = u'flv'
-			elif format == 'worst':
-				format = 'mp4'
-				ext = u'mp4'
-			else:
-				format = 'flv'
-				ext = u'flv'
-
-
-			fileid = config['data'][0]['streamfileids'][format]
-			seg_number = len(config['data'][0]['segs'][format])
-
-			keys=[]
-			for i in xrange(seg_number):
-				keys.append(config['data'][0]['segs'][format][i]['k'])
-
-			#TODO check error
-			#youku only could be viewed from mainland china
-		except:
-			self._downloader.trouble(u'ERROR: unable to extract info section')
-			return
-
-		files_info=[]
-		sid = self._gen_sid()
-		fileid = self._get_file_id(fileid, seed)
-
-		#column 8,9 of fileid represent the segment number
-		#fileid[7:9] should be changed
-		for index, key in enumerate(keys):
-
-			temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
-			download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
-
-			info = {
-				'id': '%s_part%02d' % (video_id, index),
-				'url': download_url,
-				'uploader': None,
-				'title': video_title,
-				'ext': ext,
-				'format': u'NA'
-			}
-			files_info.append(info)
-
-		return files_info
+    _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
+
+    def report_download_webpage(self, file_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, file_id))
+
+    def report_extraction(self, file_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+
+    def _gen_sid(self):
+        nowTime = int(time.time() * 1000)
+        random1 = random.randint(1000, 1998)
+        random2 = random.randint(1000, 9999)
+
+        return "%d%d%d" % (nowTime, random1, random2)
+
+    def _get_file_ID_mix_string(self, seed):
+        mixed = []
+        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\\:._-1234567890")
+        seed = float(seed)
+        for i in range(len(source)):
+            seed = (seed * 211 + 30031) % 65536
+            index = math.floor(seed / 65536 * len(source))
+            mixed.append(source[int(index)])
+            source.remove(source[int(index)])
+        return mixed
+
+    def _get_file_id(self, fileId, seed):
+        mixed = self._get_file_ID_mix_string(seed)
+        ids = fileId.split('*')
+        realId = []
+        for ch in ids:
+            if ch:
+                realId.append(mixed[int(ch)])
+        return ''.join(realId)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        video_id = mobj.group('ID')
+
+        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
+
+        request = compat_urllib_request.Request(info_url, None, std_headers)
+        try:
+            self.report_download_webpage(video_id)
+            jsondata = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+
+        self.report_extraction(video_id)
+        try:
+            jsonstr = jsondata.decode('utf-8')
+            config = json.loads(jsonstr)
+
+            video_title = config['data'][0]['title']
+            seed = config['data'][0]['seed']
+
+            format = self._downloader.params.get('format', None)
+            supported_format = list(config['data'][0]['streamfileids'].keys())
+
+            if format is None or format == 'best':
+                if 'hd2' in supported_format:
+                    format = 'hd2'
+                else:
+                    format = 'flv'
+                ext = u'flv'
+            elif format == 'worst':
+                format = 'mp4'
+                ext = u'mp4'
+            else:
+                format = 'flv'
+                ext = u'flv'
+
+
+            fileid = config['data'][0]['streamfileids'][format]
+            keys = [s['k'] for s in config['data'][0]['segs'][format]]
+        except (UnicodeDecodeError, ValueError, KeyError):
+            self._downloader.trouble(u'ERROR: unable to extract info section')
+            return
+
+        files_info = []
+        sid = self._gen_sid()
+        fileid = self._get_file_id(fileid, seed)
+
+        #column 8,9 of fileid represent the segment number
+        #fileid[7:9] should be changed
+        for index, key in enumerate(keys):
+
+            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
+            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
+
+            info = {
+                'id': '%s_part%02d' % (video_id, index),
+                'url': download_url,
+                'uploader': None,
+                'upload_date': None,
+                'title': video_title,
+                'ext': ext,
+            }
+            files_info.append(info)
+
+        return files_info
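
The seed-keyed shuffle and the per-segment id rewrite above are self-contained and easy to sanity-check offline; a sketch with invented seed and index-string values:

    import math

    def mix_string(seed):
        # LCG-driven shuffle of the charset, keyed by the page-supplied seed
        source = list("abcdefghijklmnopqrstuvwxyz"
                      "ABCDEFGHIJKLMNOPQRSTUVWXYZ/\\:._-1234567890")
        mixed = []
        seed = float(seed)
        for _ in range(len(source)):
            seed = (seed * 211 + 30031) % 65536
            mixed.append(source.pop(int(math.floor(seed / 65536 * len(source)))))
        return mixed

    def real_file_id(scrambled, seed):
        # '1*2*3' -> look each index up in the shuffled charset
        mixed = mix_string(seed)
        return ''.join(mixed[int(ch)] for ch in scrambled.split('*') if ch)

    fileid = real_file_id('1*2*3*4*5*6*7*8*9*10*11*12', 1234)  # invented inputs
    # columns 8 and 9 carry the segment number, so segment 3's id becomes:
    seg3 = '%s%02X%s' % (fileid[0:8], 3, fileid[10:])
    print(fileid, '->', seg3)
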
 
 
 
 
 class XNXXIE(InfoExtractor):
-	"""Information extractor for xnxx.com"""
-
-	_VALID_URL = r'^(?:https?://)?video\.xnxx\.com/video([0-9]+)/(.*)'
-	IE_NAME = u'xnxx'
-	VIDEO_URL_RE = r'flv_url=(.*?)&amp;'
-	VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
-	VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;'
-
-	def report_webpage(self, video_id):
-		"""Report information extraction"""
-		self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
-
-	def report_extraction(self, video_id):
-		"""Report information extraction"""
-		self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-		video_id = mobj.group(1).decode('utf-8')
-
-		self.report_webpage(video_id)
-
-		# Get webpage content
-		try:
-			webpage = urllib2.urlopen(url).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
-			return
-
-		result = re.search(self.VIDEO_URL_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract video url')
-			return
-		video_url = urllib.unquote(result.group(1).decode('utf-8'))
-
-		result = re.search(self.VIDEO_TITLE_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract video title')
-			return
-		video_title = result.group(1).decode('utf-8')
-
-		result = re.search(self.VIDEO_THUMB_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
-			return
-		video_thumbnail = result.group(1).decode('utf-8')
-
-		info = {'id': video_id,
-				'url': video_url,
-				'uploader': None,
-				'upload_date': None,
-				'title': video_title,
-				'ext': 'flv',
-				'format': 'flv',
-				'thumbnail': video_thumbnail,
-				'description': None,
-				'player_url': None}
-
-		return [info]
+    """Information extractor for xnxx.com"""
+
+    _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
+    IE_NAME = u'xnxx'
+    VIDEO_URL_RE = r'flv_url=(.*?)&amp;'
+    VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
+    VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;'
+
+    def report_webpage(self, video_id):
+        """Report information extraction"""
+        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+    def report_extraction(self, video_id):
+        """Report information extraction"""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        video_id = mobj.group(1)
+
+        self.report_webpage(video_id)
+
+        # Get webpage content
+        try:
+            webpage_bytes = compat_urllib_request.urlopen(url).read()
+            webpage = webpage_bytes.decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            return
+
+        result = re.search(self.VIDEO_URL_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video url')
+            return
+        video_url = compat_urllib_parse.unquote(result.group(1))
+
+        result = re.search(self.VIDEO_TITLE_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = result.group(1)
+
+        result = re.search(self.VIDEO_THUMB_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            return
+        video_thumbnail = result.group(1)
+
+        return [{
+            'id': video_id,
+            'url': video_url,
+            'uploader': None,
+            'upload_date': None,
+            'title': video_title,
+            'ext': 'flv',
+            'thumbnail': video_thumbnail,
+            'description': None,
+        }]
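
Everything this extractor needs comes from three single-capture regexes over the page body; the only post-processing is percent-decoding the flv_url. A tiny illustration with a fabricated page fragment:

    import re
    from urllib.parse import unquote

    page = 'flv_url=http%3A%2F%2Fcdn.example%2Fclip.flv&amp;url_bigthumb=thumb.jpg&amp;'
    m = re.search(r'flv_url=(.*?)&amp;', page)
    assert unquote(m.group(1)) == 'http://cdn.example/clip.flv'
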
 
 
 
 
 class GooglePlusIE(InfoExtractor):
-	"""Information extractor for plus.google.com."""
-
-	_VALID_URL = r'(?:https://)?plus\.google\.com/(?:\w+/)*?(\d+)/posts/(\w+)'
-	IE_NAME = u'plus.google'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_extract_entry(self, url):
-		"""Report downloading extry"""
-		self._downloader.to_screen(u'[plus.google] Downloading entry: %s' % url.decode('utf-8'))
-
-	def report_date(self, upload_date):
-		"""Report downloading extry"""
-		self._downloader.to_screen(u'[plus.google] Entry date: %s' % upload_date)
-
-	def report_uploader(self, uploader):
-		"""Report downloading extry"""
-		self._downloader.to_screen(u'[plus.google] Uploader: %s' % uploader.decode('utf-8'))
-
-	def report_title(self, video_title):
-		"""Report downloading extry"""
-		self._downloader.to_screen(u'[plus.google] Title: %s' % video_title.decode('utf-8'))
-
-	def report_extract_vid_page(self, video_page):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[plus.google] Extracting video page: %s' % video_page.decode('utf-8'))
-
-	def _real_extract(self, url):
-		# Extract id from URL
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
-			return
-
-		post_url = mobj.group(0)
-		video_id = mobj.group(2)
-
-		video_extension = 'flv'
-
-		# Step 1, Retrieve post webpage to extract further information
-		self.report_extract_entry(post_url)
-		request = urllib2.Request(post_url)
-		try:
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % str(err))
-			return
-
-		# Extract update date
-		upload_date = u'NA'
-		pattern = 'title="Timestamp">(.*?)</a>'
-		mobj = re.search(pattern, webpage)
-		if mobj:
-			upload_date = mobj.group(1)
-			# Convert timestring to a format suitable for filename
-			upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
-			upload_date = upload_date.strftime('%Y%m%d')
-		self.report_date(upload_date)
-
-		# Extract uploader
-		uploader = u'NA'
-		pattern = r'rel\="author".*?>(.*?)</a>'
-		mobj = re.search(pattern, webpage)
-		if mobj:
-			uploader = mobj.group(1)
-		self.report_uploader(uploader)
-
-		# Extract title
-		# Get the first line for title
-		video_title = u'NA'
-		pattern = r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]'
-		mobj = re.search(pattern, webpage)
-		if mobj:
-			video_title = mobj.group(1)
-		self.report_title(video_title)
-
-		# Step 2, Stimulate clicking the image box to launch video
-		pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
-		mobj = re.search(pattern, webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video page URL')
-
-		video_page = mobj.group(1)
-		request = urllib2.Request(video_page)
-		try:
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-			return
-		self.report_extract_vid_page(video_page)
-
-
-		# Extract video links on video page
-		"""Extract video links of all sizes"""
-		pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
-		mobj = re.findall(pattern, webpage)
-		if len(mobj) == 0:
-			self._downloader.trouble(u'ERROR: unable to extract video links')
-
-		# Sort in resolution
-		links = sorted(mobj)
-
-		# Choose the lowest of the sort, i.e. highest resolution
-		video_url = links[-1]
-		# Only get the url. The resolution part in the tuple has no use anymore
-		video_url = video_url[-1]
-		# Treat escaped \u0026 style hex
-		video_url = unicode(video_url, "unicode_escape")
-
-
-		return [{
-			'id':		video_id.decode('utf-8'),
-			'url':		video_url,
-			'uploader':	uploader.decode('utf-8'),
-			'upload_date':	upload_date.decode('utf-8'),
-			'title':	video_title.decode('utf-8'),
-			'ext':		video_extension.decode('utf-8'),
-			'format':	u'NA',
-			'player_url':	None,
-		}]
-
-
-
-class YouPornIE(InfoExtractor):
-	"""Information extractor for youporn.com."""
-
-	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
-	IE_NAME = u'youporn'
-	VIDEO_TITLE_RE = r'videoTitleArea">(?P<title>.*)</h1>'
-	VIDEO_DATE_RE = r'Date:</b>(?P<date>.*)</li>'
-	VIDEO_UPLOADER_RE = r'Submitted:</b>(?P<uploader>.*)</li>'
-	DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
-	LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_id(self, video_id):
-		"""Report finding video ID"""
-		self._downloader.to_screen(u'[youporn] Video ID: %s' % video_id)
-
-	def report_webpage(self, url):
-		"""Report downloading page"""
-		self._downloader.to_screen(u'[youporn] Downloaded page: %s' % url)
-
-	def report_title(self, video_title):
-		"""Report dfinding title"""
-		self._downloader.to_screen(u'[youporn] Title: %s' % video_title)
-	
-	def report_uploader(self, uploader):
-		"""Report dfinding title"""
-		self._downloader.to_screen(u'[youporn] Uploader: %s' % uploader)
-
-	def report_upload_date(self, video_date):
-		"""Report finding date"""
-		self._downloader.to_screen(u'[youporn] Date: %s' % video_date)
-
-	def _print_formats(self, formats):
-		"""Print all available formats"""
-		print 'Available formats:'
-		print u'ext\t\tformat'
-		print u'---------------------------------'
-		for format in formats:
-			print u'%s\t\t%s'  % (format['ext'], format['format'])
-
-	def _specific(self, req_format, formats):
-		for x in formats:
-			if(x["format"]==req_format):
-				return x
-		return None
-
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-
-		video_id = mobj.group('videoid').decode('utf-8')
-		self.report_id(video_id)
-
-		# Get webpage content
-		try:
-			webpage = urllib2.urlopen(url).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
-			return
-		self.report_webpage(url)
-
-		# Get the video title
-		result = re.search(self.VIDEO_TITLE_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract video title')
-			return
-		video_title = result.group('title').decode('utf-8').strip()
-		self.report_title(video_title)
-
-		# Get the video date
-		result = re.search(self.VIDEO_DATE_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract video date')
-			return
-		upload_date = result.group('date').decode('utf-8').strip()
-		self.report_upload_date(upload_date)
-
-		# Get the video uploader
-		result = re.search(self.VIDEO_UPLOADER_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract uploader')
-			return
-		video_uploader = result.group('uploader').decode('utf-8').strip()
-		video_uploader = clean_html( video_uploader )
-		self.report_uploader(video_uploader)
-
-		# Get all of the formats available
-		result = re.search(self.DOWNLOAD_LIST_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract download list')
-			return
-		download_list_html = result.group('download_list').decode('utf-8').strip()
-
-		# Get all of the links from the page
-		links = re.findall(self.LINK_RE, download_list_html)
-		if(len(links) == 0):
-			self._downloader.trouble(u'ERROR: no known formats available for video')
-			return
-		
-		self._downloader.to_screen(u'[youporn] Links found: %d' % len(links))	
-
-		formats = []
-		for link in links:
-
-			# A link looks like this:
-			# http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
-			# A path looks like this:
-			# /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
-			video_url = unescapeHTML( link.decode('utf-8') )
-			path = urlparse( video_url ).path
-			extension = os.path.splitext( path )[1][1:]
-			format = path.split('/')[4].split('_')[:2]
-			size = format[0]
-			bitrate = format[1]
-			format = "-".join( format )
-			title = u'%s-%s-%s' % (video_title, size, bitrate)
-
-			formats.append({
-				'id': video_id,
-				'url': video_url,
-				'uploader': video_uploader,
-				'upload_date': upload_date,
-				'title': title,
-				'ext': extension,
-				'format': format,
-				'thumbnail': None,
-				'description': None,
-				'player_url': None
-			})
-
-		if self._downloader.params.get('listformats', None):
-			self._print_formats(formats)
-			return
-
-		req_format = self._downloader.params.get('format', None)
-		#format_limit = self._downloader.params.get('format_limit', None)
-		self._downloader.to_screen(u'[youporn] Format: %s' % req_format)
-
-
-		if req_format is None or req_format == 'best':
-			return [formats[0]]
-		elif req_format == 'worst':
-			return [formats[-1]]
-		elif req_format in ('-1', 'all'):
-			return formats
-		else:
-			format = self._specific( req_format, formats )
-			if result is None:
-				self._downloader.trouble(u'ERROR: requested format not available')
-				return
-			return [format]
-
-		
-
-
-class PornotubeIE(InfoExtractor):
-	"""Information extractor for pornotube.com."""
-
-	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
-	IE_NAME = u'pornotube'
-	VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
-	VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
-
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_extract_entry(self, url):
-		"""Report downloading extry"""
-		self._downloader.to_screen(u'[pornotube] Downloading entry: %s' % url.decode('utf-8'))
-
-	def report_date(self, upload_date):
-		"""Report finding uploaded date"""
-		self._downloader.to_screen(u'[pornotube] Entry date: %s' % upload_date)
-
-	def report_webpage(self, url):
-		"""Report downloading page"""
-		self._downloader.to_screen(u'[pornotube] Downloaded page: %s' % url)
-
-	def report_title(self, video_title):
-		"""Report downloading extry"""
-		self._downloader.to_screen(u'[pornotube] Title: %s' % video_title.decode('utf-8'))
-
-	def _real_extract(self, url):
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
-			return
-
-		video_id = mobj.group('videoid').decode('utf-8')
-		video_title = mobj.group('title').decode('utf-8')
-		self.report_title(video_title);
-
-		# Get webpage content
-		try:
-			webpage = urllib2.urlopen(url).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
-			return
-		self.report_webpage(url)
-
-		# Get the video URL
-		result = re.search(self.VIDEO_URL_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract video url')
-			return
-		video_url = urllib.unquote(result.group('url').decode('utf-8'))
-		self.report_extract_entry(video_url)
-
-		#Get the uploaded date
-		result = re.search(self.VIDEO_UPLOADED_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract video title')
-			return
-		upload_date = result.group('date').decode('utf-8')
-		self.report_date(upload_date);
-
-
-		info = {'id': video_id,
-				'url': video_url,
-				'uploader': None,
-				'upload_date': upload_date,
-				'title': video_title,
-				'ext': 'flv',
-				'format': 'flv',
-				'thumbnail': None,
-				'description': None,
-				'player_url': None}
-
-		return [info]
-
-
-
-
-class YouJizzIE(InfoExtractor):
-	"""Information extractor for youjizz.com."""
-
-	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/([^.]+).html$'
-	IE_NAME = u'youjizz'
-	VIDEO_TITLE_RE = r'<title>(?P<title>.*)</title>'
-	EMBED_PAGE_RE = r'http://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)'
-	SOURCE_RE = r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);'
-
-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
-
-	def report_extract_entry(self, url):
-		"""Report downloading extry"""
-		self._downloader.to_screen(u'[youjizz] Downloading entry: %s' % url.decode('utf-8'))
-
-	def report_webpage(self, url):
-		"""Report downloading page"""
-		self._downloader.to_screen(u'[youjizz] Downloaded page: %s' % url)
-
-	def report_title(self, video_title):
-		"""Report downloading extry"""
-		self._downloader.to_screen(u'[youjizz] Title: %s' % video_title.decode('utf-8'))
-
-	def report_embed_page(self, embed_page):
-		"""Report downloading extry"""
-		self._downloader.to_screen(u'[youjizz] Embed Page: %s' % embed_page.decode('utf-8'))
-
-	def _real_extract(self, url):
-		# Get webpage content
-		try:
-			webpage = urllib2.urlopen(url).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
-			return
-		self.report_webpage(url)
-
-		# Get the video title
-		result = re.search(self.VIDEO_TITLE_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract video title')
-			return
-		video_title = result.group('title').decode('utf-8').strip()
-		self.report_title(video_title)
-
-		# Get the embed page
-		result = re.search(self.EMBED_PAGE_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract embed page')
-			return
-
-		embed_page_url = result.group(0).decode('utf-8').strip()
-		video_id = result.group('videoid').decode('utf-8')
-		self.report_embed_page(embed_page_url)
-	
-		try:
-			webpage = urllib2.urlopen(embed_page_url).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video embed page: %s' % err)
-			return
-		
-		# Get the video URL
-		result = re.search(self.SOURCE_RE, webpage)
-		if result is None:
-			self._downloader.trouble(u'ERROR: unable to extract video url')
-			return
-		video_url = result.group('source').decode('utf-8')
-		self.report_extract_entry(video_url)
-
-		info = {'id': video_id,
-				'url': video_url,
-				'uploader': None,
-				'upload_date': None,
-				'title': video_title,
-				'ext': 'flv',
-				'format': 'flv',
-				'thumbnail': None,
-				'description': None,
-				'player_url': embed_page_url}
-
-		return [info]
+    """Information extractor for plus.google.com."""
+
+    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
+    IE_NAME = u'plus.google'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_extract_entry(self, url):
+        """Report downloading entry"""
+        self._downloader.to_screen(u'[plus.google] Downloading entry: %s' % url)
+
+    def report_date(self, upload_date):
+        """Report entry date"""
+        self._downloader.to_screen(u'[plus.google] Entry date: %s' % upload_date)
+
+    def report_uploader(self, uploader):
+        """Report uploader"""
+        self._downloader.to_screen(u'[plus.google] Uploader: %s' % uploader)
+
+    def report_title(self, video_title):
+        """Report title"""
+        self._downloader.to_screen(u'[plus.google] Title: %s' % video_title)
+
+    def report_extract_vid_page(self, video_page):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[plus.google] Extracting video page: %s' % video_page)
+
+    def _real_extract(self, url):
+        # Extract id from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        post_url = mobj.group(0)
+        video_id = mobj.group(1)
+
+        video_extension = 'flv'
+
+        # Step 1, Retrieve post webpage to extract further information
+        self.report_extract_entry(post_url)
+        request = compat_urllib_request.Request(post_url)
+        try:
+            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
+            return
+
+        # Extract upload date
+        upload_date = None
+        pattern = 'title="Timestamp">(.*?)</a>'
+        mobj = re.search(pattern, webpage)
+        if mobj:
+            upload_date = mobj.group(1)
+            # Convert timestring to a format suitable for filename
+            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
+            upload_date = upload_date.strftime('%Y%m%d')
+        self.report_date(upload_date)
+
+        # Extract uploader
+        uploader = None
+        pattern = r'rel\="author".*?>(.*?)</a>'
+        mobj = re.search(pattern, webpage)
+        if mobj:
+            uploader = mobj.group(1)
+        self.report_uploader(uploader)
+
+        # Extract title
+        # Get the first line for title
+        video_title = u'NA'
+        pattern = r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]'
+        mobj = re.search(pattern, webpage)
+        if mobj:
+            video_title = mobj.group(1)
+        self.report_title(video_title)
+
+        # Step 2, Simulate clicking the image box to launch video
+        pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
+        mobj = re.search(pattern, webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video page URL')
+            return
+
+        video_page = mobj.group(1)
+        request = compat_urllib_request.Request(video_page)
+        try:
+            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+        self.report_extract_vid_page(video_page)
+
+        # Extract video links of all sizes from the video page
+        pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
+        mobj = re.findall(pattern, webpage)
+        if len(mobj) == 0:
+            self._downloader.trouble(u'ERROR: unable to extract video links')
+            return
+
+        # Sort by resolution
+        links = sorted(mobj)
+
+        # Take the last entry of the sort, i.e. the highest resolution
+        video_url = links[-1]
+        # Keep only the URL; the resolution part of the tuple is no longer needed
+        video_url = video_url[-1]
+        # Decode escaped \u0026-style unicode sequences
+        try:
+            video_url = video_url.decode("unicode_escape")
+        except AttributeError: # Python 3
+            video_url = bytes(video_url, 'ascii').decode('unicode-escape')
+
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': uploader,
+            'upload_date':  upload_date,
+            'title':    video_title,
+            'ext':      video_extension,
+        }]
+
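
An aside on the unicode-escape step above: the Google+ page embeds video URLs with literal \u0026 sequences where & should appear, and the try/except pair handles the str/bytes split between Python 2 and 3. A minimal sketch of the same trick (the sample URL is invented for illustration):

    raw = 'http://redirector.googlevideo.com/videoplayback?id=abc\\u0026itag=34'
    try:
        video_url = raw.decode('unicode_escape')    # Python 2: str has .decode
    except AttributeError:                          # Python 3: str has no .decode
        video_url = bytes(raw, 'ascii').decode('unicode-escape')
    # -> 'http://redirector.googlevideo.com/videoplayback?id=abc&itag=34'
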
+class NBAIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*)(\?.*)?$'
+    IE_NAME = u'nba'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(1)
+        if video_id.endswith('/index.html'):
+            video_id = video_id[:-len('/index.html')]
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = u'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
+        def _findProp(rexp, default=None):
+            m = re.search(rexp, webpage)
+            if m:
+                return unescapeHTML(m.group(1))
+            else:
+                return default
+
+        shortened_video_id = video_id.rpartition('/')[2]
+        title = _findProp(r'<meta property="og:title" content="(.*?)"', shortened_video_id).replace('NBA.com: ', '')
+        info = {
+            'id': shortened_video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'upload_date': _findProp(r'<b>Date:</b> (.*?)</div>'),
+            'description': _findProp(r'<div class="description">(.*?)</h1>'),
+        }
+        return [info]
+
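
To make the CDN URL construction above concrete: for an illustrative page URL (invented here) such as http://www.nba.com/video/games/xyz/2012/12/23/0021200303.nba/index.html, group(1) of _VALID_URL is '/games/xyz/2012/12/23/0021200303.nba/index.html'; stripping the '/index.html' suffix and wrapping it gives

    video_url = 'http://ht-mobile.cdn.turner.com/nba/big/games/xyz/2012/12/23/0021200303.nba_nba_1280x720.mp4'

and shortened_video_id, the last path component used as the id in the info dict, is '0021200303.nba'.
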
+class JustinTVIE(InfoExtractor):
+    """Information extractor for justin.tv and twitch.tv"""
+    # TODO: One broadcast may be split into multiple videos. The key
+    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
+    # starts at 1 and increases. Can we treat all parts as one video?
+
+    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
+        ([^/]+)(?:/b/([^/]+))?/?(?:\#.*)?$"""
+    _JUSTIN_PAGE_LIMIT = 100
+    IE_NAME = u'justin.tv'
+
+    def report_extraction(self, file_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+
+    def report_download_page(self, channel, offset):
+        """Report attempt to download a single page of videos."""
+        self._downloader.to_screen(u'[%s] %s: Downloading video information from %d to %d' %
+                (self.IE_NAME, channel, offset, offset + self._JUSTIN_PAGE_LIMIT))
+
+    # Return count of items, list of *valid* items
+    def _parse_page(self, url):
+        try:
+            urlh = compat_urllib_request.urlopen(url)
+            webpage_bytes = urlh.read()
+            webpage = webpage_bytes.decode('utf-8', 'ignore')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
+            return
+
+        response = json.loads(webpage)
+        info = []
+        for clip in response:
+            video_url = clip['video_file_url']
+            if video_url:
+                video_extension = os.path.splitext(video_url)[1][1:]
+                video_date = re.sub('-', '', clip['created_on'][:10])
+                info.append({
+                    'id': clip['id'],
+                    'url': video_url,
+                    'title': clip['title'],
+                    'uploader': clip.get('user_id', clip.get('channel_id')),
+                    'upload_date': video_date,
+                    'ext': video_extension,
+                })
+        return (len(response), info)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        api = 'http://api.justin.tv'
+        video_id = mobj.group(mobj.lastindex)
+        paged = False
+        if mobj.lastindex == 1:
+            paged = True
+            api += '/channel/archives/%s.json'
+        else:
+            api += '/clip/show/%s.json'
+        api = api % (video_id,)
+
+        self.report_extraction(video_id)
+
+        info = []
+        offset = 0
+        limit = self._JUSTIN_PAGE_LIMIT
+        while True:
+            if paged:
+                self.report_download_page(video_id, offset)
+            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
+            page_count, page_info = self._parse_page(page_url)
+            info.extend(page_info)
+            if not paged or page_count != limit:
+                break
+            offset += limit
+        return info
+
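
For a channel URL (mobj.lastindex == 1) the loop above pages through the archive API in steps of _JUSTIN_PAGE_LIMIT; with an illustrative channel name it requests, in order:

    http://api.justin.tv/channel/archives/somechannel.json?offset=0&limit=100
    http://api.justin.tv/channel/archives/somechannel.json?offset=100&limit=100
    ...

stopping as soon as a page returns fewer than 100 clips. A single-clip URL hits /clip/show/<id>.json once and is never paged.
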
+class FunnyOrDieIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+
+        m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
+        if not m:
+            self._downloader.trouble(u'ERROR: unable to find video information')
+            return
+        video_url = unescapeHTML(m.group('url'))
+
+        m = re.search(r"class='player_page_h1'>\s+<a.*?>(?P<title>.*?)</a>", webpage)
+        if not m:
+            self._downloader.trouble(u'ERROR: Cannot find video title')
+            return
+        title = unescapeHTML(m.group('title'))
+
+        m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
+        if m:
+            desc = unescapeHTML(m.group('desc'))
+        else:
+            desc = None
+
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'description': desc,
+        }
+        return [info]
+
+class TweetReelIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?tweetreel\.com/[?](?P<id>[0-9a-z]+)$'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+
+        m = re.search(r'<div id="left" status_id="([0-9]+)">', webpage)
+        if not m:
+            self._downloader.trouble(u'ERROR: Cannot find status ID')
+            return
+        status_id = m.group(1)
+
+        m = re.search(r'<div class="tweet_text">(.*?)</div>', webpage, flags=re.DOTALL)
+        if not m:
+            self._downloader.trouble(u'ERROR: Cannot find description')
+            return
+        desc = unescapeHTML(re.sub('<a.*?</a>', '', m.group(1))).strip()
+
+        m = re.search(r'<div class="tweet_info">.*?from <a target="_blank" href="https?://twitter.com/(?P<uploader_id>.+?)">(?P<uploader>.+?)</a>', webpage, flags=re.DOTALL)
+        if not m:
+            self._downloader.trouble(u'ERROR: Cannot find uploader')
+            return
+        uploader = unescapeHTML(m.group('uploader'))
+        uploader_id = unescapeHTML(m.group('uploader_id'))
+
+        m = re.search(r'<span unixtime="([0-9]+)"', webpage)
+        if not m:
+            self._downloader.trouble(u'ERROR: Cannot find upload date')
+            return
+        upload_date = datetime.datetime.fromtimestamp(int(m.group(1))).strftime('%Y%m%d')
+
+        title = desc
+        video_url = 'http://files.tweetreel.com/video/' + status_id + '.mov'
+
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mov',
+            'title': title,
+            'description': desc,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+            'internal_id': status_id,
+            'upload_date': upload_date
+        }
+        return [info]
+        
+class SteamIE(InfoExtractor):
+    _VALID_URL = r"""http://store.steampowered.com/ 
+                (?P<urltype>video|app)/ #If the page is only for videos or for a game
+                (?P<gameID>\d+)/?
+                (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
+                """
+
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url, re.VERBOSE)
+        urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
+        gameID = m.group('gameID')
+        videourl = 'http://store.steampowered.com/video/%s/' % gameID
+        webpage = self._download_webpage(videourl, gameID)
+        mweb = re.finditer(urlRE, webpage)
+        namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
+        titles = re.finditer(namesRE, webpage)
+        videos = []
+        for vid, vtitle in zip(mweb, titles):
+            video_id = vid.group('videoID')
+            title = vtitle.group('videoName')
+            video_url = vid.group('videoURL')
+            if not video_url:
+                self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
+                continue
+            info = {
+                'id': video_id,
+                'url': video_url,
+                'ext': 'flv',
+                'title': unescapeHTML(title),
+            }
+            videos.append(info)
+        return videos
+        
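
SteamIE overrides suitable() because _VALID_URL is written for re.VERBOSE, which ignores unescaped whitespace and treats # as a comment delimiter inside the pattern; the base-class matcher does not pass that flag. A quick hedged check of the pattern (the game ID is arbitrary):

    import re
    m = re.match(SteamIE._VALID_URL, 'http://store.steampowered.com/video/105600/', re.VERBOSE)
    print(m.group('urltype'), m.group('gameID'))   # -> video 105600
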
+class UstreamIE(InfoExtractor):
+    _VALID_URL = r'http://www.ustream.tv/recorded/(?P<videoID>\d+)'
+    IE_NAME = u'ustream'
+    
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('videoID')
+        video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
+        webpage = self._download_webpage(url, video_id)
+        m = re.search(r'data-title="(?P<title>.+)"',webpage)
+        title = m.group('title')
+        m = re.search(r'<a class="state" data-content-type="channel" data-content-id="(?P<uploader>\d+)"',webpage)
+        uploader = m.group('uploader')
+        info = {
+                'id':video_id,
+                'url':video_url,
+                'ext': 'flv',
+                'title': title,
+                'uploader': uploader
+                  }
+        return [info]
+
+
+def gen_extractors():
+    """ Return a list of an instance of every supported extractor.
+    The order does matter; the first extractor matched is the one handling the URL.
+    """
+    return [
+        YoutubePlaylistIE(),
+        YoutubeChannelIE(),
+        YoutubeUserIE(),
+        YoutubeSearchIE(),
+        YoutubeIE(),
+        MetacafeIE(),
+        DailymotionIE(),
+        GoogleSearchIE(),
+        PhotobucketIE(),
+        YahooIE(),
+        YahooSearchIE(),
+        DepositFilesIE(),
+        FacebookIE(),
+        BlipTVUserIE(),
+        BlipTVIE(),
+        VimeoIE(),
+        MyVideoIE(),
+        ComedyCentralIE(),
+        EscapistIE(),
+        CollegeHumorIE(),
+        XVideosIE(),
+        SoundcloudIE(),
+        InfoQIE(),
+        MixcloudIE(),
+        StanfordOpenClassroomIE(),
+        MTVIE(),
+        YoukuIE(),
+        XNXXIE(),
+        GooglePlusIE(),
+        ArteTvIE(),
+        NBAIE(),
+        JustinTVIE(),
+        FunnyOrDieIE(),
+        TweetReelIE(),
+        SteamIE(),
+        UstreamIE(),
+        GenericIE()
+    ]
+
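
Since the first match wins, a caller dispatches a URL by walking this list in order; a minimal sketch (assuming the suitable() interface shown for SteamIE above is available on every extractor):

    def find_suitable_ie(url):
        for ie in gen_extractors():
            if ie.suitable(url):
                return ie
        return None   # rarely reached: GenericIE, listed last, accepts almost anything
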
 
 

+ 179 - 173
youtube_dl/PostProcessor.py

@@ -1,198 +1,204 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+from __future__ import absolute_import
+
 import os
 import subprocess
 import sys
 import time
 
-from utils import *
+from .utils import *
 
 
 class PostProcessor(object):
-	"""Post Processor class.
+    """Post Processor class.
 
 
-	PostProcessor objects can be added to downloaders with their
-	add_post_processor() method. When the downloader has finished a
-	successful download, it will take its internal chain of PostProcessors
-	and start calling the run() method on each one of them, first with
-	an initial argument and then with the returned value of the previous
-	PostProcessor.
+    PostProcessor objects can be added to downloaders with their
+    add_post_processor() method. When the downloader has finished a
+    successful download, it will take its internal chain of PostProcessors
+    and start calling the run() method on each one of them, first with
+    an initial argument and then with the returned value of the previous
+    PostProcessor.
 
-	The chain will be stopped if one of them ever returns None or the end
-	of the chain is reached.
+    The chain will be stopped if one of them ever returns None or the end
+    of the chain is reached.
 
-	PostProcessor objects follow a "mutual registration" process similar
-	to InfoExtractor objects.
-	"""
+    PostProcessor objects follow a "mutual registration" process similar
+    to InfoExtractor objects.
+    """
 
 
-	_downloader = None
+    _downloader = None
 
-	def __init__(self, downloader=None):
-		self._downloader = downloader
+    def __init__(self, downloader=None):
+        self._downloader = downloader
 
-	def set_downloader(self, downloader):
-		"""Sets the downloader for this PP."""
-		self._downloader = downloader
+    def set_downloader(self, downloader):
+        """Sets the downloader for this PP."""
+        self._downloader = downloader
 
-	def run(self, information):
-		"""Run the PostProcessor.
+    def run(self, information):
+        """Run the PostProcessor.
 
 
-		The "information" argument is a dictionary like the ones
-		composed by InfoExtractors. The only difference is that this
-		one has an extra field called "filepath" that points to the
-		downloaded file.
+        The "information" argument is a dictionary like the ones
+        composed by InfoExtractors. The only difference is that this
+        one has an extra field called "filepath" that points to the
+        downloaded file.
 
-		When this method returns None, the postprocessing chain is
-		stopped. However, this method may return an information
-		dictionary that will be passed to the next postprocessing
-		object in the chain. It can be the one it received after
-		changing some fields.
+        When this method returns None, the postprocessing chain is
+        stopped. However, this method may return an information
+        dictionary that will be passed to the next postprocessing
+        object in the chain. It can be the one it received after
+        changing some fields.
 
-		In addition, this method may raise a PostProcessingError
-		exception that will be taken into account by the downloader
-		it was called from.
-		"""
-		return information # by default, do nothing
+        In addition, this method may raise a PostProcessingError
+        exception that will be taken into account by the downloader
+        it was called from.
+        """
+        return information # by default, do nothing
 
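
As the docstring above spells out, a post-processor receives the info dictionary (extended with 'filepath') and returns either None, which stops the chain, or an info dictionary to hand to the next post-processor. A minimal conforming subclass, illustrative only (the class name is invented):

    class LogFilePP(PostProcessor):
        # Illustrative only: report the finished file and pass the info along.
        def run(self, information):
            self._downloader.to_screen(u'[logfile] Finished %s' % information['filepath'])
            return information   # returning None here would stop the chain
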
 class AudioConversionError(BaseException):
-	def __init__(self, message):
-		self.message = message
+    def __init__(self, message):
+        self.message = message
 
 class FFmpegExtractAudioPP(PostProcessor):
-	def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False):
-		PostProcessor.__init__(self, downloader)
-		if preferredcodec is None:
-			preferredcodec = 'best'
-		self._preferredcodec = preferredcodec
-		self._preferredquality = preferredquality
-		self._keepvideo = keepvideo
-		self._exes = self.detect_executables()
-
-	@staticmethod
-	def detect_executables():
-		def executable(exe):
-			try:
-				subprocess.Popen([exe, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
-			except OSError:
-				return False
-			return exe
-		programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
-		return dict((program, executable(program)) for program in programs)
-
-	def get_audio_codec(self, path):
-		if not self._exes['ffprobe'] and not self._exes['avprobe']: return None
-		try:
-			cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', '--', encodeFilename(path)]
-			handle = subprocess.Popen(cmd, stderr=file(os.path.devnull, 'w'), stdout=subprocess.PIPE)
-			output = handle.communicate()[0]
-			if handle.wait() != 0:
-				return None
-		except (IOError, OSError):
-			return None
-		audio_codec = None
-		for line in output.split('\n'):
-			if line.startswith('codec_name='):
-				audio_codec = line.split('=')[1].strip()
-			elif line.strip() == 'codec_type=audio' and audio_codec is not None:
-				return audio_codec
-		return None
-
-	def run_ffmpeg(self, path, out_path, codec, more_opts):
-		if not self._exes['ffmpeg'] and not self._exes['avconv']:
-			raise AudioConversionError('ffmpeg or avconv not found. Please install one.')	
-		if codec is None:
-			acodec_opts = []
-		else:
-			acodec_opts = ['-acodec', codec]
-		cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path), '-vn']
-			   + acodec_opts + more_opts +
-			   ['--', encodeFilename(out_path)])
-		p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-		stdout,stderr = p.communicate()
-		if p.returncode != 0:
-			msg = stderr.strip().split('\n')[-1]
-			raise AudioConversionError(msg)
-
-	def run(self, information):
-		path = information['filepath']
-
-		filecodec = self.get_audio_codec(path)
-		if filecodec is None:
-			self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
-			return None
-
-		more_opts = []
-		if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
-			if self._preferredcodec == 'm4a' and filecodec == 'aac':
-				# Lossless, but in another container
-				acodec = 'copy'
-				extension = self._preferredcodec
-				more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
-			elif filecodec in ['aac', 'mp3', 'vorbis']:
-				# Lossless if possible
-				acodec = 'copy'
-				extension = filecodec
-				if filecodec == 'aac':
-					more_opts = ['-f', 'adts']
-				if filecodec == 'vorbis':
-					extension = 'ogg'
-			else:
-				# MP3 otherwise.
-				acodec = 'libmp3lame'
-				extension = 'mp3'
-				more_opts = []
-				if self._preferredquality is not None:
-					if int(self._preferredquality) < 10:
-						more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
-					else:
-						more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
-		else:
-			# We convert the audio (lossy)
-			acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
-			extension = self._preferredcodec
-			more_opts = []
-			if self._preferredquality is not None:
-				if int(self._preferredquality) < 10:
-					more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
-				else:
-					more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
-			if self._preferredcodec == 'aac':
-				more_opts += ['-f', 'adts']
-			if self._preferredcodec == 'm4a':
-				more_opts += [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
-			if self._preferredcodec == 'vorbis':
-				extension = 'ogg'
-			if self._preferredcodec == 'wav':
-				extension = 'wav'
-				more_opts += ['-f', 'wav']
-
-		prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
-		new_path = prefix + sep + extension
-		self._downloader.to_screen(u'[' + (self._exes['avconv'] and 'avconv' or 'ffmpeg') + '] Destination: ' + new_path)
-		try:
-			self.run_ffmpeg(path, new_path, acodec, more_opts)
-		except:
-			etype,e,tb = sys.exc_info()
-			if isinstance(e, AudioConversionError):
-				self._downloader.to_stderr(u'ERROR: audio conversion failed: ' + e.message)
-			else:
-				self._downloader.to_stderr(u'ERROR: error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg'))
-			return None
-
- 		# Try to update the date time for extracted audio file.
-		if information.get('filetime') is not None:
-			try:
-				os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
-			except:
-				self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')
-
-		if not self._keepvideo:
-			try:
-				os.remove(encodeFilename(path))
-			except (IOError, OSError):
-				self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
-				return None
-
-		information['filepath'] = new_path
-		return information
+    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False, nopostoverwrites=False):
+        PostProcessor.__init__(self, downloader)
+        if preferredcodec is None:
+            preferredcodec = 'best'
+        self._preferredcodec = preferredcodec
+        self._preferredquality = preferredquality
+        self._keepvideo = keepvideo
+        self._nopostoverwrites = nopostoverwrites
+        self._exes = self.detect_executables()
+
+    @staticmethod
+    def detect_executables():
+        def executable(exe):
+            try:
+                subprocess.Popen([exe, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+            except OSError:
+                return False
+            return exe
+        programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
+        return dict((program, executable(program)) for program in programs)
+
+    def get_audio_codec(self, path):
+        if not self._exes['ffprobe'] and not self._exes['avprobe']: return None
+        try:
+            cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', '--', encodeFilename(path)]
+            handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
+            output = handle.communicate()[0]
+            if handle.wait() != 0:
+                return None
+        except (IOError, OSError):
+            return None
+        audio_codec = None
+        for line in output.decode('ascii', 'ignore').split('\n'):
+            if line.startswith('codec_name='):
+                audio_codec = line.split('=')[1].strip()
+            elif line.strip() == 'codec_type=audio' and audio_codec is not None:
+                return audio_codec
+        return None
+
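
get_audio_codec above scans the probe tool's -show_streams output line by line, remembering the last codec_name and returning it once a codec_type=audio line confirms the stream is audio. Abridged, the output it parses looks roughly like:

    [STREAM]
    codec_name=h264
    codec_type=video
    [/STREAM]
    [STREAM]
    codec_name=aac
    codec_type=audio
    [/STREAM]

so for this sample it would return 'aac'.
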
+    def run_ffmpeg(self, path, out_path, codec, more_opts):
+        if not self._exes['ffmpeg'] and not self._exes['avconv']:
+            raise AudioConversionError('ffmpeg or avconv not found. Please install one.')
+        if codec is None:
+            acodec_opts = []
+        else:
+            acodec_opts = ['-acodec', codec]
+        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path), '-vn']
+               + acodec_opts + more_opts +
+               ['--', encodeFilename(out_path)])
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        stdout,stderr = p.communicate()
+        if p.returncode != 0:
+            msg = stderr.strip().split('\n')[-1]
+            raise AudioConversionError(msg)
+
+    def run(self, information):
+        path = information['filepath']
+
+        filecodec = self.get_audio_codec(path)
+        if filecodec is None:
+            self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
+            return None
+
+        more_opts = []
+        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
+            if self._preferredcodec == 'm4a' and filecodec == 'aac':
+                # Lossless, but in another container
+                acodec = 'copy'
+                extension = self._preferredcodec
+                more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
+            elif filecodec in ['aac', 'mp3', 'vorbis']:
+                # Lossless if possible
+                acodec = 'copy'
+                extension = filecodec
+                if filecodec == 'aac':
+                    more_opts = ['-f', 'adts']
+                if filecodec == 'vorbis':
+                    extension = 'ogg'
+            else:
+                # MP3 otherwise.
+                acodec = 'libmp3lame'
+                extension = 'mp3'
+                more_opts = []
+                if self._preferredquality is not None:
+                    if int(self._preferredquality) < 10:
+                        more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
+                    else:
+                        more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
+        else:
+            # We convert the audio (lossy)
+            acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
+            extension = self._preferredcodec
+            more_opts = []
+            if self._preferredquality is not None:
+                if int(self._preferredquality) < 10:
+                    more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
+                else:
+                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
+            if self._preferredcodec == 'aac':
+                more_opts += ['-f', 'adts']
+            if self._preferredcodec == 'm4a':
+                more_opts += [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
+            if self._preferredcodec == 'vorbis':
+                extension = 'ogg'
+            if self._preferredcodec == 'wav':
+                extension = 'wav'
+                more_opts += ['-f', 'wav']
+
+        prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
+        new_path = prefix + sep + extension
+        try:
+            if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
+                self._downloader.to_screen(u'[ffmpeg] Post-process file %s exists, skipping' % new_path)
+            else:
+                self._downloader.to_screen(u'[' + (self._exes['avconv'] and 'avconv' or 'ffmpeg') + '] Destination: ' + new_path)
+                self.run_ffmpeg(path, new_path, acodec, more_opts)
+        except:
+            etype,e,tb = sys.exc_info()
+            if isinstance(e, AudioConversionError):
+                self._downloader.to_stderr(u'ERROR: audio conversion failed: ' + e.message)
+            else:
+                self._downloader.to_stderr(u'ERROR: error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg'))
+            return None
+
+        # Try to update the date time for extracted audio file.
+        if information.get('filetime') is not None:
+            try:
+                os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
+            except:
+                self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')
+
+        if not self._keepvideo:
+            try:
+                os.remove(encodeFilename(path))
+            except (IOError, OSError):
+                self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
+                return None
+
+        information['filepath'] = new_path
+        return information
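
Putting run_ffmpeg and the codec/quality selection together: with --extract-audio --audio-format mp3 --audio-quality 5 on a file video.flv whose audio is not already MP3, and with ffmpeg (not avconv) detected, the assembled command is roughly:

    ffmpeg -y -i video.flv -vn -acodec libmp3lame -aq 5 -- video.mp3

With avconv detected instead, the quality flag becomes -q:a rather than -aq.
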

+ 467 - 541
youtube_dl/__init__.py

@@ -2,33 +2,30 @@
 # -*- coding: utf-8 -*-
 
 from __future__ import with_statement
+from __future__ import absolute_import
 
 __authors__  = (
-	'Ricardo Garcia Gonzalez',
-	'Danny Colligan',
-	'Benjamin Johnson',
-	'Vasyl\' Vavrychuk',
-	'Witold Baryluk',
-	'Paweł Paprota',
-	'Gergely Imreh',
-	'Rogério Brito',
-	'Philipp Hagemeister',
-	'Sören Schulze',
-	'Kevin Ngo',
-	'Ori Avtalion',
-	'shizeeg',
-	'Filippo Valsorda',
-	)
+    'Ricardo Garcia Gonzalez',
+    'Danny Colligan',
+    'Benjamin Johnson',
+    'Vasyl\' Vavrychuk',
+    'Witold Baryluk',
+    'Paweł Paprota',
+    'Gergely Imreh',
+    'Rogério Brito',
+    'Philipp Hagemeister',
+    'Sören Schulze',
+    'Kevin Ngo',
+    'Ori Avtalion',
+    'shizeeg',
+    'Filippo Valsorda',
+    'Christian Albrecht',
+    'Dave Vasilevsky',
+    'Jaime Marquínez Ferrándiz',
+    )
 
 __license__ = 'Public Domain'
-__version__ = '2012.10.09'
 
-UPDATE_URL = 'https://raw.github.com/rg3/youtube-dl/master/youtube-dl'
-UPDATE_URL_VERSION = 'https://raw.github.com/rg3/youtube-dl/master/LATEST_VERSION'
-UPDATE_URL_EXE = 'https://raw.github.com/rg3/youtube-dl/master/youtube-dl.exe'
-
-
-import cookielib
 import getpass
 import optparse
 import os
@@ -37,530 +34,459 @@ import shlex
 import socket
 import subprocess
 import sys
-import urllib2
 import warnings
+import platform
 
-from utils import *
-from FileDownloader import *
-from InfoExtractors import *
-from PostProcessor import *
-
-def updateSelf(downloader, filename):
-	''' Update the program file with the latest version from the repository '''
-	# Note: downloader only used for options
-	
-	if not os.access(filename, os.W_OK):
-		sys.exit('ERROR: no write permissions on %s' % filename)
-
-	downloader.to_screen(u'Updating to latest version...')
-
-	urlv = urllib2.urlopen(UPDATE_URL_VERSION)
-	newversion = urlv.read().strip()
-	if newversion == __version__:
-		downloader.to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
-		return
-	urlv.close()
-
-	if hasattr(sys, "frozen"): #py2exe
-		exe = os.path.abspath(filename)
-		directory = os.path.dirname(exe)
-		if not os.access(directory, os.W_OK):
-			sys.exit('ERROR: no write permissions on %s' % directory)
-			
-		try:
-			urlh = urllib2.urlopen(UPDATE_URL_EXE)
-			newcontent = urlh.read()
-			urlh.close()
-			with open(exe + '.new', 'wb') as outf:
-				outf.write(newcontent)
-		except (IOError, OSError), err:
-			sys.exit('ERROR: unable to download latest version')
-			
-		try:
-			bat = os.path.join(directory, 'youtube-dl-updater.bat')
-			b = open(bat, 'w')
-			
-			print >> b, """
-echo Updating youtube-dl...
-ping 127.0.0.1 -n 5 -w 1000 > NUL
-move /Y "%s.new" "%s"
-del "%s"
-			""" %(exe, exe, bat)
-			
-			b.close()
-			
-			os.startfile(bat)
-		except (IOError, OSError), err:
-			sys.exit('ERROR: unable to overwrite current version')
-
-	else:
-		try:
-			urlh = urllib2.urlopen(UPDATE_URL)
-			newcontent = urlh.read()
-			urlh.close()
-		except (IOError, OSError), err:
-			sys.exit('ERROR: unable to download latest version')
-
-		try:
-			with open(filename, 'wb') as outf:
-				outf.write(newcontent)
-		except (IOError, OSError), err:
-			sys.exit('ERROR: unable to overwrite current version')
-
-	downloader.to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
+from .utils import *
+from .update import update_self
+from .version import __version__
+from .FileDownloader import *
+from .InfoExtractors import gen_extractors
+from .PostProcessor import *
 
 
 def parseOpts():
 def parseOpts():
-	def _readOptions(filename_bytes):
-		try:
-			optionf = open(filename_bytes)
-		except IOError:
-			return [] # silently skip if file is not present
-		try:
-			res = []
-			for l in optionf:
-				res += shlex.split(l, comments=True)
-		finally:
-			optionf.close()
-		return res
-
-	def _format_option_string(option):
-		''' ('-o', '--option') -> -o, --format METAVAR'''
-
-		opts = []
-
-		if option._short_opts: opts.append(option._short_opts[0])
-		if option._long_opts: opts.append(option._long_opts[0])
-		if len(opts) > 1: opts.insert(1, ', ')
-
-		if option.takes_value(): opts.append(' %s' % option.metavar)
-
-		return "".join(opts)
-
-	def _find_term_columns():
-		columns = os.environ.get('COLUMNS', None)
-		if columns:
-			return int(columns)
-
-		try:
-			sp = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-			out,err = sp.communicate()
-			return int(out.split()[1])
-		except:
-			pass
-		return None
-
-	max_width = 80
-	max_help_position = 80
-
-	# No need to wrap help messages if we're on a wide console
-	columns = _find_term_columns()
-	if columns: max_width = columns
-
-	fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
-	fmt.format_option_strings = _format_option_string
-
-	kw = {
-		'version'   : __version__,
-		'formatter' : fmt,
-		'usage' : '%prog [options] url [url...]',
-		'conflict_handler' : 'resolve',
-	}
-
-	parser = optparse.OptionParser(**kw)
-
-	# option groups
-	general        = optparse.OptionGroup(parser, 'General Options')
-	selection      = optparse.OptionGroup(parser, 'Video Selection')
-	authentication = optparse.OptionGroup(parser, 'Authentication Options')
-	video_format   = optparse.OptionGroup(parser, 'Video Format Options')
-	postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
-	filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
-	verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
-
-	general.add_option('-h', '--help',
-			action='help', help='print this help text and exit')
-	general.add_option('-v', '--version',
-			action='version', help='print program version and exit')
-	general.add_option('-U', '--update',
-			action='store_true', dest='update_self', help='update this program to latest version')
-	general.add_option('-i', '--ignore-errors',
-			action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
-	general.add_option('-r', '--rate-limit',
-			dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
-	general.add_option('-R', '--retries',
-			dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
-	general.add_option('--dump-user-agent',
-			action='store_true', dest='dump_user_agent',
-			help='display the current browser identification', default=False)
-	general.add_option('--user-agent',
-			dest='user_agent', help='specify a custom user agent', metavar='UA')
-	general.add_option('--list-extractors',
-			action='store_true', dest='list_extractors',
-			help='List all supported extractors and the URLs they would handle', default=False)
-
-	selection.add_option('--playlist-start',
-			dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is %default)', default=1)
-	selection.add_option('--playlist-end',
-			dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
-	selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
-	selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
-	selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', help='Abort after downloading NUMBER files', default=None)
-
-	authentication.add_option('-u', '--username',
-			dest='username', metavar='USERNAME', help='account username')
-	authentication.add_option('-p', '--password',
-			dest='password', metavar='PASSWORD', help='account password')
-	authentication.add_option('-n', '--netrc',
-			action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
-
-
-	video_format.add_option('-f', '--format',
-			action='store', dest='format', metavar='FORMAT', help='video format code')
-	video_format.add_option('--all-formats',
-			action='store_const', dest='format', help='download all available video formats', const='all')
-	video_format.add_option('--prefer-free-formats',
-			action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
-	video_format.add_option('--max-quality',
-			action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
-	video_format.add_option('-F', '--list-formats',
-			action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
-	video_format.add_option('--write-srt',
-			action='store_true', dest='writesubtitles',
-			help='write video closed captions to a .srt file (currently youtube only)', default=False)
-	video_format.add_option('--srt-lang',
-			action='store', dest='subtitleslang', metavar='LANG',
-			help='language of the closed captions to download (optional) use IETF language tags like \'en\'')
-
-
-	verbosity.add_option('-q', '--quiet',
-			action='store_true', dest='quiet', help='activates quiet mode', default=False)
-	verbosity.add_option('-s', '--simulate',
-			action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
-	verbosity.add_option('--skip-download',
-			action='store_true', dest='skip_download', help='do not download the video', default=False)
-	verbosity.add_option('-g', '--get-url',
-			action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
-	verbosity.add_option('-e', '--get-title',
-			action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
-	verbosity.add_option('--get-thumbnail',
-			action='store_true', dest='getthumbnail',
-			help='simulate, quiet but print thumbnail URL', default=False)
-	verbosity.add_option('--get-description',
-			action='store_true', dest='getdescription',
-			help='simulate, quiet but print video description', default=False)
-	verbosity.add_option('--get-filename',
-			action='store_true', dest='getfilename',
-			help='simulate, quiet but print output filename', default=False)
-	verbosity.add_option('--get-format',
-			action='store_true', dest='getformat',
-			help='simulate, quiet but print output format', default=False)
-	verbosity.add_option('--no-progress',
-			action='store_true', dest='noprogress', help='do not print progress bar', default=False)
-	verbosity.add_option('--console-title',
-			action='store_true', dest='consoletitle',
-			help='display progress in console titlebar', default=False)
-	verbosity.add_option('-v', '--verbose',
-			action='store_true', dest='verbose', help='print various debugging information', default=False)
-
-
-	filesystem.add_option('-t', '--title',
-			action='store_true', dest='usetitle', help='use title in file name', default=False)
-	filesystem.add_option('--id',
-			action='store_true', dest='useid', help='use video ID in file name', default=False)
-	filesystem.add_option('-l', '--literal',
-			action='store_true', dest='useliteral', help='use literal title in file name', default=False)
-	filesystem.add_option('-A', '--auto-number',
-			action='store_true', dest='autonumber',
-			help='number downloaded files starting from 00000', default=False)
-	filesystem.add_option('-o', '--output',
-			dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(stitle)s to get the title, %(uploader)s for the uploader name, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout.')
-	filesystem.add_option('-a', '--batch-file',
-			dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
-	filesystem.add_option('-w', '--no-overwrites',
-			action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
-	filesystem.add_option('-c', '--continue',
-			action='store_true', dest='continue_dl', help='resume partially downloaded files', default=True)
-	filesystem.add_option('--no-continue',
-			action='store_false', dest='continue_dl',
-			help='do not resume partially downloaded files (restart from beginning)')
-	filesystem.add_option('--cookies',
-			dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
-	filesystem.add_option('--no-part',
-			action='store_true', dest='nopart', help='do not use .part files', default=False)
-	filesystem.add_option('--no-mtime',
-			action='store_false', dest='updatetime',
-			help='do not use the Last-modified header to set the file modification time', default=True)
-	filesystem.add_option('--write-description',
-			action='store_true', dest='writedescription',
-			help='write video description to a .description file', default=False)
-	filesystem.add_option('--write-info-json',
-			action='store_true', dest='writeinfojson',
-			help='write video metadata to a .info.json file', default=False)
-
-
-	postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
-			help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
-	postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
-			help='"best", "aac", "vorbis", "mp3", "m4a", or "wav"; best by default')
-	postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
-			help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
-	postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
-			help='keeps the video file on disk after the post-processing; the video is erased by default')
-
-
-	parser.add_option_group(general)
-	parser.add_option_group(selection)
-	parser.add_option_group(filesystem)
-	parser.add_option_group(verbosity)
-	parser.add_option_group(video_format)
-	parser.add_option_group(authentication)
-	parser.add_option_group(postproc)
-
-	xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
-	if xdg_config_home:
-		userConf = os.path.join(xdg_config_home, 'youtube-dl.conf')
-	else:
-		userConf = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
-	argv = _readOptions('/etc/youtube-dl.conf') + _readOptions(userConf) + sys.argv[1:]
-	opts, args = parser.parse_args(argv)
-
-	return parser, opts, args
-
-def gen_extractors():
-	""" Return a list of an instance of every supported extractor.
-	The order does matter; the first extractor matched is the one handling the URL.
-	"""
-	return [
-		YoutubePlaylistIE(),
-		YoutubeChannelIE(),
-		YoutubeUserIE(),
-		YoutubeSearchIE(),
-		YoutubeIE(),
-		MetacafeIE(),
-		DailymotionIE(),
-		GoogleIE(),
-		GoogleSearchIE(),
-		PhotobucketIE(),
-		YahooIE(),
-		YahooSearchIE(),
-		DepositFilesIE(),
-		FacebookIE(),
-		BlipTVUserIE(),
-		BlipTVIE(),
-		VimeoIE(),
-		MyVideoIE(),
-		ComedyCentralIE(),
-		EscapistIE(),
-		CollegeHumorIE(),
-		XVideosIE(),
-		SoundcloudIE(),
-		InfoQIE(),
-		MixcloudIE(),
-		StanfordOpenClassroomIE(),
-		MTVIE(),
-		YoukuIE(),
-		XNXXIE(),
-		GooglePlusIE(),
-		PornotubeIE(),
-		YouPornIE(),
-		YouJizzIE(),
-		GenericIE()
-	]
+    def _readOptions(filename_bytes):
+        try:
+            optionf = open(filename_bytes)
+        except IOError:
+            return [] # silently skip if file is not present
+        try:
+            res = []
+            for l in optionf:
+                res += shlex.split(l, comments=True)
+        finally:
+            optionf.close()
+        return res
+
+    def _format_option_string(option):
+        ''' ('-o', '--option') -> -o, --format METAVAR'''
+
+        opts = []
+
+        if option._short_opts:
+            opts.append(option._short_opts[0])
+        if option._long_opts:
+            opts.append(option._long_opts[0])
+        if len(opts) > 1:
+            opts.insert(1, ', ')
+
+        if option.takes_value(): opts.append(' %s' % option.metavar)
+
+        return "".join(opts)
+
+    def _find_term_columns():
+        columns = os.environ.get('COLUMNS', None)
+        if columns:
+            return int(columns)
+
+        try:
+            sp = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            out,err = sp.communicate()
+            return int(out.split()[1])
+        except:
+            pass
+        return None
+
+    max_width = 80
+    max_help_position = 80
+
+    # No need to wrap help messages if we're on a wide console
+    columns = _find_term_columns()
+    if columns: max_width = columns
+
+    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
+    fmt.format_option_strings = _format_option_string
+
+    kw = {
+        'version'   : __version__,
+        'formatter' : fmt,
+        'usage' : '%prog [options] url [url...]',
+        'conflict_handler' : 'resolve',
+    }
+
+    parser = optparse.OptionParser(**kw)
+
+    # option groups
+    general        = optparse.OptionGroup(parser, 'General Options')
+    selection      = optparse.OptionGroup(parser, 'Video Selection')
+    authentication = optparse.OptionGroup(parser, 'Authentication Options')
+    video_format   = optparse.OptionGroup(parser, 'Video Format Options')
+    postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
+    filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
+    verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
+
+    general.add_option('-h', '--help',
+            action='help', help='print this help text and exit')
+    general.add_option('-v', '--version',
+            action='version', help='print program version and exit')
+    general.add_option('-U', '--update',
+            action='store_true', dest='update_self', help='update this program to latest version')
+    general.add_option('-i', '--ignore-errors',
+            action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
+    general.add_option('-r', '--rate-limit',
+            dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
+    general.add_option('-R', '--retries',
+            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
+    general.add_option('--buffer-size',
+            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16k) (default is %default)', default="1024")
+    general.add_option('--no-resize-buffer',
+            action='store_true', dest='noresizebuffer',
+            help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
+    general.add_option('--dump-user-agent',
+            action='store_true', dest='dump_user_agent',
+            help='display the current browser identification', default=False)
+    general.add_option('--user-agent',
+            dest='user_agent', help='specify a custom user agent', metavar='UA')
+    general.add_option('--list-extractors',
+            action='store_true', dest='list_extractors',
+            help='List all supported extractors and the URLs they would handle', default=False)
+    general.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
+
+    selection.add_option('--playlist-start',
+            dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is %default)', default=1)
+    selection.add_option('--playlist-end',
+            dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
+    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
+    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
+    selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', help='Abort after downloading NUMBER files', default=None)
+
+    authentication.add_option('-u', '--username',
+            dest='username', metavar='USERNAME', help='account username')
+    authentication.add_option('-p', '--password',
+            dest='password', metavar='PASSWORD', help='account password')
+    authentication.add_option('-n', '--netrc',
+            action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
+
+
+    video_format.add_option('-f', '--format',
+            action='store', dest='format', metavar='FORMAT', help='video format code')
+    video_format.add_option('--all-formats',
+            action='store_const', dest='format', help='download all available video formats', const='all')
+    video_format.add_option('--prefer-free-formats',
+            action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
+    video_format.add_option('--max-quality',
+            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
+    video_format.add_option('-F', '--list-formats',
+            action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
+    video_format.add_option('--write-srt',
+            action='store_true', dest='writesubtitles',
+            help='write video closed captions to a .srt file (currently youtube only)', default=False)
+    video_format.add_option('--srt-lang',
+            action='store', dest='subtitleslang', metavar='LANG',
+            help='language of the closed captions to download (optional) use IETF language tags like \'en\'')
+
+
+    verbosity.add_option('-q', '--quiet',
+            action='store_true', dest='quiet', help='activates quiet mode', default=False)
+    verbosity.add_option('-s', '--simulate',
+            action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
+    verbosity.add_option('--skip-download',
+            action='store_true', dest='skip_download', help='do not download the video', default=False)
+    verbosity.add_option('-g', '--get-url',
+            action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
+    verbosity.add_option('-e', '--get-title',
+            action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
+    verbosity.add_option('--get-thumbnail',
+            action='store_true', dest='getthumbnail',
+            help='simulate, quiet but print thumbnail URL', default=False)
+    verbosity.add_option('--get-description',
+            action='store_true', dest='getdescription',
+            help='simulate, quiet but print video description', default=False)
+    verbosity.add_option('--get-filename',
+            action='store_true', dest='getfilename',
+            help='simulate, quiet but print output filename', default=False)
+    verbosity.add_option('--get-format',
+            action='store_true', dest='getformat',
+            help='simulate, quiet but print output format', default=False)
+    verbosity.add_option('--no-progress',
+            action='store_true', dest='noprogress', help='do not print progress bar', default=False)
+    verbosity.add_option('--console-title',
+            action='store_true', dest='consoletitle',
+            help='display progress in console titlebar', default=False)
+    verbosity.add_option('-v', '--verbose',
+            action='store_true', dest='verbose', help='print various debugging information', default=False)
+
+
+    filesystem.add_option('-t', '--title',
+            action='store_true', dest='usetitle', help='use title in file name', default=False)
+    filesystem.add_option('--id',
+            action='store_true', dest='useid', help='use video ID in file name', default=False)
+    filesystem.add_option('-l', '--literal',
+            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
+    filesystem.add_option('-A', '--auto-number',
+            action='store_true', dest='autonumber',
+            help='number downloaded files starting from 00000', default=False)
+    filesystem.add_option('-o', '--output',
+            dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')
+    filesystem.add_option('--restrict-filenames',
+            action='store_true', dest='restrictfilenames',
+            help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
+    filesystem.add_option('-a', '--batch-file',
+            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
+    filesystem.add_option('-w', '--no-overwrites',
+            action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
+    filesystem.add_option('-c', '--continue',
+            action='store_true', dest='continue_dl', help='resume partially downloaded files', default=True)
+    filesystem.add_option('--no-continue',
+            action='store_false', dest='continue_dl',
+            help='do not resume partially downloaded files (restart from beginning)')
+    filesystem.add_option('--cookies',
+            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
+    filesystem.add_option('--no-part',
+            action='store_true', dest='nopart', help='do not use .part files', default=False)
+    filesystem.add_option('--no-mtime',
+            action='store_false', dest='updatetime',
+            help='do not use the Last-modified header to set the file modification time', default=True)
+    filesystem.add_option('--write-description',
+            action='store_true', dest='writedescription',
+            help='write video description to a .description file', default=False)
+    filesystem.add_option('--write-info-json',
+            action='store_true', dest='writeinfojson',
+            help='write video metadata to a .info.json file', default=False)
+
+
+    postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
+            help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
+    postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
+            help='"best", "aac", "vorbis", "mp3", "m4a", or "wav"; best by default')
+    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
+            help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
+    postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
+            help='keeps the video file on disk after the post-processing; the video is erased by default')
+    postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
+            help='do not overwrite post-processed files; the post-processed files are overwritten by default')
+
+
+    parser.add_option_group(general)
+    parser.add_option_group(selection)
+    parser.add_option_group(filesystem)
+    parser.add_option_group(verbosity)
+    parser.add_option_group(video_format)
+    parser.add_option_group(authentication)
+    parser.add_option_group(postproc)
+
+    xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
+    if xdg_config_home:
+        userConf = os.path.join(xdg_config_home, 'youtube-dl.conf')
+    else:
+        userConf = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
+    argv = _readOptions('/etc/youtube-dl.conf') + _readOptions(userConf) + sys.argv[1:]
+    opts, args = parser.parse_args(argv)
+
+    return parser, opts, args
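
_readOptions is defined earlier in this file and not shown in this hunk. For context, a minimal sketch of the contract the three calls above rely on (return the whitespace-split tokens of a config file, or an empty list when the file is missing); this is a hypothetical reconstruction, not the real helper:

    def _readOptions(filename):
        # Hypothetical reconstruction: config-file tokens become extra argv entries.
        try:
            with open(filename) as optionf:
                return optionf.read().split()
        except IOError:  # config file not present
            return []

System-wide options come first, the user's (XDG) config second, and the real command line last, so the command line can override both config files.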
 
 
 def _real_main():
-	parser, opts, args = parseOpts()
-
-	# Open appropriate CookieJar
-	if opts.cookiefile is None:
-		jar = cookielib.CookieJar()
-	else:
-		try:
-			jar = cookielib.MozillaCookieJar(opts.cookiefile)
-			if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
-				jar.load()
-		except (IOError, OSError), err:
-			sys.exit(u'ERROR: unable to open cookie file')
-	# Set user agent
-	if opts.user_agent is not None:
-		std_headers['User-Agent'] = opts.user_agent
-
-	# Dump user agent
-	if opts.dump_user_agent:
-		print std_headers['User-Agent']
-		sys.exit(0)
-
-	# Batch file verification
-	batchurls = []
-	if opts.batchfile is not None:
-		try:
-			if opts.batchfile == '-':
-				batchfd = sys.stdin
-			else:
-				batchfd = open(opts.batchfile, 'r')
-			batchurls = batchfd.readlines()
-			batchurls = [x.strip() for x in batchurls]
-			batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
-		except IOError:
-			sys.exit(u'ERROR: batch file could not be read')
-	all_urls = batchurls + args
-	all_urls = map(lambda url: url.strip(), all_urls)
-
-	# General configuration
-	cookie_processor = urllib2.HTTPCookieProcessor(jar)
-	proxy_handler = urllib2.ProxyHandler()
-	opener = urllib2.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
-	urllib2.install_opener(opener)
-	socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
-
-	extractors = gen_extractors()
-
-	if opts.list_extractors:
-		for ie in extractors:
-			print(ie.IE_NAME)
-			matchedUrls = filter(lambda url: ie.suitable(url), all_urls)
-			all_urls = filter(lambda url: url not in matchedUrls, all_urls)
-			for mu in matchedUrls:
-				print(u'  ' + mu)
-		sys.exit(0)
-
-	# Conflicting, missing and erroneous options
-	if opts.usenetrc and (opts.username is not None or opts.password is not None):
-		parser.error(u'using .netrc conflicts with giving username/password')
-	if opts.password is not None and opts.username is None:
-		parser.error(u'account username missing')
-	if opts.outtmpl is not None and (opts.useliteral or opts.usetitle or opts.autonumber or opts.useid):
-		parser.error(u'using output template conflicts with using title, literal title, video ID or auto number')
-	if opts.usetitle and opts.useliteral:
-		parser.error(u'using title conflicts with using literal title')
-	if opts.usetitle and opts.useid:
-		parser.error(u'using title conflicts with using video ID')
-	if opts.useliteral and opts.useid:
-		parser.error(u'using literal title conflicts with using video ID')
-	if opts.username is not None and opts.password is None:
-		opts.password = getpass.getpass(u'Type account password and press return:')
-	if opts.ratelimit is not None:
-		numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
-		if numeric_limit is None:
-			parser.error(u'invalid rate limit specified')
-		opts.ratelimit = numeric_limit
-	if opts.retries is not None:
-		try:
-			opts.retries = long(opts.retries)
-		except (TypeError, ValueError), err:
-			parser.error(u'invalid retry count specified')
-	try:
-		opts.playliststart = int(opts.playliststart)
-		if opts.playliststart <= 0:
-			raise ValueError(u'Playlist start must be positive')
-	except (TypeError, ValueError), err:
-		parser.error(u'invalid playlist start number specified')
-	try:
-		opts.playlistend = int(opts.playlistend)
-		if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
-			raise ValueError(u'Playlist end must be greater than playlist start')
-	except (TypeError, ValueError), err:
-		parser.error(u'invalid playlist end number specified')
-	if opts.extractaudio:
-		if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis', 'm4a', 'wav']:
-			parser.error(u'invalid audio format specified')
-	if opts.audioquality:
-		opts.audioquality = opts.audioquality.strip('k').strip('K')
-		if not opts.audioquality.isdigit():
-			parser.error(u'invalid audio quality specified')
-
-	# File downloader
-	fd = FileDownloader({
-		'usenetrc': opts.usenetrc,
-		'username': opts.username,
-		'password': opts.password,
-		'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
-		'forceurl': opts.geturl,
-		'forcetitle': opts.gettitle,
-		'forcethumbnail': opts.getthumbnail,
-		'forcedescription': opts.getdescription,
-		'forcefilename': opts.getfilename,
-		'forceformat': opts.getformat,
-		'simulate': opts.simulate,
-		'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
-		'format': opts.format,
-		'format_limit': opts.format_limit,
-		'listformats': opts.listformats,
-		'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
-			or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
-			or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
-			or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
-			or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(stitle)s-%(id)s.%(ext)s')
-			or (opts.useliteral and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
-			or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
-			or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
-			or (opts.useid and u'%(id)s.%(ext)s')
-			or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
-			or u'%(id)s.%(ext)s'),
-		'ignoreerrors': opts.ignoreerrors,
-		'ratelimit': opts.ratelimit,
-		'nooverwrites': opts.nooverwrites,
-		'retries': opts.retries,
-		'continuedl': opts.continue_dl,
-		'noprogress': opts.noprogress,
-		'playliststart': opts.playliststart,
-		'playlistend': opts.playlistend,
-		'logtostderr': opts.outtmpl == '-',
-		'consoletitle': opts.consoletitle,
-		'nopart': opts.nopart,
-		'updatetime': opts.updatetime,
-		'writedescription': opts.writedescription,
-		'writeinfojson': opts.writeinfojson,
-		'writesubtitles': opts.writesubtitles,
-		'subtitleslang': opts.subtitleslang,
-		'matchtitle': opts.matchtitle,
-		'rejecttitle': opts.rejecttitle,
-		'max_downloads': opts.max_downloads,
-		'prefer_free_formats': opts.prefer_free_formats,
-		'verbose': opts.verbose,
-		})
-
-	if opts.verbose:
-		fd.to_screen(u'[debug] Proxy map: ' + str(proxy_handler.proxies))
-
-	for extractor in extractors:
-		fd.add_info_extractor(extractor)
-
-	# PostProcessors
-	if opts.extractaudio:
-		fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, keepvideo=opts.keepvideo))
-
-	# Update version
-	if opts.update_self:
-		updateSelf(fd, sys.argv[0])
-
-	# Maybe do nothing
-	if len(all_urls) < 1:
-		if not opts.update_self:
-			parser.error(u'you must provide at least one URL')
-		else:
-			sys.exit()
-	
-	try:
-		retcode = fd.download(all_urls)
-	except MaxDownloadsReached:
-		fd.to_screen(u'--max-download limit reached, aborting.')
-		retcode = 101
-
-	# Dump cookie jar if requested
-	if opts.cookiefile is not None:
-		try:
-			jar.save()
-		except (IOError, OSError), err:
-			sys.exit(u'ERROR: unable to save cookie jar')
-
-	sys.exit(retcode)
+    parser, opts, args = parseOpts()
+
+    # Open appropriate CookieJar
+    if opts.cookiefile is None:
+        jar = compat_cookiejar.CookieJar()
+    else:
+        try:
+            jar = compat_cookiejar.MozillaCookieJar(opts.cookiefile)
+            if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
+                jar.load()
+        except (IOError, OSError) as err:
+            sys.exit(u'ERROR: unable to open cookie file')
+    # Set user agent
+    if opts.user_agent is not None:
+        std_headers['User-Agent'] = opts.user_agent
+
+    # Dump user agent
+    if opts.dump_user_agent:
+        print(std_headers['User-Agent'])
+        sys.exit(0)
+
+    # Batch file verification
+    batchurls = []
+    if opts.batchfile is not None:
+        try:
+            if opts.batchfile == '-':
+                batchfd = sys.stdin
+            else:
+                batchfd = open(opts.batchfile, 'r')
+            batchurls = batchfd.readlines()
+            batchurls = [x.strip() for x in batchurls]
+            batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
+        except IOError:
+            sys.exit(u'ERROR: batch file could not be read')
+    all_urls = batchurls + args
+    all_urls = [url.strip() for url in all_urls]
+
+    # General configuration
+    cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
+    proxy_handler = compat_urllib_request.ProxyHandler()
+    opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
+    compat_urllib_request.install_opener(opener)
+    socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
+
+    extractors = gen_extractors()
+
+    if opts.list_extractors:
+        for ie in extractors:
+            print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
+            matchedUrls = [url for url in all_urls if ie.suitable(url)]
+            all_urls = [url for url in all_urls if url not in matchedUrls]
+            for mu in matchedUrls:
+                print(u'  ' + mu)
+        sys.exit(0)
+
+    # Conflicting, missing and erroneous options
+    if opts.usenetrc and (opts.username is not None or opts.password is not None):
+        parser.error(u'using .netrc conflicts with giving username/password')
+    if opts.password is not None and opts.username is None:
+        parser.error(u'account username missing')
+    if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
+        parser.error(u'using output template conflicts with using title, video ID or auto number')
+    if opts.usetitle and opts.useid:
+        parser.error(u'using title conflicts with using video ID')
+    if opts.username is not None and opts.password is None:
+        opts.password = getpass.getpass(u'Type account password and press return:')
+    if opts.ratelimit is not None:
+        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
+        if numeric_limit is None:
+            parser.error(u'invalid rate limit specified')
+        opts.ratelimit = numeric_limit
+    if opts.retries is not None:
+        try:
+            opts.retries = int(opts.retries)
+        except (TypeError, ValueError) as err:
+            parser.error(u'invalid retry count specified')
+    if opts.buffersize is not None:
+        numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
+        if numeric_buffersize is None:
+            parser.error(u'invalid buffer size specified')
+        opts.buffersize = numeric_buffersize
+    try:
+        opts.playliststart = int(opts.playliststart)
+        if opts.playliststart <= 0:
+            raise ValueError(u'Playlist start must be positive')
+    except (TypeError, ValueError) as err:
+        parser.error(u'invalid playlist start number specified')
+    try:
+        opts.playlistend = int(opts.playlistend)
+        if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
+            raise ValueError(u'Playlist end must be greater than playlist start')
+    except (TypeError, ValueError) as err:
+        parser.error(u'invalid playlist end number specified')
+    if opts.extractaudio:
+        if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis', 'm4a', 'wav']:
+            parser.error(u'invalid audio format specified')
+    if opts.audioquality:
+        opts.audioquality = opts.audioquality.strip('k').strip('K')
+        if not opts.audioquality.isdigit():
+            parser.error(u'invalid audio quality specified')
+
+    if sys.version_info < (3,):
+        # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
+        if opts.outtmpl is not None:
+            opts.outtmpl = opts.outtmpl.decode(preferredencoding())
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
+            or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
+            or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
+            or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+            or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
+            or (opts.useid and u'%(id)s.%(ext)s')
+            or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
+            or u'%(id)s.%(ext)s')
+    # File downloader
+    fd = FileDownloader({
+        'usenetrc': opts.usenetrc,
+        'username': opts.username,
+        'password': opts.password,
+        'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
+        'forceurl': opts.geturl,
+        'forcetitle': opts.gettitle,
+        'forcethumbnail': opts.getthumbnail,
+        'forcedescription': opts.getdescription,
+        'forcefilename': opts.getfilename,
+        'forceformat': opts.getformat,
+        'simulate': opts.simulate,
+        'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
+        'format': opts.format,
+        'format_limit': opts.format_limit,
+        'listformats': opts.listformats,
+        'outtmpl': outtmpl,
+        'restrictfilenames': opts.restrictfilenames,
+        'ignoreerrors': opts.ignoreerrors,
+        'ratelimit': opts.ratelimit,
+        'nooverwrites': opts.nooverwrites,
+        'retries': opts.retries,
+        'buffersize': opts.buffersize,
+        'noresizebuffer': opts.noresizebuffer,
+        'continuedl': opts.continue_dl,
+        'noprogress': opts.noprogress,
+        'playliststart': opts.playliststart,
+        'playlistend': opts.playlistend,
+        'logtostderr': opts.outtmpl == '-',
+        'consoletitle': opts.consoletitle,
+        'nopart': opts.nopart,
+        'updatetime': opts.updatetime,
+        'writedescription': opts.writedescription,
+        'writeinfojson': opts.writeinfojson,
+        'writesubtitles': opts.writesubtitles,
+        'subtitleslang': opts.subtitleslang,
+        'matchtitle': opts.matchtitle,
+        'rejecttitle': opts.rejecttitle,
+        'max_downloads': opts.max_downloads,
+        'prefer_free_formats': opts.prefer_free_formats,
+        'verbose': opts.verbose,
+        'test': opts.test,
+        })
+
+    if opts.verbose:
+        fd.to_screen(u'[debug] youtube-dl version ' + __version__)
+        try:
+            sp = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                                  cwd=os.path.dirname(os.path.abspath(__file__)))
+            out, err = sp.communicate()
+            out = out.decode().strip()
+            if re.match('[0-9a-f]+', out):
+                fd.to_screen(u'[debug] Git HEAD: ' + out)
+        except:
+            pass
+        fd.to_screen(u'[debug] Python version %s - %s' %(platform.python_version(), platform.platform()))
+        fd.to_screen(u'[debug] Proxy map: ' + str(proxy_handler.proxies))
+
+    for extractor in extractors:
+        fd.add_info_extractor(extractor)
+
+    # PostProcessors
+    if opts.extractaudio:
+        fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, keepvideo=opts.keepvideo, nopostoverwrites=opts.nopostoverwrites))
+
+    # Update version
+    if opts.update_self:
+        update_self(fd.to_screen, opts.verbose, sys.argv[0])
+
+    # Maybe do nothing
+    if len(all_urls) < 1:
+        if not opts.update_self:
+            parser.error(u'you must provide at least one URL')
+        else:
+            sys.exit()
+
+    try:
+        retcode = fd.download(all_urls)
+    except MaxDownloadsReached:
+        fd.to_screen(u'--max-download limit reached, aborting.')
+        retcode = 101
+
+    # Dump cookie jar if requested
+    if opts.cookiefile is not None:
+        try:
+            jar.save()
+        except (IOError, OSError) as err:
+            sys.exit(u'ERROR: unable to save cookie jar')
+
+    sys.exit(retcode)
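
The outtmpl chain above reduces to ordinary %-formatting against the video's info dict once a template has been picked. A quick illustration with made-up field values:

    info = {u'title': u'Some Video', u'id': u'abc123', u'ext': u'mp4'}
    template = u'%(title)s-%(id)s.%(ext)s'   # the --title template from the chain above
    print(template % info)                   # Some Video-abc123.mp4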
 
 
 def main():
-	try:
-		_real_main()
-	except DownloadError:
-		sys.exit(1)
-	except SameFileError:
-		sys.exit(u'ERROR: fixed output name but more than one file to download')
-	except KeyboardInterrupt:
-		sys.exit(u'\nERROR: Interrupted by user')
+    try:
+        _real_main()
+    except DownloadError:
+        sys.exit(1)
+    except SameFileError:
+        sys.exit(u'ERROR: fixed output name but more than one file to download')
+    except KeyboardInterrupt:
+        sys.exit(u'\nERROR: Interrupted by user')

+ 13 - 3
youtube_dl/__main__.py

@@ -1,7 +1,17 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 
 
-import __init__
+# Execute with
+# $ python youtube_dl/__main__.py (2.6+)
+# $ python -m youtube_dl          (2.7+)
+
+import sys
+
+if __package__ is None and not hasattr(sys, "frozen"):
+    # direct call of __main__.py
+    import os.path
+    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+import youtube_dl
 
 
 if __name__ == '__main__':
-	__init__.main()
+    youtube_dl.main()
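
The __package__ test above is what lets both invocations in the header comment work: under python -m the interpreter sets __package__ and the package is already importable, while a direct file run leaves __package__ unset and needs the package root appended to sys.path. A tiny probe (hypothetical package name) shows the difference:

    # probe.py inside a hypothetical package 'pkg'
    print(__package__)   # None  when run as: python pkg/probe.py
                         # 'pkg' when run as: python -m pkg.probe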

+ 160 - 0
youtube_dl/update.py

@@ -0,0 +1,160 @@
+import json
+import traceback
+import hashlib
+from zipimport import zipimporter
+
+from .utils import *
+from .version import __version__
+
+def rsa_verify(message, signature, key):
+    from struct import pack
+    from hashlib import sha256
+    from sys import version_info
+    def b(x):
+        if version_info[0] == 2: return x
+        else: return x.encode('latin1')
+    assert(type(message) == type(b('')))
+    block_size = 0
+    n = key[0]
+    while n:
+        block_size += 1
+        n >>= 8
+    signature = pow(int(signature, 16), key[1], key[0])
+    raw_bytes = []
+    while signature:
+        raw_bytes.insert(0, pack("B", signature & 0xFF))
+        signature >>= 8
+    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
+    if signature[0:2] != b('\x00\x01'): return False
+    signature = signature[2:]
+    if not b('\x00') in signature: return False
+    signature = signature[signature.index(b('\x00'))+1:]
+    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    signature = signature[19:]
+    if signature != sha256(message).digest(): return False
+    return True
+
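rsa_verify checks a textbook PKCS#1 v1.5 signature by hand: the hex signature is raised to the public exponent mod n, the leading 00 01 is checked, everything up to the first 00 separator is skipped, the fixed 19-byte ASN.1 DigestInfo prefix for SHA-256 must follow, and the trailing 32 bytes must equal sha256(message). A sketch of the block layout plus a hypothetical call (modulus and signature are placeholders, 65537 is the usual public exponent):

    # PKCS#1 v1.5 block, as dissected above:
    #   00 01 FF..FF 00 | 19-byte SHA-256 DigestInfo prefix | 32-byte SHA-256 digest
    ok = rsa_verify(b'payload bytes', '3f9a...', (0xC0FFEE, 65537))  # placeholders
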
+def update_self(to_screen, verbose, filename):
+    """Update the program file with the latest version from the repository"""
+
+    UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
+    VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
+    JSON_URL = UPDATE_URL + 'versions.json'
+    UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
+
+
+    if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
+        to_screen(u'It looks like you installed youtube-dl with pip, setup.py or a tarball. Please use that to update.')
+        return
+
+    # Check if there is a new version
+    try:
+        newversion = compat_urllib_request.urlopen(VERSION_URL).read().decode('utf-8').strip()
+    except:
+        if verbose: to_screen(compat_str(traceback.format_exc()))
+        to_screen(u'ERROR: can\'t find the current version. Please try again later.')
+        return
+    if newversion == __version__:
+        to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
+        return
+
+    # Download and check versions info
+    try:
+        versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
+        versions_info = json.loads(versions_info)
+    except:
+        if verbose: to_screen(compat_str(traceback.format_exc()))
+        to_screen(u'ERROR: can\'t obtain versions info. Please try again later.')
+        return
+    if not 'signature' in versions_info:
+        to_screen(u'ERROR: the versions file is not signed or corrupted. Aborting.')
+        return
+    signature = versions_info['signature']
+    del versions_info['signature']
+    if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
+        to_screen(u'ERROR: the versions file signature is invalid. Aborting.')
+        return
+
+    to_screen(u'Updating to version ' + versions_info['latest'] + '...')
+    version = versions_info['versions'][versions_info['latest']]
+    if version.get('notes'):
+        to_screen(u'PLEASE NOTE:')
+        for note in version['notes']:
+            to_screen(note)
+
+    if not os.access(filename, os.W_OK):
+        to_screen(u'ERROR: no write permissions on %s' % filename)
+        return
+
+    # Py2EXE
+    if hasattr(sys, "frozen"):
+        exe = os.path.abspath(filename)
+        directory = os.path.dirname(exe)
+        if not os.access(directory, os.W_OK):
+            to_screen(u'ERROR: no write permissions on %s' % directory)
+            return
+
+        try:
+            urlh = compat_urllib_request.urlopen(version['exe'][0])
+            newcontent = urlh.read()
+            urlh.close()
+        except (IOError, OSError) as err:
+            if verbose: to_screen(compat_str(traceback.format_exc()))
+            to_screen(u'ERROR: unable to download latest version')
+            return
+
+        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
+        if newcontent_hash != version['exe'][1]:
+            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
+            return
+
+        try:
+            with open(exe + '.new', 'wb') as outf:
+                outf.write(newcontent)
+        except (IOError, OSError) as err:
+            if verbose: to_screen(compat_str(traceback.format_exc()))
+            to_screen(u'ERROR: unable to write the new version')
+            return
+
+        try:
+            bat = os.path.join(directory, 'youtube-dl-updater.bat')
+            b = open(bat, 'w')
+            b.write("""
+echo Updating youtube-dl...
+ping 127.0.0.1 -n 5 -w 1000 > NUL
+move /Y "%s.new" "%s"
+del "%s"
+            \n""" %(exe, exe, bat))
+            b.close()
+
+            os.startfile(bat)
+        except (IOError, OSError) as err:
+            if verbose: to_screen(compat_str(traceback.format_exc()))
+            to_screen(u'ERROR: unable to overwrite current version')
+            return
+
+    # Zip unix package
+    elif isinstance(globals().get('__loader__'), zipimporter):
+        try:
+            urlh = compat_urllib_request.urlopen(version['bin'][0])
+            newcontent = urlh.read()
+            urlh.close()
+        except (IOError, OSError) as err:
+            if verbose: to_screen(compat_str(traceback.format_exc()))
+            to_screen(u'ERROR: unable to download latest version')
+            return
+
+        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
+        if newcontent_hash != version['bin'][1]:
+            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
+            return
+
+        try:
+            with open(filename, 'wb') as outf:
+                outf.write(newcontent)
+        except (IOError, OSError) as err:
+            if verbose: to_screen(compat_str(traceback.format_exc()))
+            to_screen(u'ERROR: unable to overwrite current version')
+            return
+
+    to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
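
Read together, the accesses above imply a versions file of roughly this shape (all values invented here; 'signature' is the hex RSA signature over json.dumps of the rest with sort_keys=True):

    versions_info = {
        'latest': '2013.01.02',
        'versions': {
            '2013.01.02': {
                'bin': ['http://example.invalid/youtube-dl', '<sha256 hex>'],
                'exe': ['http://example.invalid/youtube-dl.exe', '<sha256 hex>'],
                'notes': ['optional notes, printed before updating'],
            },
        },
        'signature': '<hex signature>',
    }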

+ 482 - 303
youtube_dl/utils.py

@@ -2,363 +2,542 @@
 # -*- coding: utf-8 -*-
 
 
 import gzip
-import htmlentitydefs
-import HTMLParser
+import io
+import json
 import locale
 import os
 import re
 import sys
+import traceback
 import zlib
-import urllib2
 import email.utils
 import json
 
 
 try:
-	import cStringIO as StringIO
+    import urllib.request as compat_urllib_request
+except ImportError: # Python 2
+    import urllib2 as compat_urllib_request
+
+try:
+    import urllib.error as compat_urllib_error
+except ImportError: # Python 2
+    import urllib2 as compat_urllib_error
+
+try:
+    import urllib.parse as compat_urllib_parse
+except ImportError: # Python 2
+    import urllib as compat_urllib_parse
+
+try:
+    from urllib.parse import urlparse as compat_urllib_parse_urlparse
+except ImportError: # Python 2
+    from urlparse import urlparse as compat_urllib_parse_urlparse
+
+try:
+    import http.cookiejar as compat_cookiejar
+except ImportError: # Python 2
+    import cookielib as compat_cookiejar
+
+try:
+    import html.entities as compat_html_entities
+except ImportError: # Python 2
+    import htmlentitydefs as compat_html_entities
+
+try:
+    import html.parser as compat_html_parser
+except ImportError: # Python 2
+    import HTMLParser as compat_html_parser
+
+try:
+    import http.client as compat_http_client
+except ImportError: # Python 2
+    import httplib as compat_http_client
+
+try:
+    from subprocess import DEVNULL
+    compat_subprocess_get_DEVNULL = lambda: DEVNULL
 except ImportError:
-	import StringIO
+    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
+
+try:
+    from urllib.parse import parse_qs as compat_parse_qs
+except ImportError: # Python 2
+    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
+    # Python 2's version is apparently totally broken
+    def _unquote(string, encoding='utf-8', errors='replace'):
+        if string == '':
+            return string
+        res = string.split('%')
+        if len(res) == 1:
+            return string
+        if encoding is None:
+            encoding = 'utf-8'
+        if errors is None:
+            errors = 'replace'
+        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
+        pct_sequence = b''
+        string = res[0]
+        for item in res[1:]:
+            try:
+                if not item:
+                    raise ValueError
+                pct_sequence += item[:2].decode('hex')
+                rest = item[2:]
+                if not rest:
+                    # This segment was just a single percent-encoded character.
+                    # May be part of a sequence of code units, so delay decoding.
+                    # (Stored in pct_sequence).
+                    continue
+            except ValueError:
+                rest = '%' + item
+            # Encountered non-percent-encoded characters. Flush the current
+            # pct_sequence.
+            string += pct_sequence.decode(encoding, errors) + rest
+            pct_sequence = b''
+        if pct_sequence:
+            # Flush the final pct_sequence
+            string += pct_sequence.decode(encoding, errors)
+        return string
+
+    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
+                encoding='utf-8', errors='replace'):
+        qs, _coerce_result = qs, unicode
+        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+        r = []
+        for name_value in pairs:
+            if not name_value and not strict_parsing:
+                continue
+            nv = name_value.split('=', 1)
+            if len(nv) != 2:
+                if strict_parsing:
+                    raise ValueError("bad query field: %r" % (name_value,))
+                # Handle case of a control-name with no equal sign
+                if keep_blank_values:
+                    nv.append('')
+                else:
+                    continue
+            if len(nv[1]) or keep_blank_values:
+                name = nv[0].replace('+', ' ')
+                name = _unquote(name, encoding=encoding, errors=errors)
+                name = _coerce_result(name)
+                value = nv[1].replace('+', ' ')
+                value = _unquote(value, encoding=encoding, errors=errors)
+                value = _coerce_result(value)
+                r.append((name, value))
+        return r
+
+    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
+                encoding='utf-8', errors='replace'):
+        parsed_result = {}
+        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
+                        encoding=encoding, errors=errors)
+        for name, value in pairs:
+            if name in parsed_result:
+                parsed_result[name].append(value)
+            else:
+                parsed_result[name] = [value]
+        return parsed_result
+
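The backport is meant to match the Python 3 semantics: pairs split on '&' (or ';'), repeated names accumulate into lists, '+' decodes to a space. Expected behaviour, assuming the port is faithful:

    compat_parse_qs('v=abc123&t=1m&t=2m')
    # -> {'v': ['abc123'], 't': ['1m', '2m']}
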
+try:
+    compat_str = unicode # Python 2
+except NameError:
+    compat_str = str
+
+try:
+    compat_chr = unichr # Python 2
+except NameError:
+    compat_chr = chr
 
 
 std_headers = {
-	'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0',
-	'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
-	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
-	'Accept-Encoding': 'gzip, deflate',
-	'Accept-Language': 'en-us,en;q=0.5',
+    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0',
+    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
+    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+    'Accept-Encoding': 'gzip, deflate',
+    'Accept-Language': 'en-us,en;q=0.5',
 }
 
 
 def preferredencoding():
-	"""Get preferred encoding.
-
-	Returns the best encoding scheme for the system, based on
-	locale.getpreferredencoding() and some further tweaks.
-	"""
-	def yield_preferredencoding():
-		try:
-			pref = locale.getpreferredencoding()
-			u'TEST'.encode(pref)
-		except:
-			pref = 'UTF-8'
-		while True:
-			yield pref
-	return yield_preferredencoding().next()
-
+    """Get preferred encoding.
+
+    Returns the best encoding scheme for the system, based on
+    locale.getpreferredencoding() and some further tweaks.
+    """
+    try:
+        pref = locale.getpreferredencoding()
+        u'TEST'.encode(pref)
+    except:
+        pref = 'UTF-8'
+
+    return pref
+
+if sys.version_info < (3,0):
+    def compat_print(s):
+        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
+else:
+    def compat_print(s):
+        assert type(s) == type(u'')
+        print(s)
+
+# In Python 2.x, json.dump expects a bytestream.
+# In Python 3.x, it writes to a character stream
+if sys.version_info < (3,0):
+    def write_json_file(obj, fn):
+        with open(fn, 'wb') as f:
+            json.dump(obj, f)
+else:
+    def write_json_file(obj, fn):
+        with open(fn, 'w', encoding='utf-8') as f:
+            json.dump(obj, f)
 
 
 def htmlentity_transform(matchobj):
-	"""Transforms an HTML entity to a Unicode character.
-
-	This function receives a match object and is intended to be used with
-	the re.sub() function.
-	"""
-	entity = matchobj.group(1)
-
-	# Known non-numeric HTML entity
-	if entity in htmlentitydefs.name2codepoint:
-		return unichr(htmlentitydefs.name2codepoint[entity])
-
-	# Unicode character
-	mobj = re.match(ur'(?u)#(x?\d+)', entity)
-	if mobj is not None:
-		numstr = mobj.group(1)
-		if numstr.startswith(u'x'):
-			base = 16
-			numstr = u'0%s' % numstr
-		else:
-			base = 10
-		return unichr(long(numstr, base))
-
-	# Unknown entity in name, return its literal representation
-	return (u'&%s;' % entity)
-
-HTMLParser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
-class IDParser(HTMLParser.HTMLParser):
-	"""Modified HTMLParser that isolates a tag with the specified id"""
-	def __init__(self, id):
-		self.id = id
-		self.result = None
-		self.started = False
-		self.depth = {}
-		self.html = None
-		self.watch_startpos = False
-		self.error_count = 0
-		HTMLParser.HTMLParser.__init__(self)
-
-	def error(self, message):
-		#print >> sys.stderr, self.getpos()
-		if self.error_count > 10 or self.started:
-			raise HTMLParser.HTMLParseError(message, self.getpos())
-		self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line
-		self.error_count += 1
-		self.goahead(1)
-
-	def loads(self, html):
-		self.html = html
-		self.feed(html)
-		self.close()
-
-	def handle_starttag(self, tag, attrs):
-		attrs = dict(attrs)
-		if self.started:
-			self.find_startpos(None)
-		if 'id' in attrs and attrs['id'] == self.id:
-			self.result = [tag]
-			self.started = True
-			self.watch_startpos = True
-		if self.started:
-			if not tag in self.depth: self.depth[tag] = 0
-			self.depth[tag] += 1
-
-	def handle_endtag(self, tag):
-		if self.started:
-			if tag in self.depth: self.depth[tag] -= 1
-			if self.depth[self.result[0]] == 0:
-				self.started = False
-				self.result.append(self.getpos())
-
-	def find_startpos(self, x):
-		"""Needed to put the start position of the result (self.result[1])
-		after the opening tag with the requested id"""
-		if self.watch_startpos:
-			self.watch_startpos = False
-			self.result.append(self.getpos())
-	handle_entityref = handle_charref = handle_data = handle_comment = \
-	handle_decl = handle_pi = unknown_decl = find_startpos
-
-	def get_result(self):
-		if self.result == None: return None
-		if len(self.result) != 3: return None
-		lines = self.html.split('\n')
-		lines = lines[self.result[1][0]-1:self.result[2][0]]
-		lines[0] = lines[0][self.result[1][1]:]
-		if len(lines) == 1:
-			lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
-		lines[-1] = lines[-1][:self.result[2][1]]
-		return '\n'.join(lines).strip()
+    """Transforms an HTML entity to a character.
+
+    This function receives a match object and is intended to be used with
+    the re.sub() function.
+    """
+    entity = matchobj.group(1)
+
+    # Known non-numeric HTML entity
+    if entity in compat_html_entities.name2codepoint:
+        return compat_chr(compat_html_entities.name2codepoint[entity])
+
+    mobj = re.match(u'(?u)#(x?\\d+)', entity)
+    if mobj is not None:
+        numstr = mobj.group(1)
+        if numstr.startswith(u'x'):
+            base = 16
+            numstr = u'0%s' % numstr
+        else:
+            base = 10
+        return compat_chr(int(numstr, base))
+
+    # Unknown entity in name, return its literal representation
+    return (u'&%s;' % entity)
+
+compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
+class AttrParser(compat_html_parser.HTMLParser):
+    """Modified HTMLParser that isolates a tag with the specified attribute"""
+    def __init__(self, attribute, value):
+        self.attribute = attribute
+        self.value = value
+        self.result = None
+        self.started = False
+        self.depth = {}
+        self.html = None
+        self.watch_startpos = False
+        self.error_count = 0
+        compat_html_parser.HTMLParser.__init__(self)
+
+    def error(self, message):
+        if self.error_count > 10 or self.started:
+            raise compat_html_parser.HTMLParseError(message, self.getpos())
+        self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line
+        self.error_count += 1
+        self.goahead(1)
+
+    def loads(self, html):
+        self.html = html
+        self.feed(html)
+        self.close()
+
+    def handle_starttag(self, tag, attrs):
+        attrs = dict(attrs)
+        if self.started:
+            self.find_startpos(None)
+        if self.attribute in attrs and attrs[self.attribute] == self.value:
+            self.result = [tag]
+            self.started = True
+            self.watch_startpos = True
+        if self.started:
+            if not tag in self.depth: self.depth[tag] = 0
+            self.depth[tag] += 1
+
+    def handle_endtag(self, tag):
+        if self.started:
+            if tag in self.depth: self.depth[tag] -= 1
+            if self.depth[self.result[0]] == 0:
+                self.started = False
+                self.result.append(self.getpos())
+
+    def find_startpos(self, x):
+        """Needed to put the start position of the result (self.result[1])
+        after the opening tag with the requested id"""
+        if self.watch_startpos:
+            self.watch_startpos = False
+            self.result.append(self.getpos())
+    handle_entityref = handle_charref = handle_data = handle_comment = \
+    handle_decl = handle_pi = unknown_decl = find_startpos
+
+    def get_result(self):
+        if self.result is None:
+            return None
+        if len(self.result) != 3:
+            return None
+        lines = self.html.split('\n')
+        lines = lines[self.result[1][0]-1:self.result[2][0]]
+        lines[0] = lines[0][self.result[1][1]:]
+        if len(lines) == 1:
+            lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
+        lines[-1] = lines[-1][:self.result[2][1]]
+        return '\n'.join(lines).strip()
 
 
 def get_element_by_id(id, html):
-	"""Return the content of the tag with the specified id in the passed HTML document"""
-	parser = IDParser(id)
-	try:
-		parser.loads(html)
-	except HTMLParser.HTMLParseError:
-		pass
-	return parser.get_result()
+    """Return the content of the tag with the specified ID in the passed HTML document"""
+    return get_element_by_attribute("id", id, html)
+
+def get_element_by_attribute(attribute, value, html):
+    """Return the content of the tag with the specified attribute in the passed HTML document"""
+    parser = AttrParser(attribute, value)
+    try:
+        parser.loads(html)
+    except compat_html_parser.HTMLParseError:
+        pass
+    return parser.get_result()
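
get_element_by_id is now a thin wrapper, and any attribute/value pair can be targeted. Expected behaviour, by my reading of AttrParser rather than a test run:

    html = u'<html><div id="player" class="video">Hello</div></html>'
    get_element_by_id(u'player', html)                  # -> u'Hello'
    get_element_by_attribute(u'class', u'video', html)  # -> u'Hello'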
 
 
 
 
 def clean_html(html):
-	"""Clean an HTML snippet into a readable string"""
-	# Newline vs <br />
-	html = html.replace('\n', ' ')
-	html = re.sub('\s*<\s*br\s*/?\s*>\s*', '\n', html)
-	# Strip html tags
-	html = re.sub('<.*?>', '', html)
-	# Replace html entities
-	html = unescapeHTML(html)
-	return html
+    """Clean an HTML snippet into a readable string"""
+    # Newline vs <br />
+    html = html.replace('\n', ' ')
+    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
+    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
+    # Strip html tags
+    html = re.sub('<.*?>', '', html)
+    # Replace html entities
+    html = unescapeHTML(html)
+    return html
 
 
 
 
 def sanitize_open(filename, open_mode):
-	"""Try to open the given filename, and slightly tweak it if this fails.
-
-	Attempts to open the given filename. If this fails, it tries to change
-	the filename slightly, step by step, until it's either able to open it
-	or it fails and raises a final exception, like the standard open()
-	function.
-
-	It returns the tuple (stream, definitive_file_name).
-	"""
-	try:
-		if filename == u'-':
-			if sys.platform == 'win32':
-				import msvcrt
-				msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
-			return (sys.stdout, filename)
-		stream = open(encodeFilename(filename), open_mode)
-		return (stream, filename)
-	except (IOError, OSError), err:
-		# In case of error, try to remove win32 forbidden chars
-		filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename)
-
-		# An exception here should be caught in the caller
-		stream = open(encodeFilename(filename), open_mode)
-		return (stream, filename)
+    """Try to open the given filename, and slightly tweak it if this fails.
+
+    Attempts to open the given filename. If this fails, it tries to change
+    the filename slightly, step by step, until it's either able to open it
+    or it fails and raises a final exception, like the standard open()
+    function.
+
+    It returns the tuple (stream, definitive_file_name).
+    """
+    try:
+        if filename == u'-':
+            if sys.platform == 'win32':
+                import msvcrt
+                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+            return (sys.stdout, filename)
+        stream = open(encodeFilename(filename), open_mode)
+        return (stream, filename)
+    except (IOError, OSError) as err:
+        # In case of error, try to remove win32 forbidden chars
+        filename = re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', filename)
+
+        # An exception here should be caught in the caller
+        stream = open(encodeFilename(filename), open_mode)
+        return (stream, filename)
 
 
 
 
 def timeconvert(timestr):
-	"""Convert RFC 2822 defined time string into system timestamp"""
-	timestamp = None
-	timetuple = email.utils.parsedate_tz(timestr)
-	if timetuple is not None:
-		timestamp = email.utils.mktime_tz(timetuple)
-	return timestamp
-	
-def sanitize_filename(s):
-	"""Sanitizes a string so it could be used as part of a filename."""
-	def replace_insane(char):
-		if char == '?' or ord(char) < 32 or ord(char) == 127:
-			return ''
-		elif char == '"':
-			return '\''
-		elif char == ':':
-			return ' -'
-		elif char in '\\/|*<>':
-			return '-'
-		return char
-
-	result = u''.join(map(replace_insane, s))
-	while '--' in result:
-		result = result.replace('--', '-')
-	return result.strip('-')
+    """Convert RFC 2822 defined time string into system timestamp"""
+    timestamp = None
+    timetuple = email.utils.parsedate_tz(timestr)
+    if timetuple is not None:
+        timestamp = email.utils.mktime_tz(timetuple)
+    return timestamp
+
+def sanitize_filename(s, restricted=False, is_id=False):
+    """Sanitizes a string so it could be used as part of a filename.
+    If restricted is set, use a stricter subset of allowed characters.
+    Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
+    """
+    def replace_insane(char):
+        if char == '?' or ord(char) < 32 or ord(char) == 127:
+            return ''
+        elif char == '"':
+            return '' if restricted else '\''
+        elif char == ':':
+            return '_-' if restricted else ' -'
+        elif char in '\\/|*<>':
+            return '_'
+        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
+            return '_'
+        if restricted and ord(char) > 127:
+            return '_'
+        return char
+
+    result = u''.join(map(replace_insane, s))
+    if not is_id:
+        while '__' in result:
+            result = result.replace('__', '_')
+        result = result.strip('_')
+        # Common case of "Foreign band name - English song title"
+        if restricted and result.startswith('-_'):
+            result = result[2:]
+        if not result:
+            result = '_'
+    return result
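
The new restricted mode trades readability for strictly ASCII, shell-safe names. Applying the replacement rules above by hand (my reading, not captured output):

    sanitize_filename(u'AC/DC: Live')                   # -> u'AC_DC - Live'
    sanitize_filename(u'AC/DC: Live', restricted=True)  # -> u'AC_DC_-_Live'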
 
 
 def orderedSet(iterable):
-	""" Remove all duplicates from the input iterable """
-	res = []
-	for el in iterable:
-		if el not in res:
-			res.append(el)
-	return res
+    """ Remove all duplicates from the input iterable """
+    res = []
+    for el in iterable:
+        if el not in res:
+            res.append(el)
+    return res
 
 
 def unescapeHTML(s):
-	"""
-	@param s a string (of type unicode)
-	"""
-	assert type(s) == type(u'')
+    """
+    @param s a string
+    """
+    assert type(s) == type(u'')
 
 
-	result = re.sub(ur'(?u)&(.+?);', htmlentity_transform, s)
-	return result
+    result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s)
+    return result
 
 
 def encodeFilename(s):
-	"""
-	@param s The name of the file (of type unicode)
-	"""
+    """
+    @param s The name of the file
+    """
+
+    assert type(s) == type(u'')
+
+    # Python 3 has a Unicode API
+    if sys.version_info >= (3, 0):
+        return s
+
+    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
+        # Pass u'' directly to use Unicode APIs on Windows 2000 and up
+        # (Detecting Windows NT 4 is tricky because 'major >= 4' would
+        # match Windows 9x series as well. Besides, NT 4 is obsolete.)
+        return s
+    else:
+        return s.encode(sys.getfilesystemencoding(), 'ignore')
+
+
+class ExtractorError(Exception):
+    """Error during info extraction."""
+    def __init__(self, msg, tb=None):
+        """ tb, if given, is the original traceback (so that it can be printed out). """
+        super(ExtractorError, self).__init__(msg)
+        self.traceback = tb
 
 
-	assert type(s) == type(u'')
+    def format_traceback(self):
+        if self.traceback is None:
+            return None
+        return u''.join(traceback.format_tb(self.traceback))
 
 
-	if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
-		# Pass u'' directly to use Unicode APIs on Windows 2000 and up
-		# (Detecting Windows NT 4 is tricky because 'major >= 4' would
-		# match Windows 9x series as well. Besides, NT 4 is obsolete.)
-		return s
-	else:
-		return s.encode(sys.getfilesystemencoding(), 'ignore')
 
 
 class DownloadError(Exception):
-	"""Download Error exception.
+    """Download Error exception.
 
 
-	This exception may be thrown by FileDownloader objects if they are not
-	configured to continue on errors. They will contain the appropriate
-	error message.
-	"""
-	pass
+    This exception may be thrown by FileDownloader objects if they are not
+    configured to continue on errors. They will contain the appropriate
+    error message.
+    """
+    pass
 
 
 
 
 class SameFileError(Exception):
-	"""Same File exception.
+    """Same File exception.
 
 
-	This exception will be thrown by FileDownloader objects if they detect
-	multiple files would have to be downloaded to the same file on disk.
-	"""
-	pass
+    This exception will be thrown by FileDownloader objects if they detect
+    multiple files would have to be downloaded to the same file on disk.
+    """
+    pass
 
 
 
 
 class PostProcessingError(Exception):
-	"""Post Processing exception.
+    """Post Processing exception.
 
 
-	This exception may be raised by PostProcessor's .run() method to
-	indicate an error in the postprocessing task.
-	"""
-	pass
+    This exception may be raised by PostProcessor's .run() method to
+    indicate an error in the postprocessing task.
+    """
+    pass
 
 
 class MaxDownloadsReached(Exception):
-	""" --max-downloads limit has been reached. """
-	pass
+    """ --max-downloads limit has been reached. """
+    pass
 
 
 
 
 class UnavailableVideoError(Exception):
-	"""Unavailable Format exception.
+    """Unavailable Format exception.
 
 
-	This exception will be thrown when a video is requested
-	in a format that is not available for that video.
-	"""
-	pass
+    This exception will be thrown when a video is requested
+    in a format that is not available for that video.
+    """
+    pass
 
 
 
 
 class ContentTooShortError(Exception):
-	"""Content Too Short exception.
-
-	This exception may be raised by FileDownloader objects when a file they
-	download is too small for what the server announced first, indicating
-	the connection was probably interrupted.
-	"""
-	# Both in bytes
-	downloaded = None
-	expected = None
-
-	def __init__(self, downloaded, expected):
-		self.downloaded = downloaded
-		self.expected = expected
-
-
-class Trouble(Exception):
-	"""Trouble helper exception
-	
-	This is an exception to be handled with
-	FileDownloader.trouble
-	"""
-
-class YoutubeDLHandler(urllib2.HTTPHandler):
-	"""Handler for HTTP requests and responses.
-
-	This class, when installed with an OpenerDirector, automatically adds
-	the standard headers to every HTTP request and handles gzipped and
-	deflated responses from web servers. If compression is to be avoided in
-	a particular request, the original request in the program code only has
-	to include the HTTP header "Youtubedl-No-Compression", which will be
-	removed before making the real request.
-
-	Part of this code was copied from:
-
-	http://techknack.net/python-urllib2-handlers/
-
-	Andrew Rowls, the author of that code, agreed to release it to the
-	public domain.
-	"""
-
-	@staticmethod
-	def deflate(data):
-		try:
-			return zlib.decompress(data, -zlib.MAX_WBITS)
-		except zlib.error:
-			return zlib.decompress(data)
-
-	@staticmethod
-	def addinfourl_wrapper(stream, headers, url, code):
-		if hasattr(urllib2.addinfourl, 'getcode'):
-			return urllib2.addinfourl(stream, headers, url, code)
-		ret = urllib2.addinfourl(stream, headers, url)
-		ret.code = code
-		return ret
-
-	def http_request(self, req):
-		for h in std_headers:
-			if h in req.headers:
-				del req.headers[h]
-			req.add_header(h, std_headers[h])
-		if 'Youtubedl-no-compression' in req.headers:
-			if 'Accept-encoding' in req.headers:
-				del req.headers['Accept-encoding']
-			del req.headers['Youtubedl-no-compression']
-		return req
-
-	def http_response(self, req, resp):
-		old_resp = resp
-		# gzip
-		if resp.headers.get('Content-encoding', '') == 'gzip':
-			gz = gzip.GzipFile(fileobj=StringIO.StringIO(resp.read()), mode='r')
-			resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
-			resp.msg = old_resp.msg
-		# deflate
-		if resp.headers.get('Content-encoding', '') == 'deflate':
-			gz = StringIO.StringIO(self.deflate(resp.read()))
-			resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
-			resp.msg = old_resp.msg
-		return resp
+    """Content Too Short exception.
+
+    This exception may be raised by FileDownloader objects when a file they
+    download is too small for what the server announced first, indicating
+    the connection was probably interrupted.
+    """
+    # Both in bytes
+    downloaded = None
+    expected = None
+
+    def __init__(self, downloaded, expected):
+        self.downloaded = downloaded
+        self.expected = expected
+
+class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
+    """Handler for HTTP requests and responses.
+
+    This class, when installed with an OpenerDirector, automatically adds
+    the standard headers to every HTTP request and handles gzipped and
+    deflated responses from web servers. If compression is to be avoided in
+    a particular request, the original request in the program code only has
+    to include the HTTP header "Youtubedl-No-Compression", which will be
+    removed before making the real request.
+
+    Part of this code was copied from:
+
+    http://techknack.net/python-urllib2-handlers/
+
+    Andrew Rowls, the author of that code, agreed to release it to the
+    public domain.
+    """
+
+    @staticmethod
+    def deflate(data):
+        try:
+            return zlib.decompress(data, -zlib.MAX_WBITS)
+        except zlib.error:
+            return zlib.decompress(data)
+
+    @staticmethod
+    def addinfourl_wrapper(stream, headers, url, code):
+        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
+            return compat_urllib_request.addinfourl(stream, headers, url, code)
+        ret = compat_urllib_request.addinfourl(stream, headers, url)
+        ret.code = code
+        return ret
+
+    def http_request(self, req):
+        for h in std_headers:
+            if h in req.headers:
+                del req.headers[h]
+            req.add_header(h, std_headers[h])
+        if 'Youtubedl-no-compression' in req.headers:
+            if 'Accept-encoding' in req.headers:
+                del req.headers['Accept-encoding']
+            del req.headers['Youtubedl-no-compression']
+        return req
+
+    def http_response(self, req, resp):
+        old_resp = resp
+        # gzip
+        if resp.headers.get('Content-encoding', '') == 'gzip':
+            gz = gzip.GzipFile(fileobj=io.BytesIO(resp.read()), mode='r')
+            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
+            resp.msg = old_resp.msg
+        # deflate
+        if resp.headers.get('Content-encoding', '') == 'deflate':
+            gz = io.BytesIO(self.deflate(resp.read()))
+            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
+            resp.msg = old_resp.msg
+        return resp
+
+    https_request = http_request
+    https_response = http_response
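
The https_request/https_response aliases extend the same header and decompression handling to TLS connections. Wiring the handler up mirrors what _real_main does, and the magic header suppresses compression for a single request:

    opener = compat_urllib_request.build_opener(YoutubeDLHandler())
    compat_urllib_request.install_opener(opener)
    req = compat_urllib_request.Request('http://example.invalid/',
            headers={'Youtubedl-No-Compression': '1'})
    page = compat_urllib_request.urlopen(req).read()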

+ 2 - 0
youtube_dl/version.py

@@ -0,0 +1,2 @@
+
+__version__ = '2013.01.02'