
Merge pull request #32950 from ytdl-org/master

Merge from master
dirkf, 10 months ago
commit 49093c09c0
100 files changed: 11007 additions and 1594 deletions
  1. +1 -0  .github/ISSUE_TEMPLATE/config.yml
  2. +435 -37  .github/workflows/ci.yml
  3. +1 -1  CONTRIBUTING.md
  4. +121 -26  README.md
  5. +1 -0  devscripts/__init__.py
  6. +7 -4  devscripts/bash-completion.py
  7. +83 -0  devscripts/cli_to_api.py
  8. +5 -4  devscripts/create-github-release.py
  9. +6 -5  devscripts/fish-completion.py
  10. +10 -5  devscripts/gh-pages/add-version.py
  11. +12 -5  devscripts/gh-pages/generate-download.py
  12. +11 -6  devscripts/gh-pages/update-copyright.py
  13. +8 -3  devscripts/gh-pages/update-feed.py
  14. +6 -5  devscripts/gh-pages/update-sites.py
  15. +4 -5  devscripts/make_contributing.py
  16. +7 -10  devscripts/make_issue_template.py
  17. +41 -10  devscripts/make_lazy_extractors.py
  18. +10 -5  devscripts/make_readme.py
  19. +8 -7  devscripts/make_supportedsites.py
  20. +4 -6  devscripts/prepare_manpage.py
  21. +62 -0  devscripts/utils.py
  22. +4 -4  devscripts/zsh-completion.py
  23. +34 -31  test/helper.py
  24. +296 -21  test/test_InfoExtractor.py
  25. +205 -16  test/test_YoutubeDL.py
  26. +14 -0  test/test_YoutubeDLCookieJar.py
  27. +8 -1  test/test_aes.py
  28. +8 -4  test/test_age_restriction.py
  29. +14 -2  test/test_cache.py
  30. +32 -4  test/test_compat.py
  31. +42 -14  test/test_download.py
  32. +272 -0  test/test_downloader_external.py
  33. +6 -13  test/test_downloader_http.py
  34. +24 -16  test/test_execution.py
  35. +494 -56  test/test_http.py
  36. +332 -113  test/test_jsinterp.py
  37. +73 -27  test/test_subtitles.py
  38. +6 -4  test/test_swfinterp.py
  39. +509 -0  test/test_traversal.py
  40. +8 -5  test/test_unicode_literals.py
  41. +309 -61  test/test_utils.py
  42. +2 -3  test/test_write_annotations.py
  43. +96 -3  test/test_youtube_signature.py
  44. +35 -0  test/testdata/mpd/range_only.mpd
  45. +351 -0  test/testdata/mpd/subtitles.mpd
  46. +32 -0  test/testdata/mpd/url_and_range.mpd
  47. +351 -105  youtube_dl/YoutubeDL.py
  48. +7 -5  youtube_dl/__init__.py
  49. +36 -3  youtube_dl/aes.py
  50. +28 -8  youtube_dl/cache.py
  51. +1667 -0  youtube_dl/casefold.py
  52. +602 -114  youtube_dl/compat.py
  53. +3 -0  youtube_dl/downloader/__init__.py
  54. +21 -7  youtube_dl/downloader/common.py
  55. +25 -22  youtube_dl/downloader/dash.py
  56. +203 -38  youtube_dl/downloader/external.py
  57. +31 -15  youtube_dl/downloader/fragment.py
  58. +8 -10  youtube_dl/downloader/http.py
  59. +6 -4  youtube_dl/downloader/rtmp.py
  60. +32 -25  youtube_dl/extractor/adn.py
  61. +20 -7  youtube_dl/extractor/aenetworks.py
  62. +1 -1  youtube_dl/extractor/aliexpress.py
  63. +89 -0  youtube_dl/extractor/alsace20tv.py
  64. +88 -27  youtube_dl/extractor/americastestkitchen.py
  65. +59 -0  youtube_dl/extractor/bigo.py
  66. +5 -0  youtube_dl/extractor/bilibili.py
  67. +173 -0  youtube_dl/extractor/blerp.py
  68. +17 -1  youtube_dl/extractor/bongacams.py
  69. +79 -0  youtube_dl/extractor/caffeine.py
  70. +74 -0  youtube_dl/extractor/callin.py
  71. +10 -24  youtube_dl/extractor/cammodels.py
  72. +91 -79  youtube_dl/extractor/ceskatelevize.py
  73. +69 -0  youtube_dl/extractor/clipchamp.py
  74. +521 -207  youtube_dl/extractor/common.py
  75. +148 -0  youtube_dl/extractor/cpac.py
  76. +204 -0  youtube_dl/extractor/dlf.py
  77. +101 -0  youtube_dl/extractor/epidemicsound.py
  78. +69 -18  youtube_dl/extractor/extractors.py
  79. +101 -0  youtube_dl/extractor/fifa.py
  80. +139 -0  youtube_dl/extractor/gbnews.py
  81. +217 -2  youtube_dl/extractor/generic.py
  82. +273 -0  youtube_dl/extractor/globalplayer.py
  83. +101 -0  youtube_dl/extractor/hrfernsehen.py
  84. +247 -90  youtube_dl/extractor/ign.py
  85. +279 -69  youtube_dl/extractor/imgur.py
  86. +8 -1  youtube_dl/extractor/infoq.py
  87. +298 -82  youtube_dl/extractor/itv.py
  88. +1 -1  youtube_dl/extractor/kaltura.py
  89. +35 -0  youtube_dl/extractor/kommunetv.py
  90. +31 -0  youtube_dl/extractor/kth.py
  91. +99 -24  youtube_dl/extractor/manyvids.py
  92. +5 -1  youtube_dl/extractor/mediaset.py
  93. +1 -1  youtube_dl/extractor/minds.py
  94. +24 -6  youtube_dl/extractor/mixcloud.py
  95. +31 -11  youtube_dl/extractor/motherless.py
  96. +3 -1  youtube_dl/extractor/myspass.py
  97. +87 -0  youtube_dl/extractor/myvideoge.py
  98. +121 -38  youtube_dl/extractor/neteasemusic.py
  99. +18 -3  youtube_dl/extractor/nhk.py
  100. +1 -2  youtube_dl/extractor/nrk.py

+ 1 - 0
.github/ISSUE_TEMPLATE/config.yml

@@ -0,0 +1 @@
+blank_issues_enabled: false

+ 435 - 37
.github/workflows/ci.yml

@@ -1,81 +1,479 @@
 name: CI
-on: [push, pull_request]
+
+env:
+  all-cpython-versions: 2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 3.10, 3.11, 3.12
+  main-cpython-versions: 2.7, 3.2, 3.5, 3.9, 3.11
+  pypy-versions: pypy-2.7, pypy-3.6, pypy-3.7
+  cpython-versions: main
+  test-set: core
+  # Python beta version to be built using pyenv before setup-python support
+  # Must also be included in all-cpython-versions 
+  next: 3.13
+
+on:
+  push:
+    # push inputs aren't known to GitHub
+    inputs:
+      cpython-versions:
+        type: string
+        default: all
+      test-set:
+        type: string
+        default: core
+  pull_request:
+    # pull_request inputs aren't known to GitHub
+    inputs:
+      cpython-versions:
+        type: string
+        default: main
+      test-set:
+        type: string
+        default: both
+  workflow_dispatch:
+    inputs:
+      cpython-versions:
+        type: choice
+        description: CPython versions (main = 2.7, 3.2, 3.5, 3.9, 3.11)
+        options:
+          - all
+          - main
+        required: true
+        default: main
+      test-set:
+        type: choice
+        description: core, download
+        options:
+          - both
+          - core
+          - download
+        required: true
+        default: both
+
+permissions:
+  contents: read
+
 jobs:
+  select:
+    name: Select tests from inputs
+    runs-on: ubuntu-latest
+    outputs:
+      cpython-versions: ${{ steps.run.outputs.cpython-versions }}
+      test-set: ${{ steps.run.outputs.test-set }}
+      own-pip-versions: ${{ steps.run.outputs.own-pip-versions }}
+    steps:
+    # push and pull_request inputs aren't known to GitHub (pt3)
+    - name: Set push defaults
+      if: ${{ github.event_name == 'push' }}
+      env:
+        cpython-versions: all
+        test-set: core
+      run: |
+        echo "cpython-versions=${{env.cpython-versions}}" >> "$GITHUB_ENV"
+        echo "test_set=${{env.test_set}}" >> "$GITHUB_ENV"
+    - name: Get pull_request inputs
+      if: ${{ github.event_name == 'pull_request' }}
+      env:
+        cpython-versions: main
+        test-set: both
+      run: |
+        echo "cpython-versions=${{env.cpython-versions}}" >> "$GITHUB_ENV"
+        echo "test_set=${{env.test_set}}" >> "$GITHUB_ENV"
+    - name: Make version array
+      id: run
+      run: |
+        # Make a JSON Array from comma/space-separated string (no extra escaping)
+        json_list() { \
+          ret=""; IFS="${IFS},"; set -- $*; \
+          for a in "$@"; do \
+            ret=$(printf '%s"%s"' "${ret}${ret:+, }" "$a"); \
+          done; \
+          printf '[%s]' "$ret"; }
+        tests="${{ inputs.test-set || env.test-set }}"
+        [ $tests = both ] && tests="core download"
+        printf 'test-set=%s\n' "$(json_list $tests)" >> "$GITHUB_OUTPUT"
+        versions="${{ inputs.cpython-versions || env.cpython-versions }}"
+        if [ "$versions" = all ]; then \
+          versions="${{ env.all-cpython-versions }}"; else \
+          versions="${{ env.main-cpython-versions }}"; \
+        fi
+        printf 'cpython-versions=%s\n' \
+          "$(json_list ${versions}${versions:+, }${{ env.pypy-versions }})" >> "$GITHUB_OUTPUT"
+        # versions with a special get-pip.py in a per-version subdirectory
+        printf 'own-pip-versions=%s\n' \
+          "$(json_list 2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6)" >> "$GITHUB_OUTPUT"
+
   tests:
-    name: Tests
+    name: Run tests
+    needs: select
+    permissions:
+      contents: read
+      packages: write
     runs-on: ${{ matrix.os }}
+    env:
+      PIP: python -m pip
+      PIP_DISABLE_PIP_VERSION_CHECK: true
+      PIP_NO_PYTHON_VERSION_WARNING: true
     strategy:
       fail-fast: true
       matrix:
-        os: [ubuntu-18.04]
-        # TODO: python 2.6
-        python-version: [2.7, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, pypy-2.7, pypy-3.6, pypy-3.7]
+        os: [ubuntu-20.04]
+        python-version: ${{ fromJSON(needs.select.outputs.cpython-versions) }}
         python-impl: [cpython]
-        ytdl-test-set: [core, download]
+        ytdl-test-set: ${{ fromJSON(needs.select.outputs.test-set) }}
         run-tests-ext: [sh]
         include:
-        # python 3.2 is only available on windows via setup-python
-        - os: windows-latest
-          python-version: 3.2
+        - os: windows-2019
+          python-version: 3.4
           python-impl: cpython
-          ytdl-test-set: core
+          ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'core') && 'core' || 'nocore' }}
           run-tests-ext: bat
-        - os: windows-latest
-          python-version: 3.2
+        - os: windows-2019
+          python-version: 3.4
           python-impl: cpython
-          ytdl-test-set: download
+          ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'download') && 'download'  || 'nodownload' }}
           run-tests-ext: bat
         # jython
-        - os: ubuntu-18.04
+        - os: ubuntu-20.04
+          python-version: 2.7
           python-impl: jython
-          ytdl-test-set: core
+          ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'core') && 'core' || 'nocore' }}
           run-tests-ext: sh
-        - os: ubuntu-18.04
+        - os: ubuntu-20.04
+          python-version: 2.7
           python-impl: jython
-          ytdl-test-set: download
+          ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'download') && 'download'  || 'nodownload' }}
           run-tests-ext: sh
     steps:
-    - uses: actions/checkout@v2
-    - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v2
-      if: ${{ matrix.python-impl == 'cpython' }}
+    - name: Prepare Linux
+      if: ${{ startswith(matrix.os, 'ubuntu') }}
+      shell: bash
+      run: |
+        # apt in runner, if needed, may not be up-to-date
+        sudo apt-get update
+    - name: Checkout
+      uses: actions/checkout@v3
+    #-------- Python 3 -----
+    - name: Set up supported Python ${{ matrix.python-version }}
+      id: setup-python
+      if: ${{ matrix.python-impl == 'cpython' && matrix.python-version != '2.6' && matrix.python-version != '2.7' && matrix.python-version != env.next }}
+      # wrap broken actions/setup-python@v4
+      # NB may run apt-get install in Linux
+      uses: ytdl-org/setup-python@v1
+      env:
+        # Temporary workaround for Python 3.5 failures - May 2024
+        PIP_TRUSTED_HOST: "pypi.python.org pypi.org files.pythonhosted.org"
       with:
         python-version: ${{ matrix.python-version }}
+        cache-build: true
+        allow-build: info
+    - name: Locate supported Python ${{ matrix.python-version }}
+      if: ${{ env.pythonLocation }}
+      shell: bash
+      run: |
+        echo "PYTHONHOME=${pythonLocation}" >> "$GITHUB_ENV"
+        export expected="${{ steps.setup-python.outputs.python-path }}"
+        dirname() { printf '%s\n' \
+            'import os, sys' \
+            'print(os.path.dirname(sys.argv[1]))' \
+            | ${expected} - "$1"; }
+        expd="$(dirname "$expected")"
+        export python="$(command -v python)"
+        [ "$expd" = "$(dirname "$python")" ] || echo "PATH=$expd:${PATH}" >> "$GITHUB_ENV"
+        [ -x "$python" ] || printf '%s\n' \
+            'import os' \
+            'exp = os.environ["expected"]' \
+            'python = os.environ["python"]' \
+            'exps = os.path.split(exp)' \
+            'if python and (os.path.dirname(python) == exp[0]):' \
+            '    exit(0)' \
+            'exps[1] = "python" + os.path.splitext(exps[1])[1]' \
+            'python = os.path.join(*exps)' \
+            'try:' \
+            '    os.symlink(exp, python)' \
+            'except AttributeError:' \
+            '    os.rename(exp, python)' \
+            | ${expected} -
+        printf '%s\n' \
+            'import sys' \
+            'print(sys.path)' \
+            | ${expected} -
+    #-------- Python next (was 3.12) -
+    - name: Set up CPython 3.next environment
+      if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == env.next }}
+      shell: bash
+      run: |
+        PYENV_ROOT=$HOME/.local/share/pyenv
+        echo "PYENV_ROOT=${PYENV_ROOT}" >> "$GITHUB_ENV"
+    - name: Cache Python 3.next 
+      id: cachenext
+      if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == env.next }}
+      uses: actions/cache@v3
+      with:
+        key: python-${{ env.next }}
+        path: |
+          ${{ env.PYENV_ROOT }}
+    - name: Build and set up Python 3.next
+      if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == env.next && ! steps.cachenext.outputs.cache-hit }}
+      # dl and build locally
+      shell: bash
+      run: |
+        # Install build environment
+        sudo apt-get install -y build-essential llvm libssl-dev tk-dev  \
+                      libncursesw5-dev libreadline-dev libsqlite3-dev   \
+                      libffi-dev xz-utils zlib1g-dev libbz2-dev liblzma-dev
+        # Download PyEnv from its GitHub repository.
+        export PYENV_ROOT=${{ env.PYENV_ROOT }}
+        export PATH=$PYENV_ROOT/bin:$PATH
+        git clone "https://github.com/pyenv/pyenv.git" "$PYENV_ROOT"
+        pyenv install ${{ env.next }}
+    - name: Locate Python 3.next
+      if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == env.next }}
+      shell: bash
+      run: |
+        PYTHONHOME="$(echo "${{ env.PYENV_ROOT }}/versions/${{ env.next }}."*)"
+        test -n "$PYTHONHOME"
+        echo "PYTHONHOME=$PYTHONHOME" >> "$GITHUB_ENV"
+        echo "PATH=${PYTHONHOME}/bin:$PATH" >> "$GITHUB_ENV"
+    #-------- Python 2.7 --
+    - name: Set up Python 2.7
+      if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.7' }}
+      # install 2.7
+      shell: bash
+      run: |
+        sudo apt-get install -y python2 python-is-python2
+        echo "PYTHONHOME=/usr" >> "$GITHUB_ENV"
+    #-------- Python 2.6 --
+    - name: Set up Python 2.6 environment
+      if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.6' }}
+      shell: bash
+      run: |
+        openssl_name=openssl-1.0.2u
+        echo "openssl_name=${openssl_name}" >> "$GITHUB_ENV"
+        openssl_dir=$HOME/.local/opt/$openssl_name
+        echo "openssl_dir=${openssl_dir}" >> "$GITHUB_ENV"
+        PYENV_ROOT=$HOME/.local/share/pyenv
+        echo "PYENV_ROOT=${PYENV_ROOT}" >> "$GITHUB_ENV"
+        sudo apt-get install -y openssl ca-certificates
+    - name: Cache Python 2.6
+      id: cache26
+      if: ${{ matrix.python-version == '2.6' }}
+      uses: actions/cache@v3
+      with:
+        key: python-2.6.9
+        path: |
+          ${{ env.openssl_dir }}
+          ${{ env.PYENV_ROOT }}
+    - name: Build and set up Python 2.6
+      if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.6' && ! steps.cache26.outputs.cache-hit }}
+      # dl and build locally
+      shell: bash
+      run: |
+        # Install build environment
+        sudo apt-get install -y build-essential llvm libssl-dev tk-dev  \
+                      libncursesw5-dev libreadline-dev libsqlite3-dev   \
+                      libffi-dev xz-utils zlib1g-dev libbz2-dev liblzma-dev
+        # Download and install OpenSSL 1.0.2, back in time
+        openssl_name=${{ env.openssl_name }}
+        openssl_targz=${openssl_name}.tar.gz
+        openssl_dir=${{ env.openssl_dir }}
+        openssl_inc=$openssl_dir/include
+        openssl_lib=$openssl_dir/lib
+        openssl_ssl=$openssl_dir/ssl
+        curl -L "https://www.openssl.org/source/$openssl_targz" -o $openssl_targz
+        tar -xf $openssl_targz
+        ( cd $openssl_name; \
+          ./config --prefix=$openssl_dir --openssldir=${openssl_dir}/ssl \
+            --libdir=lib -Wl,-rpath=${openssl_dir}/lib shared zlib-dynamic && \
+          make && \
+          make install )
+        rm -rf $openssl_name
+        rmdir $openssl_ssl/certs && ln -s /etc/ssl/certs $openssl_ssl/certs
+        # Download PyEnv from its GitHub repository.
+        export PYENV_ROOT=${{ env.PYENV_ROOT }}
+        export PATH=$PYENV_ROOT/bin:$PATH
+        git clone "https://github.com/pyenv/pyenv.git" "$PYENV_ROOT"
+        # Prevent pyenv build trying (and failing) to update pip
+        export GET_PIP=get-pip-2.6.py
+        echo 'import sys; sys.exit(0)' > ${GET_PIP}
+        GET_PIP=$(realpath $GET_PIP)
+        # Build and install Python
+        export CFLAGS="-I$openssl_inc"
+        export LDFLAGS="-L$openssl_lib"
+        export LD_LIBRARY_PATH="$openssl_lib"
+        pyenv install 2.6.9
+    - name: Locate Python 2.6
+      if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.6' }}
+      shell: bash
+      run: |
+        PYTHONHOME="${{ env.PYENV_ROOT }}/versions/2.6.9"
+        echo "PYTHONHOME=$PYTHONHOME" >> "$GITHUB_ENV"
+        echo "PATH=${PYTHONHOME}/bin:$PATH" >> "$GITHUB_ENV"
+        echo "LD_LIBRARY_PATH=${{ env.openssl_dir }}/lib${LD_LIBRARY_PATH:+:}${LD_LIBRARY_PATH}" >> "$GITHUB_ENV"
+    #-------- Jython ------
     - name: Set up Java 8
       if: ${{ matrix.python-impl == 'jython' }}
-      uses: actions/setup-java@v1
+      uses: actions/setup-java@v3
       with:
         java-version: 8
-    - name: Install Jython
+        distribution: 'zulu'
+    - name: Setup Jython environment
       if: ${{ matrix.python-impl == 'jython' }}
+      shell: bash
       run: |
-        wget https://repo1.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar -O jython-installer.jar
-        java -jar jython-installer.jar -s -d "$HOME/jython"
-        echo "$HOME/jython/bin" >> $GITHUB_PATH
-    - name: Install nose
-      if: ${{ matrix.python-impl != 'jython' }}
-      run: pip install nose
-    - name: Install nose (Jython)
-      if: ${{ matrix.python-impl == 'jython' }}
-      # Working around deprecation of support for non-SNI clients at PyPI CDN (see https://status.python.org/incidents/hzmjhqsdjqgb)
+        echo "JYTHON_ROOT=${HOME}/jython" >> "$GITHUB_ENV"
+        echo "PIP=pip" >> "$GITHUB_ENV"
+    - name: Cache Jython
+      id: cachejy
+      if: ${{ matrix.python-impl == 'jython' && matrix.python-version == '2.7' }}
+      uses: actions/cache@v3
+      with:
+        # 2.7.3 now available, may solve SNI issue
+        key: jython-2.7.1
+        path: |
+          ${{ env.JYTHON_ROOT }}
+    - name: Install Jython
+      if: ${{ matrix.python-impl == 'jython' && matrix.python-version == '2.7' && ! steps.cachejy.outputs.cache-hit }}
+      shell: bash
+      run: |
+        JYTHON_ROOT="${{ env.JYTHON_ROOT }}"
+        curl -L "https://repo1.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar" -o jython-installer.jar
+        java -jar jython-installer.jar -s -d "${JYTHON_ROOT}"
+        echo "${JYTHON_ROOT}/bin" >> "$GITHUB_PATH"
+    - name: Set up cached Jython
+      if: ${{ steps.cachejy.outputs.cache-hit }}
+      shell: bash
+      run: |
+        JYTHON_ROOT="${{ env.JYTHON_ROOT }}"
+        echo "${JYTHON_ROOT}/bin" >> $GITHUB_PATH
+    - name: Install supporting Python 2.7 if possible
+      if: ${{ steps.cachejy.outputs.cache-hit }}
+      shell: bash
+      run: |
+        sudo apt-get install -y python2.7 || true
+    #-------- pip ---------
+    - name: Set up supported Python ${{ matrix.python-version }} pip
+      if: ${{ (matrix.python-version != '3.2' && steps.setup-python.outputs.python-path) || matrix.python-version == '2.7' }}
+      # This step may run in either Linux or Windows
+      shell: bash
       run: |
-        wget https://files.pythonhosted.org/packages/99/4f/13fb671119e65c4dce97c60e67d3fd9e6f7f809f2b307e2611f4701205cb/nose-1.3.7-py2-none-any.whl
-        pip install nose-1.3.7-py2-none-any.whl
+        echo "$PATH"
+        echo "$PYTHONHOME"
+        # curl is available on both Windows and Linux, -L follows redirects, -O gets name
+        python -m ensurepip || python -m pip --version || { \
+          get_pip="${{ contains(needs.select.outputs.own-pip-versions, matrix.python-version) && format('{0}/', matrix.python-version) || '' }}"; \
+          curl -L -O "https://bootstrap.pypa.io/pip/${get_pip}get-pip.py"; \
+          python get-pip.py; }
+    - name: Set up Python 2.6 pip
+      if: ${{ matrix.python-version == '2.6' }}
+      shell: bash
+      run: |
+        python -m pip --version || { \
+          curl -L -O "https://bootstrap.pypa.io/pip/2.6/get-pip.py"; \
+          curl -L -O "https://files.pythonhosted.org/packages/ac/95/a05b56bb975efa78d3557efa36acaf9cf5d2fd0ee0062060493687432e03/pip-9.0.3-py2.py3-none-any.whl"; \
+          python get-pip.py --no-setuptools --no-wheel pip-9.0.3-py2.py3-none-any.whl; }
+        # work-around to invoke pip module on 2.6: https://bugs.python.org/issue2751
+        echo "PIP=python -m pip.__main__" >> "$GITHUB_ENV"
+    - name: Set up other Python ${{ matrix.python-version }} pip
+      if: ${{ matrix.python-version == '3.2' && steps.setup-python.outputs.python-path }}
+      shell: bash
+      run: |
+        python -m pip --version || { \
+          curl -L -O "https://bootstrap.pypa.io/pip/3.2/get-pip.py"; \
+          curl -L -O "https://files.pythonhosted.org/packages/b2/d0/cd115fe345dd6f07ec1c780020a7dfe74966fceeb171e0f20d1d4905b0b7/pip-7.1.2-py2.py3-none-any.whl"; \
+          python get-pip.py --no-setuptools --no-wheel pip-7.1.2-py2.py3-none-any.whl; }
+    #-------- unittest ----
+    - name: Upgrade Unittest for Python 2.6
+      if: ${{ matrix.python-version == '2.6' }}
+      shell: bash
+      run: |
+        # Work around deprecation of support for non-SNI clients at PyPI CDN (see https://status.python.org/incidents/hzmjhqsdjqgb)
+        $PIP -qq show unittest2 || { \
+          for u in "65/26/32b8464df2a97e6dd1b656ed26b2c194606c16fe163c695a992b36c11cdf/six-1.13.0-py2.py3-none-any.whl" \
+              "f2/94/3af39d34be01a24a6e65433d19e107099374224905f1e0cc6bbe1fd22a2f/argparse-1.4.0-py2.py3-none-any.whl" \
+              "c7/a3/c5da2a44c85bfbb6eebcfc1dde24933f8704441b98fdde6528f4831757a6/linecache2-1.0.0-py2.py3-none-any.whl" \
+              "17/0a/6ac05a3723017a967193456a2efa0aa9ac4b51456891af1e2353bb9de21e/traceback2-1.4.0-py2.py3-none-any.whl" \
+              "72/20/7f0f433060a962200b7272b8c12ba90ef5b903e218174301d0abfd523813/unittest2-1.1.0-py2.py3-none-any.whl"; do \
+            curl -L -O "https://files.pythonhosted.org/packages/${u}"; \
+            $PIP install ${u##*/}; \
+          done; }
+        # make tests use unittest2
+        for test in ./test/test_*.py ./test/helper.py; do
+          sed -r -i -e '/^import unittest$/s/test/test2 as unittest/' "$test"
+        done
+    #-------- nose --------
+    - name: Install nose for Python ${{ matrix.python-version }}
+      if: ${{ (matrix.python-version != '3.2' && steps.setup-python.outputs.python-path) || (matrix.python-impl == 'cpython' && (matrix.python-version == '2.7' || matrix.python-version == env.next)) }}
+      shell: bash
+      run: |
+        echo "$PATH"
+        echo "$PYTHONHOME"
+        # Use PyNose for recent Pythons instead of Nose
+        py3ver="${{ matrix.python-version }}"
+        py3ver=${py3ver#3.}
+        [ "$py3ver" != "${{ matrix.python-version }}" ] && py3ver=${py3ver%.*} || py3ver=0
+        [ "$py3ver" -ge 9 ] && nose=pynose || nose=nose
+        $PIP -qq show $nose || $PIP install $nose
+    - name: Install nose for other Python 2
+      if: ${{ matrix.python-impl == 'jython' || (matrix.python-impl == 'cpython' && matrix.python-version == '2.6') }}
+      shell: bash
+      run: |
+        # Work around deprecation of support for non-SNI clients at PyPI CDN (see https://status.python.org/incidents/hzmjhqsdjqgb)
+        $PIP -qq show nose || { \
+          curl -L -O "https://files.pythonhosted.org/packages/99/4f/13fb671119e65c4dce97c60e67d3fd9e6f7f809f2b307e2611f4701205cb/nose-1.3.7-py2-none-any.whl"; \
+          $PIP install nose-1.3.7-py2-none-any.whl; }
+    - name: Install nose for other Python 3
+      if: ${{ matrix.python-version == '3.2' && steps.setup-python.outputs.python-path }}
+      shell: bash
+      run: |
+        $PIP -qq show nose || { \
+          curl -L -O "https://files.pythonhosted.org/packages/15/d8/dd071918c040f50fa1cf80da16423af51ff8ce4a0f2399b7bf8de45ac3d9/nose-1.3.7-py3-none-any.whl"; \
+          $PIP install nose-1.3.7-py3-none-any.whl; }
+    - name: Set up nosetest test
+      if: ${{ contains(needs.select.outputs.test-set, matrix.ytdl-test-set ) }}
+      shell: bash
+      run: |
+        # set PYTHON_VER
+        PYTHON_VER=${{ matrix.python-version }}
+        [ "${PYTHON_VER#*-}" != "$PYTHON_VER" ] || PYTHON_VER="${{ matrix.python-impl }}-${PYTHON_VER}"
+        echo "PYTHON_VER=$PYTHON_VER" >> "$GITHUB_ENV"
+        echo "PYTHON_IMPL=${{ matrix.python-impl }}" >> "$GITHUB_ENV"
+        # define a test to validate the Python version used by nosetests
+        printf '%s\n' \
+          'from __future__ import unicode_literals' \
+          'import sys, os, platform' \
+          'try:' \
+          '    import unittest2 as unittest' \
+          'except ImportError:' \
+          '    import unittest' \
+          'class TestPython(unittest.TestCase):' \
+          '    def setUp(self):' \
+          '        self.ver = os.environ["PYTHON_VER"].split("-")' \
+          '    def test_python_ver(self):' \
+          '        self.assertEqual(["%d" % v for v in sys.version_info[:2]], self.ver[-1].split(".")[:2])' \
+          '        self.assertTrue(sys.version.startswith(self.ver[-1]))' \
+          '        self.assertIn(self.ver[0], ",".join((sys.version, platform.python_implementation())).lower())' \
+          '    def test_python_impl(self):' \
+          '        self.assertIn(platform.python_implementation().lower(), (os.environ["PYTHON_IMPL"], self.ver[0]))' \
+          > test/test_python.py
+    #-------- TESTS -------
     - name: Run tests
+      if: ${{ contains(needs.select.outputs.test-set, matrix.ytdl-test-set ) }}
       continue-on-error: ${{ matrix.ytdl-test-set == 'download' || matrix.python-impl == 'jython' }}
       env:
         YTDL_TEST_SET: ${{ matrix.ytdl-test-set }}
-      run: ./devscripts/run_tests.${{ matrix.run-tests-ext }}
+      run: |
+        ./devscripts/run_tests.${{ matrix.run-tests-ext }}
   flake8:
     name: Linter
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@v3
     - name: Set up Python
-      uses: actions/setup-python@v2
+      uses: actions/setup-python@v4
       with:
         python-version: 3.9
     - name: Install flake8
       run: pip install flake8
     - name: Run flake8
       run: flake8 .
+
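
As background for the `select` job above: its `json_list` shell helper just turns a comma/space-separated string into a JSON array string, which the `tests` job then consumes via `fromJSON()` to build its matrix. A minimal Python sketch of the same transformation (illustration only, not part of the commit):

```python
def json_list(csv):
    # Split on commas and whitespace, drop empty items, and emit a JSON
    # array string, mirroring the shell helper in ci.yml above.
    items = [s for s in csv.replace(',', ' ').split() if s]
    return '[%s]' % ', '.join('"%s"' % s for s in items)


assert json_list('2.7, 3.2, 3.5, 3.9, 3.11') == '["2.7", "3.2", "3.5", "3.9", "3.11"]'
```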

+ 1 - 1
CONTRIBUTING.md

@@ -150,7 +150,7 @@ After you have ensured this site is distributing its content legally, you can fo
                 # TODO more properties (see youtube_dl/extractor/common.py)
             }
     ```
-5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
+5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/extractors.py). This makes the extractor available for use, as long as the class ends with `IE`.
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in.
 7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303). Add tests and code for as many as you want.
 8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):

+ 121 - 26
README.md

@@ -33,7 +33,7 @@ Windows users can [download an .exe file](https://yt-dl.org/latest/youtube-dl.ex
 You can also use pip:

     sudo -H pip install --upgrade youtube-dl
-    
+
 This command will update youtube-dl if you have already installed it. See the [pypi page](https://pypi.python.org/pypi/youtube_dl) for more information.

 macOS users can install youtube-dl with [Homebrew](https://brew.sh/):
@@ -563,7 +563,7 @@ The basic usage is not to set any template arguments when downloading a single f
  - `is_live` (boolean): Whether this video is a live stream or a fixed-length video
  - `start_time` (numeric): Time in seconds where the reproduction should start, as specified in the URL
  - `end_time` (numeric): Time in seconds where the reproduction should end, as specified in the URL
- - `format` (string): A human-readable description of the format 
+ - `format` (string): A human-readable description of the format
  - `format_id` (string): Format code specified by `--format`
  - `format_note` (string): Additional info about the format
  - `width` (numeric): Width of the video
@@ -632,7 +632,7 @@ To use percent literals in an output template use `%%`. To output to stdout use

 The current default template is `%(title)s-%(id)s.%(ext)s`.

-In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
+In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title.

 #### Output template and Windows batch files

@@ -675,7 +675,7 @@ The general syntax for format selection is `--format FORMAT` or shorter `-f FORM

 **tl;dr:** [navigate me to examples](#format-selection-examples).

-The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific. 
+The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific.

 You can also use a file extension (currently `3gp`, `aac`, `flv`, `m4a`, `mp3`, `mp4`, `ogg`, `wav`, `webm` are supported) to download the best quality format of a particular file extension served as a single file, e.g. `-f webm` will download the best quality format with the `webm` extension served as a single file.

@@ -760,7 +760,7 @@ Videos can be filtered by their upload date using the options `--date`, `--dateb

  - Absolute dates: Dates in the format `YYYYMMDD`.
  - Relative dates: Dates in the format `(now|today)[+-][0-9](day|week|month|year)(s)?`
- 
+
 Examples:

 ```bash
@@ -918,7 +918,7 @@ Either prepend `https://www.youtube.com/watch?v=` or separate the ID from the op

 Use the `--cookies` option, for example `--cookies /path/to/cookies/file.txt`.

-In order to extract cookies from browser use any conforming browser extension for exporting cookies. For example, [Get cookies.txt](https://chrome.google.com/webstore/detail/get-cookiestxt/bgaddhkoddajcdgocldbbfleckgcbcid/) (for Chrome) or [cookies.txt](https://addons.mozilla.org/en-US/firefox/addon/cookies-txt/) (for Firefox).
+In order to extract cookies from browser use any conforming browser extension for exporting cookies. For example, [Get cookies.txt LOCALLY](https://chrome.google.com/webstore/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc) (for Chrome) or [cookies.txt](https://addons.mozilla.org/en-US/firefox/addon/cookies-txt/) (for Firefox).

 Note that the cookies file must be in Mozilla/Netscape format and the first line of the cookies file must be either `# HTTP Cookie File` or `# Netscape HTTP Cookie File`. Make sure you have correct [newline format](https://en.wikipedia.org/wiki/Newline) in the cookies file and convert newlines if necessary to correspond with your OS, namely `CRLF` (`\r\n`) for Windows and `LF` (`\n`) for Unix and Unix-like systems (Linux, macOS, etc.). `HTTP Error 400: Bad Request` when using `--cookies` is a good sign of invalid newline format.

@@ -1000,6 +1000,8 @@ To run the test, simply invoke your favorite test runner, or execute a test file
     python test/test_download.py
     nosetests

+For Python versions 3.6 and later, you can use [pynose](https://pypi.org/project/pynose/) to implement `nosetests`. The original [nose](https://pypi.org/project/nose/) has not been upgraded for 3.10 and later.
+
 See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.

 If you want to create a build of youtube-dl yourself, you'll need
@@ -1069,9 +1071,11 @@ After you have ensured this site is distributing its content legally, you can fo
             }
     ```
 5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
-6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in.
-7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303). Add tests and code for as many as you want.
-8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test (actually, test case) then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note:
+    * the test names use the extractor class name **without the trailing `IE`**
+    * tests with `only_matching` key in test's dict are not counted.
+8. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303). Add tests and code for as many as you want.
+9. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):

         $ flake8 youtube_dl/extractor/yourextractor.py

@@ -1089,7 +1093,7 @@ In any case, thank you very much for your contributions!

 ## youtube-dl coding conventions

-This section introduces a guide lines for writing idiomatic, robust and future-proof extractor code.
+This section introduces guidelines for writing idiomatic, robust and future-proof extractor code.

 Extractors are very fragile by nature since they depend on the layout of the source data provided by 3rd party media hosters out of your control and this layout tends to change. As an extractor implementer your task is not only to write code that will extract media links and metadata correctly but also to minimize dependency on the source's layout and even to make the code foresee potential future changes and be ready for that. This is important because it will allow the extractor not to break on minor layout changes thus keeping old youtube-dl versions working. Even though this breakage issue is easily fixed by emitting a new version of youtube-dl with a fix incorporated, all the previous versions become broken in all repositories and distros' packages that may not be so prompt in fetching the update from us. Needless to say, some non rolling release distros may never receive an update at all.

@@ -1112,7 +1116,7 @@ Say you have some source dictionary `meta` that you've fetched as JSON with HTTP
 ```python
 meta = self._download_json(url, video_id)
 ```
-    
+
 Assume at this point `meta`'s layout is:

 ```python
@@ -1156,7 +1160,7 @@ description = self._search_regex(
 ```

 On failure this code will silently continue the extraction with `description` set to `None`. That is useful for metafields that may or may not be present.
- 
+
 ### Provide fallbacks

 When extracting metadata try to do so from multiple sources. For example if `title` is present in several places, try extracting from at least some of them. This makes it more future-proof in case some of the sources become unavailable.
@@ -1204,7 +1208,7 @@ r'(id|ID)=(?P<id>\d+)'
 #### Make regular expressions relaxed and flexible

 When using regular expressions try to write them fuzzy, relaxed and flexible, skipping insignificant parts that are more likely to change, allowing both single and double quotes for quoted values and so on.
- 
+
 ##### Example

 Say you need to extract `title` from the following HTML code:
@@ -1228,7 +1232,7 @@ title = self._search_regex(
     webpage, 'title', group='title')
 ```

-Note how you tolerate potential changes in the `style` attribute's value or switch from using double quotes to single for `class` attribute: 
+Note how you tolerate potential changes in the `style` attribute's value or switch from using double quotes to single for `class` attribute:

 The code definitely should not look like:

@@ -1329,27 +1333,114 @@ Wrap all extracted numeric data into safe functions from [`youtube_dl/utils.py`]

 Use `url_or_none` for safe URL processing.

-Use `try_get` for safe metadata extraction from parsed JSON.
+Use `traverse_obj` for safe metadata extraction from parsed JSON.

-Use `unified_strdate` for uniform `upload_date` or any `YYYYMMDD` meta field extraction, `unified_timestamp` for uniform `timestamp` extraction, `parse_filesize` for `filesize` extraction, `parse_count` for count meta fields extraction, `parse_resolution`, `parse_duration` for `duration` extraction, `parse_age_limit` for `age_limit` extraction. 
+Use `unified_strdate` for uniform `upload_date` or any `YYYYMMDD` meta field extraction, `unified_timestamp` for uniform `timestamp` extraction, `parse_filesize` for `filesize` extraction, `parse_count` for count meta fields extraction, `parse_resolution`, `parse_duration` for `duration` extraction, `parse_age_limit` for `age_limit` extraction.

 Explore [`youtube_dl/utils.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/utils.py) for more useful convenience functions.

 #### More examples

 ##### Safely extract optional description from parsed JSON
+
+When processing complex JSON, as often returned by site API requests or stashed in web pages for "hydration", you can use the `traverse_obj()` utility function to handle multiple fallback values and to ensure the expected type of metadata items. The function's docstring defines how the function works: also review usage in the codebase for more examples.
+
+In this example, a text `description`, or `None`, is pulled from the `.result.video[0].summary` member of the parsed JSON `response`, if available.
+
+```python
+description = traverse_obj(response, ('result', 'video', 0, 'summary', T(compat_str)))
+```
+`T(...)` is a shorthand for a set literal; if you hate people who still run Python 2.6, `T(type_or_transformation)` could be written as a set literal `{type_or_transformation}`.
+
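
(For illustration, not part of the commit: a sketch of how the two spellings line up, assuming Python 3 and the `T`/`traverse_obj` helpers from `youtube_dl.utils` that this merge introduces.)

```python
from youtube_dl.utils import T, traverse_obj

response = {'result': {'video': [{'summary': 'A summary'}]}}

# T(x) just builds a one-element set, so these two calls are equivalent:
desc1 = traverse_obj(response, ('result', 'video', 0, 'summary', T(str)))
desc2 = traverse_obj(response, ('result', 'video', 0, 'summary', {str}))
assert desc1 == desc2 == 'A summary'
```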
+Some extractors use the older and less capable `try_get()` function in the same way.
+
 ```python
 description = try_get(response, lambda x: x['result']['video'][0]['summary'], compat_str)
 ```

 ##### Safely extract more optional metadata
+
+In this example, various optional metadata values are extracted from the `.result.video[0]` member of the parsed JSON `response`, which is expected to be a JS object, parsed into a `dict`, with no crash if that isn't so, or if any of the target values are missing or invalid.
+
 ```python
-video = try_get(response, lambda x: x['result']['video'][0], dict) or {}
+video = traverse_obj(response, ('result', 'video', 0, T(dict))) or {}
+# formerly:
+# video = try_get(response, lambda x: x['result']['video'][0], dict) or {}
 description = video.get('summary')
 duration = float_or_none(video.get('durationMs'), scale=1000)
 view_count = int_or_none(video.get('views'))
 ```

+#### Safely extract nested lists
+
+Suppose you've extracted JSON like this into a Python data structure named `media_json` using, say, the `_download_json()` or `_parse_json()` methods of `InfoExtractor`:
+```json
+{
+    "title": "Example video",
+    "comment": "try extracting this",
+    "media": [{
+        "type": "bad",
+        "size": 320,
+        "url": "https://some.cdn.site/bad.mp4"
+    }, {
+        "type": "streaming",
+        "url": "https://some.cdn.site/hls.m3u8"
+    }, {
+        "type": "super",
+        "size": 1280,
+        "url": "https://some.cdn.site/good.webm"
+    }],
+    "moreStuff": "more values",
+    ...
+}
+```
+
+Then extractor code like this can collect the various fields of the JSON:
+```python
+...
+from ..utils import (
+    determine_ext,
+    int_or_none,
+    T,
+    traverse_obj,
+    txt_or_none,
+    url_or_none,
+)
+...
+        ...
+        info_dict = {}
+        # extract title and description if valid and not empty
+        info_dict.update(traverse_obj(media_json, {
+            'title': ('title', T(txt_or_none)),
+            'description': ('comment', T(txt_or_none)),
+        }))
+
+        # extract any recognisable media formats
+        fmts = []
+        # traverse into "media" list, extract `dict`s with desired keys
+        for fmt in traverse_obj(media_json, ('media', Ellipsis, {
+                'format_id': ('type', T(txt_or_none)),
+                'url': ('url', T(url_or_none)),
+                'width': ('size', T(int_or_none)), })):
+            # bad `fmt` values were `None` and removed
+            if 'url' not in fmt:
+                continue
+            fmt_url = fmt['url']  # known to be valid URL
+            ext = determine_ext(fmt_url)
+            if ext == 'm3u8':
+                fmts.extend(self._extract_m3u8_formats(fmt_url, video_id, 'mp4', fatal=False))
+            else:
+                fmt['ext'] = ext
+                fmts.append(fmt)
+
+        # sort, raise if no formats
+        self._sort_formats(fmts)
+
+        info_dict['formats'] = fmts
+        ...
+```
+The extractor raises an exception rather than random crashes if the JSON structure changes so that no formats are found.
+
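
For concreteness, here is roughly what the `media` traversal in the sketch above yields for the sample JSON (an illustration assuming the `traverse_obj` semantics described in its docstring; fields that fail validation are simply dropped from the resulting `dict`s):

```python
from youtube_dl.utils import T, int_or_none, traverse_obj, txt_or_none, url_or_none

media_json = {'media': [
    {'type': 'bad', 'size': 320, 'url': 'https://some.cdn.site/bad.mp4'},
    {'type': 'streaming', 'url': 'https://some.cdn.site/hls.m3u8'},
    {'type': 'super', 'size': 1280, 'url': 'https://some.cdn.site/good.webm'},
]}

fmts = traverse_obj(media_json, ('media', Ellipsis, {
    'format_id': ('type', T(txt_or_none)),
    'url': ('url', T(url_or_none)),
    'width': ('size', T(int_or_none))}))

# Expected shape: the 'streaming' entry has no 'size', so no 'width' key:
# [{'format_id': 'bad', 'url': 'https://some.cdn.site/bad.mp4', 'width': 320},
#  {'format_id': 'streaming', 'url': 'https://some.cdn.site/hls.m3u8'},
#  {'format_id': 'super', 'url': 'https://some.cdn.site/good.webm', 'width': 1280}]
```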
 # EMBEDDING YOUTUBE-DL

 youtube-dl makes the best effort to be a good command-line program, and thus should be callable from any programming language. If you encounter any problems parsing its output, feel free to [create a report](https://github.com/ytdl-org/youtube-dl/issues/new).
@@ -1406,7 +1497,11 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl:

 # BUGS

-Bugs and suggestions should be reported at: <https://github.com/ytdl-org/youtube-dl/issues>. Unless you were prompted to or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the IRC channel [#youtube-dl](irc://chat.freenode.net/#youtube-dl) on freenode ([webchat](https://webchat.freenode.net/?randomnick=1&channels=youtube-dl)).
+Bugs and suggestions should be reported in the issue tracker: <https://github.com/ytdl-org/youtube-dl/issues> (<https://yt-dl.org/bug> is an alias for this). Unless you were prompted to or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the IRC channel [#youtube-dl](irc://chat.freenode.net/#youtube-dl) on freenode ([webchat](https://webchat.freenode.net/?randomnick=1&channels=youtube-dl)).
+
+## Opening a bug report or suggestion
+
+Be sure to follow instructions provided **below** and **in the issue tracker**. Complete the appropriate issue template fully. Consider whether your problem is covered by an existing issue: if so, follow the discussion there. Avoid commenting on existing duplicate issues as such comments do not add to the discussion of the issue and are liable to be treated as spam.

 **Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
 ```
@@ -1426,17 +1521,17 @@ $ youtube-dl -v <your command line>

 The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.

-Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
+Finally please review your issue to avoid various common mistakes (you can and should use this as a checklist) listed below.

 ### Is the description of the issue itself sufficient?

-We often get issue reports that we cannot really decipher. While in most cases we eventually get the required information after asking back multiple times, this poses an unnecessary drain on our resources. Many contributors, including myself, are also not native speakers, so we may misread some parts.
+We often get issue reports that are hard to understand. To avoid subsequent clarifications, and to assist participants who are not native English speakers, please elaborate on what feature you are requesting, or what bug you want to be fixed.

-So please elaborate on what feature you are requesting, or what bug you want to be fixed. Make sure that it's obvious
+Make sure that it's obvious

 - What the problem is
 - How it could be fixed
-- How your proposed solution would look like
+- How your proposed solution would look

 If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.

@@ -1446,13 +1541,13 @@ If your server has multiple IPs or you suspect censorship, adding `--call-home`
 
 **Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `https://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `https://www.youtube.com/`) is *not* an example URL.
 
-###  Are you using the latest version?
+###  Is the issue already documented?
 
-Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
+Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/ytdl-org/youtube-dl/search?type=Issues) of this repository. Initially, at least, use the search term `-label:duplicate` to focus on active issues. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
 
-###  Is the issue already documented?
+###  Are you using the latest version?
 
-Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/ytdl-org/youtube-dl/search?type=Issues) of this repository. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
+Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
 
 ###  Why are existing options not enough?
 

+ 1 - 0
devscripts/__init__.py

@@ -0,0 +1 @@
+# Empty file needed to make devscripts.utils properly importable from outside

+ 7 - 4
devscripts/bash-completion.py

@@ -5,8 +5,12 @@ import os
 from os.path import dirname as dirn
 import sys
 
-sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
+
 import youtube_dl
+from youtube_dl.compat import compat_open as open
+
+from utils import read_file
 
 BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
 BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
@@ -18,9 +22,8 @@ def build_completion(opt_parser):
         for option in group.option_list:
             # for every long flag
             opts_flag.append(option.get_opt_string())
-    with open(BASH_COMPLETION_TEMPLATE) as f:
-        template = f.read()
-    with open(BASH_COMPLETION_FILE, "w") as f:
+    template = read_file(BASH_COMPLETION_TEMPLATE)
+    with open(BASH_COMPLETION_FILE, "w", encoding='utf-8') as f:
         # just using the special char
         filled_template = template.replace("{{flags}}", " ".join(opts_flag))
         f.write(filled_template)

+ 83 - 0
devscripts/cli_to_api.py

@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+"""
+This script displays the API parameters corresponding to a yt-dl command line
+
+Example:
+$ ./cli_to_api.py -f best
+{u'format': 'best'}
+$
+"""
+
+# Allow direct execution
+import os
+import sys
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+import youtube_dl
+from types import MethodType
+
+
+def cli_to_api(*opts):
+    YDL = youtube_dl.YoutubeDL
+
+    # to extract the parsed options, break out of YoutubeDL instantiation
+
+    # return options via this Exception
+    class ParseYTDLResult(Exception):
+        def __init__(self, result):
+            super(ParseYTDLResult, self).__init__('result')
+            self.opts = result
+
+    # replacement constructor that raises ParseYTDLResult
+    def ytdl_init(ydl, ydl_opts):
+        super(YDL, ydl).__init__(ydl_opts)
+        raise ParseYTDLResult(ydl_opts)
+
+    # patch in the constructor
+    YDL.__init__ = MethodType(ytdl_init, YDL)
+
+    # core parser
+    def parsed_options(argv):
+        try:
+            youtube_dl._real_main(list(argv))
+        except ParseYTDLResult as result:
+            return result.opts
+
+    # from https://github.com/yt-dlp/yt-dlp/issues/5859#issuecomment-1363938900
+    default = parsed_options([])
+
+    def neq_opt(a, b):
+        if a == b:
+            return False
+        if a is None and repr(type(b)).endswith(".utils.DateRange'>"):
+            return '0001-01-01 - 9999-12-31' != '{0}'.format(b)
+        return a != b
+
+    diff = dict((k, v) for k, v in parsed_options(opts).items() if neq_opt(default[k], v))
+    if 'postprocessors' in diff:
+        diff['postprocessors'] = [pp for pp in diff['postprocessors'] if pp not in default['postprocessors']]
+    return diff
+
+
+def main():
+    from pprint import PrettyPrinter
+
+    pprint = PrettyPrinter()
+    super_format = pprint.format
+
+    def format(object, context, maxlevels, level):
+        if repr(type(object)).endswith(".utils.DateRange'>"):
+            return '{0}: {1}>'.format(repr(object)[:-2], object), True, False
+        return super_format(object, context, maxlevels, level)
+
+    pprint.format = format
+
+    pprint.pprint(cli_to_api(*sys.argv))
+
+
+if __name__ == '__main__':
+    main()

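The new devscripts/cli_to_api.py can also be used as a library, not only from the shell. A minimal sketch of such use (the sys.path setup and the printed result are illustrative assumptions, not part of the patch):

```python
# Hypothetical: call the new helper from Python to map CLI flags to
# the corresponding YoutubeDL API parameters.
import sys

sys.path.insert(0, 'devscripts')  # assumes the repository root as working directory

from cli_to_api import cli_to_api

# Only the options that differ from the parsed defaults are returned.
print(cli_to_api('-f', 'best', '--restrict-filenames'))
# expected shape: {'format': 'best', 'restrictfilenames': True}
```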
+ 5 - 4
devscripts/create-github-release.py

@@ -1,7 +1,6 @@
 #!/usr/bin/env python
 from __future__ import unicode_literals
 
-import io
 import json
 import mimetypes
 import netrc
@@ -10,7 +9,9 @@ import os
 import re
 import sys
 
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+dirn = os.path.dirname
+
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
 
 from youtube_dl.compat import (
     compat_basestring,
@@ -22,6 +23,7 @@ from youtube_dl.utils import (
     make_HTTPS_handler,
     sanitized_Request,
 )
+from utils import read_file
 
 
 class GitHubReleaser(object):
@@ -89,8 +91,7 @@ def main():
 
     changelog_file, version, build_path = args
 
-    with io.open(changelog_file, encoding='utf-8') as inf:
-        changelog = inf.read()
+    changelog = read_file(changelog_file)
 
     mobj = re.search(r'(?s)version %s\n{2}(.+?)\n{3}' % version, changelog)
     body = mobj.group(1) if mobj else ''

+ 6 - 5
devscripts/fish-completion.py

@@ -6,10 +6,13 @@ import os
 from os.path import dirname as dirn
 import sys
 
-sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
+
 import youtube_dl
 from youtube_dl.utils import shell_quote
 
+from utils import read_file, write_file
+
 FISH_COMPLETION_FILE = 'youtube-dl.fish'
 FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'
 
@@ -38,11 +41,9 @@ def build_completion(opt_parser):
             complete_cmd.extend(EXTRA_ARGS.get(long_option, []))
             commands.append(shell_quote(complete_cmd))
 
-    with open(FISH_COMPLETION_TEMPLATE) as f:
-        template = f.read()
+    template = read_file(FISH_COMPLETION_TEMPLATE)
     filled_template = template.replace('{{commands}}', '\n'.join(commands))
-    with open(FISH_COMPLETION_FILE, 'w') as f:
-        f.write(filled_template)
+    write_file(FISH_COMPLETION_FILE, filled_template)
 
 
 parser = youtube_dl.parseOpts()[0]

+ 10 - 5
devscripts/gh-pages/add-version.py

@@ -6,16 +6,21 @@ import sys
 import hashlib
 import os.path
 
+dirn = os.path.dirname
+
+sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))
+
+from devscripts.utils import read_file, write_file
+from youtube_dl.compat import compat_open as open
 
 if len(sys.argv) <= 1:
     print('Specify the version number as parameter')
     sys.exit()
 version = sys.argv[1]
 
-with open('update/LATEST_VERSION', 'w') as f:
-    f.write(version)
+write_file('update/LATEST_VERSION', version)
 
-versions_info = json.load(open('update/versions.json'))
+versions_info = json.loads(read_file('update/versions.json'))
 if 'signature' in versions_info:
     del versions_info['signature']
 
@@ -39,5 +44,5 @@ for key, filename in filenames.items():
 versions_info['versions'][version] = new_version
 versions_info['latest'] = version
 
-with open('update/versions.json', 'w') as jsonf:
-    json.dump(versions_info, jsonf, indent=4, sort_keys=True)
+with open('update/versions.json', 'w', encoding='utf-8') as jsonf:
+    json.dump(versions_info, jsonf, indent=4, sort_keys=True)

+ 12 - 5
devscripts/gh-pages/generate-download.py

@@ -2,14 +2,21 @@
 from __future__ import unicode_literals
 
 import json
+import os.path
+import sys
 
-versions_info = json.load(open('update/versions.json'))
+dirn = os.path.dirname
+
+sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
+
+from utils import read_file, write_file
+
+versions_info = json.loads(read_file('update/versions.json'))
 version = versions_info['latest']
 version_dict = versions_info['versions'][version]
 
 # Read template page
-with open('download.html.in', 'r', encoding='utf-8') as tmplf:
-    template = tmplf.read()
+template = read_file('download.html.in')
 
 template = template.replace('@PROGRAM_VERSION@', version)
 template = template.replace('@PROGRAM_URL@', version_dict['bin'][0])
@@ -18,5 +25,5 @@ template = template.replace('@EXE_URL@', version_dict['exe'][0])
 template = template.replace('@EXE_SHA256SUM@', version_dict['exe'][1])
 template = template.replace('@TAR_URL@', version_dict['tar'][0])
 template = template.replace('@TAR_SHA256SUM@', version_dict['tar'][1])
-with open('download.html', 'w', encoding='utf-8') as dlf:
-    dlf.write(template)
+
+write_file('download.html', template)

+ 11 - 6
devscripts/gh-pages/update-copyright.py

@@ -5,17 +5,22 @@ from __future__ import with_statement, unicode_literals
 
 import datetime
 import glob
-import io  # For Python 2 compatibility
 import os
 import re
+import sys
 
-year = str(datetime.datetime.now().year)
+dirn = os.path.dirname
+
+sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))
+
+from devscripts.utils import read_file, write_file
+from youtube_dl import compat_str
+
+year = compat_str(datetime.datetime.now().year)
 for fn in glob.glob('*.html*'):
-    with io.open(fn, encoding='utf-8') as f:
-        content = f.read()
+    content = read_file(fn)
     newc = re.sub(r'(?P<copyright>Copyright © 2011-)(?P<year>[0-9]{4})', 'Copyright © 2011-' + year, content)
     if content != newc:
         tmpFn = fn + '.part'
-        with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
-            outf.write(newc)
+        write_file(tmpFn, newc)
         os.rename(tmpFn, fn)

+ 8 - 3
devscripts/gh-pages/update-feed.py

@@ -2,10 +2,16 @@
 from __future__ import unicode_literals
 
 import datetime
-import io
 import json
+import os.path
 import textwrap
+import sys
 
+dirn = os.path.dirname
+
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
+
+from utils import write_file
 
 atom_template = textwrap.dedent("""\
     <?xml version="1.0" encoding="utf-8"?>
@@ -72,5 +78,4 @@ for v in versions:
 entries_str = textwrap.indent(''.join(entries), '\t')
 atom_template = atom_template.replace('@ENTRIES@', entries_str)
 
-with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
-    atom_file.write(atom_template)
+write_file('update/releases.atom', atom_template)

+ 6 - 5
devscripts/gh-pages/update-sites.py

@@ -5,15 +5,17 @@ import sys
 import os
 import textwrap
 
+dirn = os.path.dirname
+
 # We must be able to import youtube_dl
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))
 
 import youtube_dl
+from devscripts.utils import read_file, write_file
 
 
 def main():
-    with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
-        template = tmplf.read()
+    template = read_file('supportedsites.html.in')
 
     ie_htmls = []
     for ie in youtube_dl.list_extractors(age_limit=None):
@@ -29,8 +31,7 @@ def main():
 
     template = template.replace('@SITES@', textwrap.indent('\n'.join(ie_htmls), '\t'))
 
-    with open('supportedsites.html', 'w', encoding='utf-8') as sitesf:
-        sitesf.write(template)
+    write_file('supportedsites.html', template)
 
 
 if __name__ == '__main__':

+ 4 - 5
devscripts/make_contributing.py

@@ -1,10 +1,11 @@
 #!/usr/bin/env python
 from __future__ import unicode_literals
 
-import io
 import optparse
 import re
 
+from utils import read_file, write_file
+
 
 def main():
     parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
@@ -14,8 +15,7 @@ def main():
 
     infile, outfile = args
 
-    with io.open(infile, encoding='utf-8') as inf:
-        readme = inf.read()
+    readme = read_file(infile)
 
     bug_text = re.search(
         r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1)
@@ -25,8 +25,7 @@ def main():
 
     out = bug_text + dev_text
 
-    with io.open(outfile, 'w', encoding='utf-8') as outf:
-        outf.write(out)
+    write_file(outfile, out)
 
 
 if __name__ == '__main__':

+ 7 - 10
devscripts/make_issue_template.py

@@ -1,8 +1,11 @@
 #!/usr/bin/env python
 from __future__ import unicode_literals
 
-import io
 import optparse
+import os.path
+import sys
+
+from utils import read_file, read_version, write_file
 
 
 def main():
@@ -13,17 +16,11 @@ def main():
 
     infile, outfile = args
 
-    with io.open(infile, encoding='utf-8') as inf:
-        issue_template_tmpl = inf.read()
-
-    # Get the version from youtube_dl/version.py without importing the package
-    exec(compile(open('youtube_dl/version.py').read(),
-                 'youtube_dl/version.py', 'exec'))
+    issue_template_tmpl = read_file(infile)
 
-    out = issue_template_tmpl % {'version': locals()['__version__']}
+    out = issue_template_tmpl % {'version': read_version()}
 
-    with io.open(outfile, 'w', encoding='utf-8') as outf:
-        outf.write(out)
+    write_file(outfile, out)
 
 if __name__ == '__main__':
     main()

+ 41 - 10
devscripts/make_lazy_extractors.py

@@ -1,28 +1,49 @@
 from __future__ import unicode_literals, print_function
 
 from inspect import getsource
-import io
 import os
 from os.path import dirname as dirn
+import re
 import sys
 
 print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)
 
-sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
 
 lazy_extractors_filename = sys.argv[1]
 if os.path.exists(lazy_extractors_filename):
     os.remove(lazy_extractors_filename)
+# Py2: may be confused by leftover lazy_extractors.pyc
+if sys.version_info[0] < 3:
+    for c in ('c', 'o'):
+        try:
+            os.remove(lazy_extractors_filename + c)
+        except OSError:
+            pass
+
+from devscripts.utils import read_file, write_file
+from youtube_dl.compat import compat_register_utf8
+
+compat_register_utf8()
 
 from youtube_dl.extractor import _ALL_CLASSES
 from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor
 
-with open('devscripts/lazy_load_template.py', 'rt') as f:
-    module_template = f.read()
+module_template = read_file('devscripts/lazy_load_template.py')
+
+
+def get_source(m):
+    return re.sub(r'(?m)^\s*#.*\n', '', getsource(m))
+
 
 module_contents = [
-    module_template + '\n' + getsource(InfoExtractor.suitable) + '\n',
-    'class LazyLoadSearchExtractor(LazyLoadExtractor):\n    pass\n']
+    module_template,
+    get_source(InfoExtractor.suitable),
+    get_source(InfoExtractor._match_valid_url) + '\n',
+    'class LazyLoadSearchExtractor(LazyLoadExtractor):\n    pass\n',
+    # needed for suitable() methods of Youtube extractor (see #28780)
+    'from youtube_dl.utils import parse_qs, variadic\n',
+]
 
 ie_template = '''
 class {name}({bases}):
@@ -54,7 +75,7 @@ def build_lazy_ie(ie, name):
         valid_url=valid_url,
         module=ie.__module__)
     if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
-        s += '\n' + getsource(ie.suitable)
+        s += '\n' + get_source(ie.suitable)
     if hasattr(ie, '_make_valid_url'):
         # search extractors
         s += make_valid_template.format(valid_url=ie._make_valid_url())
@@ -94,7 +115,17 @@ for ie in ordered_cls:
 module_contents.append(
     '_ALL_CLASSES = [{0}]'.format(', '.join(names)))
 
-module_src = '\n'.join(module_contents) + '\n'
+module_src = '\n'.join(module_contents)
+
+write_file(lazy_extractors_filename, module_src + '\n')
 
-with io.open(lazy_extractors_filename, 'wt', encoding='utf-8') as f:
-    f.write(module_src)
+# work around JVM byte code module limit in Jython
+if sys.platform.startswith('java') and sys.version_info[:2] == (2, 7):
+    import subprocess
+    from youtube_dl.compat import compat_subprocess_get_DEVNULL
+    # if Python 2.7 is available, use it to compile the module for Jython
+    try:
+        subprocess.check_call(['python2.7', '-m', 'py_compile', lazy_extractors_filename], stdout=compat_subprocess_get_DEVNULL())
+    except Exception:
+        pass

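After running the script, the generated module can be smoke-tested without importing any real extractor module. A hedged sketch, assuming the conventional output path youtube_dl/extractor/lazy_extractors.py (the script takes the path as its first argument):

```python
# Hypothetical smoke test: _ALL_CLASSES and the copied suitable()/
# _match_valid_url() come from the module contents assembled above.
from youtube_dl.extractor.lazy_extractors import _ALL_CLASSES

url = 'https://www.youtube.com/watch?v=BaW_jenozKc'
matches = [klass.__name__ for klass in _ALL_CLASSES if klass.suitable(url)]
print(matches)  # a Youtube lazy extractor class should appear here
```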
+ 10 - 5
devscripts/make_readme.py

@@ -1,8 +1,14 @@
 from __future__ import unicode_literals
 
-import io
-import sys
+import os.path
 import re
+import sys
+dirn = os.path.dirname
+
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
+
+from utils import read_file
+from youtube_dl.compat import compat_open as open
 
 README_FILE = 'README.md'
 helptext = sys.stdin.read()
@@ -10,8 +16,7 @@ helptext = sys.stdin.read()
 if isinstance(helptext, bytes):
     helptext = helptext.decode('utf-8')
 
-with io.open(README_FILE, encoding='utf-8') as f:
-    oldreadme = f.read()
+oldreadme = read_file(README_FILE)
 
 header = oldreadme[:oldreadme.index('# OPTIONS')]
 footer = oldreadme[oldreadme.index('# CONFIGURATION'):]
@@ -20,7 +25,7 @@ options = helptext[helptext.index('  General Options:') + 19:]
 options = re.sub(r'(?m)^  (\w.+)$', r'## \1', options)
 options = '# OPTIONS\n' + options + '\n'
 
-with io.open(README_FILE, 'w', encoding='utf-8') as f:
+with open(README_FILE, 'w', encoding='utf-8') as f:
     f.write(header)
     f.write(options)
     f.write(footer)

+ 8 - 7
devscripts/make_supportedsites.py

@@ -1,17 +1,19 @@
 #!/usr/bin/env python
 from __future__ import unicode_literals
 
-import io
 import optparse
-import os
+import os.path
 import sys
 
-
 # Import youtube_dl
-ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
-sys.path.insert(0, ROOT_DIR)
+dirn = os.path.dirname
+
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
+
 import youtube_dl
 
+from utils import write_file
+
 
 def main():
     parser = optparse.OptionParser(usage='%prog OUTFILE.md')
@@ -38,8 +40,7 @@ def main():
         ' - ' + md + '\n'
         for md in gen_ies_md(ies))
 
-    with io.open(outfile, 'w', encoding='utf-8') as outf:
-        outf.write(out)
+    write_file(outfile, out)
 
 
 if __name__ == '__main__':

+ 4 - 6
devscripts/prepare_manpage.py

@@ -1,13 +1,13 @@
 from __future__ import unicode_literals
 
-import io
 import optparse
 import os.path
 import re
 
+from utils import read_file, write_file
+
 ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 README_FILE = os.path.join(ROOT_DIR, 'README.md')
-
 PREFIX = r'''%YOUTUBE-DL(1)
 
 # NAME
@@ -29,8 +29,7 @@ def main():
 
     outfile, = args
 
-    with io.open(README_FILE, encoding='utf-8') as f:
-        readme = f.read()
+    readme = read_file(README_FILE)
 
     readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
     readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
@@ -38,8 +37,7 @@ def main():
 
     readme = filter_options(readme)
 
-    with io.open(outfile, 'w', encoding='utf-8') as outf:
-        outf.write(readme)
+    write_file(outfile, readme)
 
 
 def filter_options(readme):

+ 62 - 0
devscripts/utils.py

@@ -0,0 +1,62 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import argparse
+import functools
+import os.path
+import subprocess
+import sys
+
+dirn = os.path.dirname
+
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
+
+from youtube_dl.compat import (
+    compat_kwargs,
+    compat_open as open,
+)
+
+
+def read_file(fname):
+    with open(fname, encoding='utf-8') as f:
+        return f.read()
+
+
+def write_file(fname, content, mode='w'):
+    with open(fname, mode, encoding='utf-8') as f:
+        return f.write(content)
+
+
+def read_version(fname='youtube_dl/version.py'):
+    """Get the version without importing the package"""
+    exec(compile(read_file(fname), fname, 'exec'))
+    return locals()['__version__']
+
+
+def get_filename_args(has_infile=False, default_outfile=None):
+    parser = argparse.ArgumentParser()
+    if has_infile:
+        parser.add_argument('infile', help='Input file')
+    kwargs = {'nargs': '?', 'default': default_outfile} if default_outfile else {}
+    kwargs['help'] = 'Output file'
+    parser.add_argument('outfile', **compat_kwargs(kwargs))
+
+    opts = parser.parse_args()
+    if has_infile:
+        return opts.infile, opts.outfile
+    return opts.outfile
+
+
+def compose_functions(*functions):
+    return lambda x: functools.reduce(lambda y, f: f(y), functions, x)
+
+
+def run_process(*args, **kwargs):
+    kwargs.setdefault('text', True)
+    kwargs.setdefault('check', True)
+    kwargs.setdefault('capture_output', True)
+    if kwargs['text']:
+        kwargs.setdefault('encoding', 'utf-8')
+        kwargs.setdefault('errors', 'replace')
+        kwargs = compat_kwargs(kwargs)
+    return subprocess.run(args, **kwargs)

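The helpers in the new devscripts/utils.py centralise the io.open/read/write boilerplate removed from the scripts above; the new devscripts/__init__.py makes them importable as a package. A minimal round-trip sketch (file names are illustrative; assumes the repository root as working directory):

```python
# Hypothetical usage of the new devscripts.utils helpers.
from devscripts.utils import read_file, read_version, write_file

version = read_version()  # reads youtube_dl/version.py without importing the package
write_file('/tmp/VERSION.txt', version + '\n')
assert read_file('/tmp/VERSION.txt').strip() == version
```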
+ 4 - 4
devscripts/zsh-completion.py

@@ -7,6 +7,8 @@ import sys
 
 sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
 import youtube_dl
+from utils import read_file, write_file
+
 
 ZSH_COMPLETION_FILE = "youtube-dl.zsh"
 ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
@@ -34,15 +36,13 @@ def build_completion(opt_parser):
 
     flags = [opt.get_opt_string() for opt in opts]
 
-    with open(ZSH_COMPLETION_TEMPLATE) as f:
-        template = f.read()
+    template = read_file(ZSH_COMPLETION_TEMPLATE)
 
     template = template.replace("{{fileopts}}", "|".join(fileopts))
     template = template.replace("{{diropts}}", "|".join(diropts))
     template = template.replace("{{flags}}", " ".join(flags))
 
-    with open(ZSH_COMPLETION_FILE, "w") as f:
-        f.write(template)
+    write_file(ZSH_COMPLETION_FILE, template)
 
 
 parser = youtube_dl.parseOpts()[0]

+ 34 - 31
test/helper.py

@@ -1,22 +1,24 @@
 from __future__ import unicode_literals
 
 import errno
-import io
 import hashlib
 import json
 import os.path
 import re
-import types
 import ssl
 import sys
+import types
+import unittest
 
 import youtube_dl.extractor
 from youtube_dl import YoutubeDL
 from youtube_dl.compat import (
+    compat_open as open,
     compat_os_name,
     compat_str,
 )
 from youtube_dl.utils import (
+    IDENTITY,
     preferredencoding,
     write_string,
 )
@@ -27,10 +29,10 @@ def get_params(override=None):
                                    "parameters.json")
                                    "parameters.json")
     LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
     LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                          "local_parameters.json")
                                          "local_parameters.json")
-    with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
+    with open(PARAMETERS_FILE, encoding='utf-8') as pf:
         parameters = json.load(pf)
     if os.path.exists(LOCAL_PARAMETERS_FILE):
-        with io.open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
+        with open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
             parameters.update(json.load(pf))
     if override:
         parameters.update(override)
@@ -72,7 +74,8 @@ class FakeYDL(YoutubeDL):
     def to_screen(self, s, skip_eol=None):
         print(s)
 
-    def trouble(self, s, tb=None):
+    def trouble(self, *args, **kwargs):
+        s = args[0] if len(args) > 0 else kwargs.get('message', 'Missing message')
         raise Exception(s)
 
     def download(self, x):
@@ -89,6 +92,17 @@ class FakeYDL(YoutubeDL):
         self.report_warning = types.MethodType(report_warning, self)
 
 
+class FakeLogger(object):
+    def debug(self, msg):
+        pass
+
+    def warning(self, msg):
+        pass
+
+    def error(self, msg):
+        pass
+
+
 def gettestcases(include_onlymatching=False):
     for ie in youtube_dl.extractor.gen_extractors():
         for tc in ie.get_testcases(include_onlymatching):
@@ -128,6 +142,12 @@ def expect_value(self, got, expected, field):
         self.assertTrue(
             contains_str in got,
             'field %s (value: %r) should contain %r' % (field, got, contains_str))
+    elif isinstance(expected, compat_str) and re.match(r'lambda \w+:', expected):
+        fn = eval(expected)
+        suite = expected.split(':', 1)[1].strip()
+        self.assertTrue(
+            fn(got),
+            'Expected field %s to meet condition %s, but value %r failed' % (field, suite, got))
     elif isinstance(expected, type):
         self.assertTrue(
             isinstance(got, expected),
@@ -137,7 +157,7 @@ def expect_value(self, got, expected, field):
     elif isinstance(expected, list) and isinstance(got, list):
         self.assertEqual(
             len(expected), len(got),
-            'Expect a list of length %d, but got a list of length %d for field %s' % (
+            'Expected a list of length %d, but got a list of length %d for field %s' % (
                 len(expected), len(got), field))
         for index, (item_got, item_expected) in enumerate(zip(got, expected)):
             type_got = type(item_got)
@@ -161,18 +181,18 @@ def expect_value(self, got, expected, field):
             op, _, expected_num = expected.partition(':')
             expected_num = int(expected_num)
             if op == 'mincount':
-                assert_func = assertGreaterEqual
+                assert_func = self.assertGreaterEqual
                 msg_tmpl = 'Expected %d items in field %s, but only got %d'
             elif op == 'maxcount':
-                assert_func = assertLessEqual
+                assert_func = self.assertLessEqual
                 msg_tmpl = 'Expected maximum %d items in field %s, but got %d'
             elif op == 'count':
-                assert_func = assertEqual
+                assert_func = self.assertEqual
                 msg_tmpl = 'Expected exactly %d items in field %s, but got %d'
             else:
                 assert False
             assert_func(
-                self, len(got), expected_num,
+                len(got), expected_num,
                 msg_tmpl % (expected_num, field, len(got)))
             return
         self.assertEqual(
@@ -242,27 +262,6 @@ def assertRegexpMatches(self, text, regexp, msg=None):
             self.assertTrue(m, msg)
 
 
-def assertGreaterEqual(self, got, expected, msg=None):
-    if not (got >= expected):
-        if msg is None:
-            msg = '%r not greater than or equal to %r' % (got, expected)
-        self.assertTrue(got >= expected, msg)
-
-
-def assertLessEqual(self, got, expected, msg=None):
-    if not (got <= expected):
-        if msg is None:
-            msg = '%r not less than or equal to %r' % (got, expected)
-        self.assertTrue(got <= expected, msg)
-
-
-def assertEqual(self, got, expected, msg=None):
-    if not (got == expected):
-        if msg is None:
-            msg = '%r not equal to %r' % (got, expected)
-        self.assertTrue(got == expected, msg)
-
-
 def expect_warnings(ydl, warnings_re):
     real_warning = ydl.report_warning
 
@@ -280,3 +279,7 @@ def http_server_port(httpd):
     else:
         sock = httpd.socket
     return sock.getsockname()[1]
+
+
+def expectedFailureIf(cond):
+    return unittest.expectedFailure if cond else IDENTITY

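The string-lambda branch added to expect_value() lets test cases state open-ended expectations for a field instead of a literal value. A hypothetical info_dict fragment using it (URL and field values are illustrative, not from the patch):

```python
# Hypothetical extractor test data: a string matching r'lambda \w+:' is
# eval()'d by expect_value() and the resulting predicate must hold for
# the extracted value.
_TEST = {
    'url': 'https://example.com/video/123',
    'info_dict': {
        'id': '123',
        'title': 'lambda t: len(t) > 0',       # any non-empty title passes
        'duration': 'lambda d: 0 < d < 3600',  # any plausible duration passes
    },
}
```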
+ 296 - 21
test/test_InfoExtractor.py

@@ -3,19 +3,37 @@
 from __future__ import unicode_literals
 
 # Allow direct execution
-import io
 import os
 import sys
 import unittest
+
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from test.helper import FakeYDL, expect_dict, expect_value, http_server_port
-from youtube_dl.compat import compat_etree_fromstring, compat_http_server
-from youtube_dl.extractor.common import InfoExtractor
-from youtube_dl.extractor import YoutubeIE, get_info_extractor
-from youtube_dl.utils import encode_data_uri, strip_jsonp, ExtractorError, RegexNotFoundError
 import threading
 
+from test.helper import (
+    expect_dict,
+    expect_value,
+    FakeYDL,
+    http_server_port,
+)
+from youtube_dl.compat import (
+    compat_etree_fromstring,
+    compat_http_server,
+    compat_open as open,
+)
+from youtube_dl.extractor.common import InfoExtractor
+from youtube_dl.extractor import (
+    get_info_extractor,
+    YoutubeIE,
+)
+from youtube_dl.utils import (
+    encode_data_uri,
+    ExtractorError,
+    RegexNotFoundError,
+    strip_jsonp,
+)
+
 
 TEAPOT_RESPONSE_STATUS = 418
 TEAPOT_RESPONSE_BODY = "<h1>418 I'm a teapot</h1>"
@@ -35,13 +53,13 @@ class InfoExtractorTestRequestHandler(compat_http_server.BaseHTTPRequestHandler)
             assert False
 
 
-class TestIE(InfoExtractor):
+class DummyIE(InfoExtractor):
     pass
 
 
 class TestInfoExtractor(unittest.TestCase):
     def setUp(self):
-        self.ie = TestIE(FakeYDL())
+        self.ie = DummyIE(FakeYDL())
 
     def test_ie_key(self):
         self.assertEqual(get_info_extractor(YoutubeIE.ie_key()), YoutubeIE)
@@ -62,6 +80,7 @@ class TestInfoExtractor(unittest.TestCase):
             <meta name="og:test1" content='foo > < bar'/>
             <meta name="og:test1" content='foo > < bar'/>
             <meta name="og:test2" content="foo >//< bar"/>
             <meta name="og:test2" content="foo >//< bar"/>
             <meta property=og-test3 content='Ill-formatted opengraph'/>
             <meta property=og-test3 content='Ill-formatted opengraph'/>
+            <meta property=og:test4 content=unquoted-value/>
             '''
         self.assertEqual(ie._og_search_title(html), 'Foo')
         self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
@@ -74,6 +93,7 @@ class TestInfoExtractor(unittest.TestCase):
         self.assertEqual(ie._og_search_property(('test0', 'test1'), html), 'foo > < bar')
         self.assertRaises(RegexNotFoundError, ie._og_search_property, 'test0', html, None, fatal=True)
         self.assertRaises(RegexNotFoundError, ie._og_search_property, ('test0', 'test00'), html, None, fatal=True)
+        self.assertEqual(ie._og_search_property('test4', html), 'unquoted-value')
 
     def test_html_search_meta(self):
         ie = self.ie
@@ -98,6 +118,74 @@ class TestInfoExtractor(unittest.TestCase):
         self.assertRaises(RegexNotFoundError, ie._html_search_meta, 'z', html, None, fatal=True)
         self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True)
 
+    def test_search_nextjs_data(self):
+        html = '''
+<!DOCTYPE html>
+<html>
+<head>
+  <meta http-equiv="content-type" content=
+  "text/html; charset=utf-8">
+  <meta name="viewport" content="width=device-width">
+  <title>Test _search_nextjs_data()</title>
+</head>
+<body>
+  <div id="__next">
+    <div style="background-color:#17171E" class="FU" dir="ltr">
+      <div class="sc-93de261d-0 dyzzYE">
+        <div>
+          <header class="HD"></header>
+          <main class="MN">
+            <div style="height:0" class="HT0">
+              <div style="width:NaN%" data-testid=
+              "stream-container" class="WDN"></div>
+            </div>
+          </main>
+        </div>
+        <footer class="sc-6e5faf91-0 dEGaHS"></footer>
+      </div>
+    </div>
+  </div>
+  <script id="__NEXT_DATA__" type="application/json">
+  {"props":{"pageProps":{"video":{"id":"testid"}}}}
+  </script>
+</body>
+</html>
+'''
+        search = self.ie._search_nextjs_data(html, 'testID')
+        self.assertEqual(search['props']['pageProps']['video']['id'], 'testid')
+        search = self.ie._search_nextjs_data(
+            'no next.js data here, move along', 'testID', default={'status': 0})
+        self.assertEqual(search['status'], 0)
+
+    def test_search_nuxt_data(self):
+        html = '''
+<!DOCTYPE html>
+<html>
+<head>
+  <meta http-equiv="content-type" content=
+  "text/html; charset=utf-8">
+  <title>Nuxt.js Test Page</title>
+  <meta name="viewport" content=
+  "width=device-width, initial-scale=1">
+  <meta data-hid="robots" name="robots" content="all">
+</head>
+<body class="BD">
+  <div id="__layout">
+    <h1 class="H1">Example heading</h1>
+    <div class="IN">
+      <p>Decoy text</p>
+    </div>
+  </div>
+  <script>
+  window.__NUXT__=(function(a,b,c,d,e,f,g,h){return {decoy:" default",data:[{track:{id:f,title:g}}]}}(null,null,"c",null,null,"testid","Nuxt.js title",null));
+  </script>
+  <script src="/_nuxt/a12345b.js" defer="defer"></script>
+</body>
+</html>
+'''
+        search = self.ie._search_nuxt_data(html, 'testID')
+        self.assertEqual(search['track']['id'], 'testid')
+
     def test_search_json_ld_realworld(self):
         # https://github.com/ytdl-org/youtube-dl/issues/23306
         expect_dict(
@@ -346,6 +434,24 @@ class TestInfoExtractor(unittest.TestCase):
                 }],
             })
 
+        # from https://0000.studio/
+        # with type attribute but without extension in URL
+        expect_dict(
+            self,
+            self.ie._parse_html5_media_entries(
+                'https://0000.studio',
+                r'''
+                <video src="https://d1ggyt9m8pwf3g.cloudfront.net/protected/ap-northeast-1:1864af40-28d5-492b-b739-b32314b1a527/archive/clip/838db6a7-8973-4cd6-840d-8517e4093c92"
+                    controls="controls" type="video/mp4" preload="metadata" autoplay="autoplay" playsinline class="object-contain">
+                </video>
+                ''', None)[0],
+            {
+                'formats': [{
+                    'url': 'https://d1ggyt9m8pwf3g.cloudfront.net/protected/ap-northeast-1:1864af40-28d5-492b-b739-b32314b1a527/archive/clip/838db6a7-8973-4cd6-840d-8517e4093c92',
+                    'ext': 'mp4',
+                }],
+            })
+
     def test_extract_jwplayer_data_realworld(self):
         # from http://www.suffolk.edu/sjc/
         expect_dict(
@@ -799,8 +905,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
         ]
 
         for m3u8_file, m3u8_url, expected_formats in _TEST_CASES:
-            with io.open('./test/testdata/m3u8/%s.m3u8' % m3u8_file,
-                         mode='r', encoding='utf-8') as f:
+            with open('./test/testdata/m3u8/%s.m3u8' % m3u8_file,
+                      mode='r', encoding='utf-8') as f:
                 formats = self.ie._parse_m3u8_formats(
                     f.read(), m3u8_url, ext='mp4')
                 self.ie._sort_formats(formats)
@@ -890,7 +996,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
                     'tbr': 5997.485,
                     'width': 1920,
                     'height': 1080,
-                }]
+                }],
+                {},
             ), (
                 # https://github.com/ytdl-org/youtube-dl/pull/14844
                 'urls_only',
@@ -973,7 +1080,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
                     'tbr': 4400,
                     'width': 1920,
                     'height': 1080,
-                }]
+                }],
+                {},
             ), (
                 # https://github.com/ytdl-org/youtube-dl/issues/20346
                 # Media considered unfragmented even though it contains
@@ -1019,18 +1127,185 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
                     'width': 360,
                     'height': 360,
                     'fps': 30,
-                }]
+                }],
+                {},
+            ), (
+                # https://github.com/ytdl-org/youtube-dl/issues/30235
+                # Bento4 generated test mpd
+                # mp4dash --mpd-name=manifest.mpd --no-split --use-segment-list mediafiles
+                'url_and_range',
+                'http://unknown/manifest.mpd',  # mpd_url
+                'http://unknown/',  # mpd_base_url
+                [{
+                    'manifest_url': 'http://unknown/manifest.mpd',
+                    'fragment_base_url': 'http://unknown/',
+                    'ext': 'm4a',
+                    'format_id': 'audio-und-mp4a.40.2',
+                    'format_note': 'DASH audio',
+                    'container': 'm4a_dash',
+                    'protocol': 'http_dash_segments',
+                    'acodec': 'mp4a.40.2',
+                    'vcodec': 'none',
+                    'tbr': 98.808,
+                }, {
+                    'manifest_url': 'http://unknown/manifest.mpd',
+                    'fragment_base_url': 'http://unknown/',
+                    'ext': 'mp4',
+                    'format_id': 'video-avc1',
+                    'format_note': 'DASH video',
+                    'container': 'mp4_dash',
+                    'protocol': 'http_dash_segments',
+                    'acodec': 'none',
+                    'vcodec': 'avc1.4D401E',
+                    'tbr': 699.597,
+                    'width': 768,
+                    'height': 432
+                }],
+                {},
+            ), (
+                # https://github.com/ytdl-org/youtube-dl/issues/27575
+                # GPAC generated test mpd
+                # MP4Box -dash 10000 -single-file -out manifest.mpd mediafiles
+                'range_only',
+                'http://unknown/manifest.mpd',  # mpd_url
+                'http://unknown/',  # mpd_base_url
+                [{
+                    'manifest_url': 'http://unknown/manifest.mpd',
+                    'fragment_base_url': 'http://unknown/audio_dashinit.mp4',
+                    'ext': 'm4a',
+                    'format_id': '2',
+                    'format_note': 'DASH audio',
+                    'container': 'm4a_dash',
+                    'protocol': 'http_dash_segments',
+                    'acodec': 'mp4a.40.2',
+                    'vcodec': 'none',
+                    'tbr': 98.096,
+                }, {
+                    'manifest_url': 'http://unknown/manifest.mpd',
+                    'fragment_base_url': 'http://unknown/video_dashinit.mp4',
+                    'ext': 'mp4',
+                    'format_id': '1',
+                    'format_note': 'DASH video',
+                    'container': 'mp4_dash',
+                    'protocol': 'http_dash_segments',
+                    'acodec': 'none',
+                    'vcodec': 'avc1.4D401E',
+                    'tbr': 526.987,
+                    'width': 768,
+                    'height': 432
+                }],
+                {},
+            ), (
+                'subtitles',
+                'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/',
+                [{
+                    'format_id': 'audio=128001',
+                    'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'ext': 'm4a',
+                    'tbr': 128.001,
+                    'asr': 48000,
+                    'format_note': 'DASH audio',
+                    'container': 'm4a_dash',
+                    'vcodec': 'none',
+                    'acodec': 'mp4a.40.2',
+                    'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
+                    'protocol': 'http_dash_segments',
+                }, {
+                    'format_id': 'video=100000',
+                    'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'ext': 'mp4',
+                    'width': 336,
+                    'height': 144,
+                    'tbr': 100,
+                    'format_note': 'DASH video',
+                    'container': 'mp4_dash',
+                    'vcodec': 'avc1.4D401F',
+                    'acodec': 'none',
+                    'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
+                    'protocol': 'http_dash_segments',
+                }, {
+                    'format_id': 'video=326000',
+                    'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'ext': 'mp4',
+                    'width': 562,
+                    'height': 240,
+                    'tbr': 326,
+                    'format_note': 'DASH video',
+                    'container': 'mp4_dash',
+                    'vcodec': 'avc1.4D401F',
+                    'acodec': 'none',
+                    'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
+                    'protocol': 'http_dash_segments',
+                }, {
+                    'format_id': 'video=698000',
+                    'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'ext': 'mp4',
+                    'width': 844,
+                    'height': 360,
+                    'tbr': 698,
+                    'format_note': 'DASH video',
+                    'container': 'mp4_dash',
+                    'vcodec': 'avc1.4D401F',
+                    'acodec': 'none',
+                    'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
+                    'protocol': 'http_dash_segments',
+                }, {
+                    'format_id': 'video=1493000',
+                    'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'ext': 'mp4',
+                    'width': 1126,
+                    'height': 480,
+                    'tbr': 1493,
+                    'format_note': 'DASH video',
+                    'container': 'mp4_dash',
+                    'vcodec': 'avc1.4D401F',
+                    'acodec': 'none',
+                    'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
+                    'protocol': 'http_dash_segments',
+                }, {
+                    'format_id': 'video=4482000',
+                    'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'ext': 'mp4',
+                    'width': 1688,
+                    'height': 720,
+                    'tbr': 4482,
+                    'format_note': 'DASH video',
+                    'container': 'mp4_dash',
+                    'vcodec': 'avc1.4D401F',
+                    'acodec': 'none',
+                    'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                    'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
+                    'protocol': 'http_dash_segments',
+                }],
+                {
+                    'en': [
+                        {
+                            'ext': 'mp4',
+                            'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                            'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
+                            'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
+                            'protocol': 'http_dash_segments',
+                        }
+                    ]
+                },
            )
        ]
 
 
-        for mpd_file, mpd_url, mpd_base_url, expected_formats in _TEST_CASES:
-            with io.open('./test/testdata/mpd/%s.mpd' % mpd_file,
-                         mode='r', encoding='utf-8') as f:
-                formats = self.ie._parse_mpd_formats(
+        for mpd_file, mpd_url, mpd_base_url, expected_formats, expected_subtitles in _TEST_CASES:
+            with open('./test/testdata/mpd/%s.mpd' % mpd_file,
+                      mode='r', encoding='utf-8') as f:
+                formats, subtitles = self.ie._parse_mpd_formats_and_subtitles(
                     compat_etree_fromstring(f.read().encode('utf-8')),
                     mpd_base_url=mpd_base_url, mpd_url=mpd_url)
                 self.ie._sort_formats(formats)
                 expect_value(self, formats, expected_formats, None)
+                expect_value(self, subtitles, expected_subtitles, None)
 
 
     def test_parse_f4m_formats(self):
         _TEST_CASES = [
@@ -1051,8 +1326,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
         ]
 
         for f4m_file, f4m_url, expected_formats in _TEST_CASES:
-            with io.open('./test/testdata/f4m/%s.f4m' % f4m_file,
-                         mode='r', encoding='utf-8') as f:
+            with open('./test/testdata/f4m/%s.f4m' % f4m_file,
+                      mode='r', encoding='utf-8') as f:
                 formats = self.ie._parse_f4m_formats(
                     compat_etree_fromstring(f.read().encode('utf-8')),
                     f4m_url, None)
@@ -1099,8 +1374,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
         ]
 
         for xspf_file, xspf_url, expected_entries in _TEST_CASES:
-            with io.open('./test/testdata/xspf/%s.xspf' % xspf_file,
-                         mode='r', encoding='utf-8') as f:
+            with open('./test/testdata/xspf/%s.xspf' % xspf_file,
+                      mode='r', encoding='utf-8') as f:
                 entries = self.ie._parse_xspf(
                     compat_etree_fromstring(f.read().encode('utf-8')),
                     xspf_file, xspf_url=xspf_url, xspf_base_url=xspf_url)
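
The tests above now drive _parse_mpd_formats_and_subtitles(), which returns a (formats, subtitles) pair instead of a bare format list. A minimal sketch of calling it the same way, reusing the subtitles.mpd fixture added in this merge (the base/manifest URLs are taken from the expected data above and are illustrative only):

    import io

    from test.helper import FakeYDL
    from youtube_dl.compat import compat_etree_fromstring
    from youtube_dl.extractor.common import InfoExtractor

    ie = InfoExtractor(FakeYDL())
    with io.open('./test/testdata/mpd/subtitles.mpd', encoding='utf-8') as f:
        doc = compat_etree_fromstring(f.read().encode('utf-8'))
    formats, subtitles = ie._parse_mpd_formats_and_subtitles(
        doc,
        mpd_base_url='https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
        mpd_url='https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd')
    ie._sort_formats(formats)
    print(len(formats), sorted(subtitles))  # e.g. several DASH formats and an 'en' track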

+ 205 - 16
test/test_YoutubeDL.py

@@ -10,14 +10,31 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 import copy
+import json
 
-from test.helper import FakeYDL, assertRegexpMatches
+from test.helper import (
+    FakeYDL,
+    assertRegexpMatches,
+    try_rm,
+)
 from youtube_dl import YoutubeDL
-from youtube_dl.compat import compat_str, compat_urllib_error
+from youtube_dl.compat import (
+    compat_http_cookiejar_Cookie,
+    compat_http_cookies_SimpleCookie,
+    compat_kwargs,
+    compat_open as open,
+    compat_str,
+    compat_urllib_error,
+)
+
 from youtube_dl.extractor import YoutubeIE
 from youtube_dl.extractor.common import InfoExtractor
 from youtube_dl.postprocessor.common import PostProcessor
-from youtube_dl.utils import ExtractorError, match_filter_func
+from youtube_dl.utils import (
+    ExtractorError,
+    match_filter_func,
+    traverse_obj,
+)
 
 
 TEST_URL = 'http://localhost/sample.mp4'
 
@@ -29,11 +46,14 @@ class YDL(FakeYDL):
         self.msgs = []
 
     def process_info(self, info_dict):
-        self.downloaded_info_dicts.append(info_dict)
+        self.downloaded_info_dicts.append(info_dict.copy())
 
 
     def to_screen(self, msg):
         self.msgs.append(msg)
 
+    def dl(self, *args, **kwargs):
+        assert False, 'Downloader must not be invoked for test_YoutubeDL'
+
 
 
 def _make_result(formats, **kwargs):
     res = {
@@ -42,8 +62,9 @@ def _make_result(formats, **kwargs):
         'title': 'testttitle',
         'extractor': 'testex',
         'extractor_key': 'TestEx',
+        'webpage_url': 'http://example.com/watch?v=shenanigans',
     }
-    res.update(**kwargs)
+    res.update(**compat_kwargs(kwargs))
     return res
 
 
@@ -681,12 +702,12 @@ class TestYoutubeDL(unittest.TestCase):
 
 
         class SimplePP(PostProcessor):
             def run(self, info):
-                with open(audiofile, 'wt') as f:
+                with open(audiofile, 'w') as f:
                     f.write('EXAMPLE')
                 return [info['filepath']], info
 
         def run_pp(params, PP):
-            with open(filename, 'wt') as f:
+            with open(filename, 'w') as f:
                 f.write('EXAMPLE')
             ydl = YoutubeDL(params)
             ydl.add_post_processor(PP())
@@ -705,7 +726,7 @@ class TestYoutubeDL(unittest.TestCase):
 
 
         class ModifierPP(PostProcessor):
             def run(self, info):
-                with open(info['filepath'], 'wt') as f:
+                with open(info['filepath'], 'w') as f:
                     f.write('MODIFIED')
                 return [], info
 
@@ -930,17 +951,11 @@ class TestYoutubeDL(unittest.TestCase):
     # Test case for https://github.com/ytdl-org/youtube-dl/issues/27064
     def test_ignoreerrors_for_playlist_with_url_transparent_iterable_entries(self):
 
-        class _YDL(YDL):
-            def __init__(self, *args, **kwargs):
-                super(_YDL, self).__init__(*args, **kwargs)
-
-            def trouble(self, s, tb=None):
-                pass
-
-        ydl = _YDL({
+        ydl = YDL({
             'format': 'extra',
             'ignoreerrors': True,
         })
+        ydl.trouble = lambda *_, **__: None
 
 
         class VideoIE(InfoExtractor):
             _VALID_URL = r'video:(?P<id>\d+)'
@@ -997,6 +1012,180 @@ class TestYoutubeDL(unittest.TestCase):
         self.assertEqual(downloaded['extractor'], 'Video')
         self.assertEqual(downloaded['extractor_key'], 'Video')
 
+    def test_default_times(self):
+        """Test addition of missing upload/release/_date from /release_/timestamp"""
+        info = {
+            'id': '1234',
+            'url': TEST_URL,
+            'title': 'Title',
+            'ext': 'mp4',
+            'timestamp': 1631352900,
+            'release_timestamp': 1632995931,
+        }
+
+        params = {'simulate': True, }
+        ydl = FakeYDL(params)
+        out_info = ydl.process_ie_result(info)
+        self.assertTrue(isinstance(out_info['upload_date'], compat_str))
+        self.assertEqual(out_info['upload_date'], '20210911')
+        self.assertTrue(isinstance(out_info['release_date'], compat_str))
+        self.assertEqual(out_info['release_date'], '20210930')
+
+
+class TestYoutubeDLCookies(unittest.TestCase):
+
+    @staticmethod
+    def encode_cookie(cookie):
+        if not isinstance(cookie, dict):
+            cookie = vars(cookie)
+        for name, value in cookie.items():
+            yield name, compat_str(value)
+
+    @classmethod
+    def comparable_cookies(cls, cookies):
+        # Work around cookiejar cookies not being unicode strings
+        return sorted(map(tuple, map(sorted, map(cls.encode_cookie, cookies))))
+
+    def assertSameCookies(self, c1, c2, msg=None):
+        return self.assertEqual(
+            *map(self.comparable_cookies, (c1, c2)),
+            msg=msg)
+
+    def assertSameCookieStrings(self, c1, c2, msg=None):
+        return self.assertSameCookies(
+            *map(lambda c: compat_http_cookies_SimpleCookie(c).values(), (c1, c2)),
+            msg=msg)
+
+    def test_header_cookies(self):
+
+        ydl = FakeYDL()
+        ydl.report_warning = lambda *_, **__: None
+
+        def cookie(name, value, version=None, domain='', path='', secure=False, expires=None):
+            return compat_http_cookiejar_Cookie(
+                version or 0, name, value, None, False,
+                domain, bool(domain), bool(domain), path, bool(path),
+                secure, expires, False, None, None, rest={})
+
+        test_url, test_domain = (t % ('yt.dl',) for t in ('https://%s/test', '.%s'))
+
+        def test(encoded_cookies, cookies, headers=False, round_trip=None, error_re=None):
+            def _test():
+                ydl.cookiejar.clear()
+                ydl._load_cookies(encoded_cookies, autoscope=headers)
+                if headers:
+                    ydl._apply_header_cookies(test_url)
+                data = {'url': test_url}
+                ydl._calc_headers(data)
+                self.assertSameCookies(
+                    cookies, ydl.cookiejar,
+                    'Extracted cookiejar.Cookie is not the same')
+                if not headers:
+                    self.assertSameCookieStrings(
+                        data.get('cookies'), round_trip or encoded_cookies,
+                        msg='Cookie is not the same as round trip')
+                ydl.__dict__['_YoutubeDL__header_cookies'] = []
+
+            try:
+                _test()
+            except AssertionError:
+                raise
+            except Exception as e:
+                if not error_re:
+                    raise
+                assertRegexpMatches(self, e.args[0], error_re.join(('.*',) * 2))
+
+        test('test=value; Domain=' + test_domain, [cookie('test', 'value', domain=test_domain)])
+        test('test=value', [cookie('test', 'value')], error_re='Unscoped cookies are not allowed')
+        test('cookie1=value1; Domain={0}; Path=/test; cookie2=value2; Domain={0}; Path=/'.format(test_domain), [
+            cookie('cookie1', 'value1', domain=test_domain, path='/test'),
+            cookie('cookie2', 'value2', domain=test_domain, path='/')])
+        cookie_kw = compat_kwargs(
+            {'domain': test_domain, 'path': '/test', 'secure': True, 'expires': '9999999999', })
+        test('test=value; Domain={domain}; Path={path}; Secure; Expires={expires}'.format(**cookie_kw), [
+            cookie('test', 'value', **cookie_kw)])
+        test('test="value; "; path=/test; domain=' + test_domain, [
+            cookie('test', 'value; ', domain=test_domain, path='/test')],
+            round_trip='test="value\\073 "; Domain={0}; Path=/test'.format(test_domain))
+        test('name=; Domain=' + test_domain, [cookie('name', '', domain=test_domain)],
+             round_trip='name=""; Domain=' + test_domain)
+        test('test=value', [cookie('test', 'value', domain=test_domain)], headers=True)
+        test('cookie1=value; Domain={0}; cookie2=value'.format(test_domain), [],
+             headers=True, error_re='Invalid syntax')
+        ydl.report_warning = ydl.report_error
+        test('test=value', [], headers=True, error_re='Passing cookies as a header is a potential security risk')
+
+    def test_infojson_cookies(self):
+        TEST_FILE = 'test_infojson_cookies.info.json'
+        TEST_URL = 'https://example.com/example.mp4'
+        COOKIES = 'a=b; Domain=.example.com; c=d; Domain=.example.com'
+        COOKIE_HEADER = {'Cookie': 'a=b; c=d'}
+
+        ydl = FakeYDL()
+        ydl.process_info = lambda x: ydl._write_info_json('test', x, TEST_FILE)
+
+        def make_info(info_header_cookies=False, fmts_header_cookies=False, cookies_field=False):
+            fmt = {'url': TEST_URL}
+            if fmts_header_cookies:
+                fmt['http_headers'] = COOKIE_HEADER
+            if cookies_field:
+                fmt['cookies'] = COOKIES
+            return _make_result([fmt], http_headers=COOKIE_HEADER if info_header_cookies else None)
+
+        def test(initial_info, note):
+
+            def failure_msg(why):
+                return ' when '.join((why, note))
+
+            result = {}
+            result['processed'] = ydl.process_ie_result(initial_info)
+            self.assertTrue(ydl.cookiejar.get_cookies_for_url(TEST_URL),
+                            msg=failure_msg('No cookies set in cookiejar after initial process'))
+            ydl.cookiejar.clear()
+            with open(TEST_FILE) as infojson:
+                result['loaded'] = ydl.sanitize_info(json.load(infojson), True)
+            result['final'] = ydl.process_ie_result(result['loaded'].copy(), download=False)
+            self.assertTrue(ydl.cookiejar.get_cookies_for_url(TEST_URL),
+                            msg=failure_msg('No cookies set in cookiejar after final process'))
+            ydl.cookiejar.clear()
+            for key in ('processed', 'loaded', 'final'):
+                info = result[key]
+                self.assertIsNone(
+                    traverse_obj(info, ((None, ('formats', 0)), 'http_headers', 'Cookie'), casesense=False, get_all=False),
+                    msg=failure_msg('Cookie header not removed in {0} result'.format(key)))
+                self.assertSameCookieStrings(
+                    traverse_obj(info, ((None, ('formats', 0)), 'cookies'), get_all=False), COOKIES,
+                    msg=failure_msg('No cookies field found in {0} result'.format(key)))
+
+        test({'url': TEST_URL, 'http_headers': COOKIE_HEADER, 'id': '1', 'title': 'x'}, 'no formats field')
+        test(make_info(info_header_cookies=True), 'info_dict header cookies')
+        test(make_info(fmts_header_cookies=True), 'format header cookies')
+        test(make_info(info_header_cookies=True, fmts_header_cookies=True), 'info_dict and format header cookies')
+        test(make_info(info_header_cookies=True, fmts_header_cookies=True, cookies_field=True), 'all cookies fields')
+        test(make_info(cookies_field=True), 'cookies format field')
+        test({'url': TEST_URL, 'cookies': COOKIES, 'id': '1', 'title': 'x'}, 'info_dict cookies field only')
+
+        try_rm(TEST_FILE)
+
+    def test_add_headers_cookie(self):
+        def check_for_cookie_header(result):
+            return traverse_obj(result, ((None, ('formats', 0)), 'http_headers', 'Cookie'), casesense=False, get_all=False)
+
+        ydl = FakeYDL({'http_headers': {'Cookie': 'a=b'}})
+        ydl._apply_header_cookies(_make_result([])['webpage_url'])  # Scope to input webpage URL: .example.com
+
+        fmt = {'url': 'https://example.com/video.mp4'}
+        result = ydl.process_ie_result(_make_result([fmt]), download=False)
+        self.assertIsNone(check_for_cookie_header(result), msg='http_headers cookies in result info_dict')
+        self.assertEqual(result.get('cookies'), 'a=b; Domain=.example.com', msg='No cookies were set in cookies field')
+        self.assertIn('a=b', ydl.cookiejar.get_cookie_header(fmt['url']), msg='No cookies were set in cookiejar')
+
+        fmt = {'url': 'https://wrong.com/video.mp4'}
+        result = ydl.process_ie_result(_make_result([fmt]), download=False)
+        self.assertIsNone(check_for_cookie_header(result), msg='http_headers cookies for wrong domain')
+        self.assertFalse(result.get('cookies'), msg='Cookies set in cookies field for wrong domain')
+        self.assertFalse(ydl.cookiejar.get_cookie_header(fmt['url']), msg='Cookies set in cookiejar for wrong domain')
+
 
 
 if __name__ == '__main__':
     unittest.main()
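
The new cookie tests above lean on round-tripping header strings through SimpleCookie, so that scoping attributes survive serialisation. A small sketch of that round trip (domain, path and values are placeholders):

    from youtube_dl.compat import compat_http_cookies_SimpleCookie

    cookies = compat_http_cookies_SimpleCookie()
    cookies.load('test=value; Domain=.yt.dl; Path=/test')
    for morsel in cookies.values():
        # each Morsel keeps the attributes assertSameCookieStrings() compares
        print(morsel.key, morsel.value, morsel['domain'], morsel['path'])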

+ 14 - 0
test/test_YoutubeDLCookieJar.py

@@ -46,6 +46,20 @@ class TestYoutubeDLCookieJar(unittest.TestCase):
         # will be ignored
         self.assertFalse(cookiejar._cookies)
 
+    def test_get_cookie_header(self):
+        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/httponly_cookies.txt')
+        cookiejar.load(ignore_discard=True, ignore_expires=True)
+        header = cookiejar.get_cookie_header('https://www.foobar.foobar')
+        self.assertIn('HTTPONLY_COOKIE', header)
+
+    def test_get_cookies_for_url(self):
+        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/session_cookies.txt')
+        cookiejar.load(ignore_discard=True, ignore_expires=True)
+        cookies = cookiejar.get_cookies_for_url('https://www.foobar.foobar/')
+        self.assertEqual(len(cookies), 2)
+        cookies = cookiejar.get_cookies_for_url('https://foobar.foobar/')
+        self.assertFalse(cookies)
+
 
 
 if __name__ == '__main__':
     unittest.main()
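
A sketch of the two accessors exercised above, run against the same fixture file (the host name comes from that cookies file; this is illustrative, not a definitive API reference):

    from youtube_dl.utils import YoutubeDLCookieJar

    jar = YoutubeDLCookieJar('./test/testdata/cookies/session_cookies.txt')
    jar.load(ignore_discard=True, ignore_expires=True)
    print(jar.get_cookie_header('https://www.foobar.foobar/'))
    print([c.name for c in jar.get_cookies_for_url('https://www.foobar.foobar/')])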

+ 8 - 1
test/test_aes.py

@@ -8,7 +8,7 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from youtube_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text
+from youtube_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text, aes_ecb_encrypt
 from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes
 import base64
 
@@ -58,6 +58,13 @@ class TestAES(unittest.TestCase):
         decrypted = (aes_decrypt_text(encrypted, password, 32))
         self.assertEqual(decrypted, self.secret_msg)
 
+    def test_ecb_encrypt(self):
+        data = bytes_to_intlist(self.secret_msg)
+        encrypted = intlist_to_bytes(aes_ecb_encrypt(data, self.key))
+        self.assertEqual(
+            encrypted,
+            b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:')
+
 
 
 if __name__ == '__main__':
     unittest.main()
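
A sketch of the new ECB primitive checked above. The key here is an illustrative 16-byte value (not the one from the test), and the int-list helpers convert to and from bytes as in the test:

    from youtube_dl.aes import aes_ecb_encrypt
    from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes

    key = bytes_to_intlist(b'0123456789abcdef')  # placeholder 128-bit key
    data = bytes_to_intlist(b'Secret message goes here')
    ciphertext = intlist_to_bytes(aes_ecb_encrypt(data, key))
    print(len(ciphertext))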

+ 8 - 4
test/test_age_restriction.py

@@ -11,6 +11,7 @@ from test.helper import try_rm
 
 
 
 
 from youtube_dl import YoutubeDL
+from youtube_dl.utils import DownloadError
 
 
 
 
 def _download_restricted(url, filename, age):
@@ -26,7 +27,10 @@ def _download_restricted(url, filename, age):
     ydl.add_default_info_extractors()
     json_filename = os.path.splitext(filename)[0] + '.info.json'
     try_rm(json_filename)
-    ydl.download([url])
+    try:
+        ydl.download([url])
+    except DownloadError:
+        try_rm(json_filename)
     res = os.path.exists(json_filename)
     try_rm(json_filename)
     return res
@@ -38,12 +42,12 @@ class TestAgeRestriction(unittest.TestCase):
         self.assertFalse(_download_restricted(url, filename, age))
 
     def test_youtube(self):
-        self._assert_restricted('07FYdnEawAQ', '07FYdnEawAQ.mp4', 10)
+        self._assert_restricted('HtVdAasjOgU', 'HtVdAasjOgU.mp4', 10)
 
 
     def test_youporn(self):
         self._assert_restricted(
-            'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
-            '505835.mp4', 2, old_age=25)
+            'https://www.youporn.com/watch/16715086/sex-ed-in-detention-18-asmr/',
+            '16715086.mp4', 2, old_age=25)
 
 
 
 
 if __name__ == '__main__':
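
The guard added above means a refused age-gated download now surfaces as DownloadError instead of aborting the helper. A sketch of the same pattern, using the test's own video ID (network access and the exact options are assumptions):

    from youtube_dl import YoutubeDL
    from youtube_dl.utils import DownloadError

    ydl = YoutubeDL({'age_limit': 10, 'writeinfojson': True})
    ydl.add_default_info_extractors()
    try:
        ydl.download(['HtVdAasjOgU'])
    except DownloadError:
        pass  # restricted at this age_limit: no .info.json should remain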

+ 14 - 2
test/test_cache.py

@@ -3,17 +3,18 @@
 
 
 from __future__ import unicode_literals
 
-import shutil
-
 # Allow direct execution
 import os
 import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
+import shutil
 
 
 from test.helper import FakeYDL
 from youtube_dl.cache import Cache
+from youtube_dl.utils import version_tuple
+from youtube_dl.version import __version__
 
 
 
 
 def _is_empty(d):
@@ -54,6 +55,17 @@ class TestCache(unittest.TestCase):
         self.assertFalse(os.path.exists(self.test_dir))
         self.assertEqual(c.load('test_cache', 'k.'), None)
 
+    def test_cache_validation(self):
+        ydl = FakeYDL({
+            'cachedir': self.test_dir,
+        })
+        c = Cache(ydl)
+        obj = {'x': 1, 'y': ['ä', '\\a', True]}
+        c.store('test_cache', 'k.', obj)
+        self.assertEqual(c.load('test_cache', 'k.', min_ver='1970.01.01'), obj)
+        new_version = '.'.join(('%d' % ((v + 1) if i == 0 else v, )) for i, v in enumerate(version_tuple(__version__)))
+        self.assertIs(c.load('test_cache', 'k.', min_ver=new_version), None)
+
 
 
 if __name__ == '__main__':
     unittest.main()
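
A sketch of the version gate tested above: a record written by an older youtube-dl is rejected when loaded with a newer min_ver (the cache directory is a placeholder; section/key mirror the test):

    from test.helper import FakeYDL
    from youtube_dl.cache import Cache

    c = Cache(FakeYDL({'cachedir': '/tmp/ydl-cache-demo'}))
    c.store('test_cache', 'k.', {'x': 1})
    print(c.load('test_cache', 'k.', min_ver='1970.01.01'))  # -> {'x': 1}
    print(c.load('test_cache', 'k.', min_ver='9999.12.31'))  # -> None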

+ 32 - 4
test/test_compat.py

@@ -11,6 +11,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 
 
 
 from youtube_dl.compat import (
+    compat_casefold,
     compat_getenv,
     compat_setenv,
     compat_etree_Element,
@@ -22,6 +23,7 @@ from youtube_dl.compat import (
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlencode,
+    compat_urllib_request,
 )
 
 
 
 
@@ -47,10 +49,11 @@ class TestCompat(unittest.TestCase):
 
 
     def test_all_present(self):
         import youtube_dl.compat
-        all_names = youtube_dl.compat.__all__
-        present_names = set(filter(
+        all_names = sorted(
+            youtube_dl.compat.__all__ + youtube_dl.compat.legacy)
+        present_names = set(map(compat_str, filter(
             lambda c: '_' in c and not c.startswith('_'),
-            dir(youtube_dl.compat))) - set(['unicode_literals'])
+            dir(youtube_dl.compat)))) - set(['unicode_literals'])
         self.assertEqual(all_names, sorted(present_names))
 
     def test_compat_urllib_parse_unquote(self):
@@ -118,9 +121,34 @@ class TestCompat(unittest.TestCase):
 <smil xmlns="http://www.w3.org/2001/SMIL20/Language"></smil>'''
         compat_etree_fromstring(xml)
 
-    def test_struct_unpack(self):
+    def test_compat_struct_unpack(self):
         self.assertEqual(compat_struct_unpack('!B', b'\x00'), (0,))
 
+    def test_compat_casefold(self):
+        if hasattr(compat_str, 'casefold'):
+            # don't bother to test str.casefold() (again)
+            return
+        # thanks https://bugs.python.org/file24232/casefolding.patch
+        self.assertEqual(compat_casefold('hello'), 'hello')
+        self.assertEqual(compat_casefold('hELlo'), 'hello')
+        self.assertEqual(compat_casefold('ß'), 'ss')
+        self.assertEqual(compat_casefold('fi'), 'fi')
+        self.assertEqual(compat_casefold('\u03a3'), '\u03c3')
+        self.assertEqual(compat_casefold('A\u0345\u03a3'), 'a\u03b9\u03c3')
+
+    def test_compat_urllib_request_Request(self):
+        self.assertEqual(
+            compat_urllib_request.Request('http://127.0.0.1', method='PUT').get_method(),
+            'PUT')
+
+        class PUTrequest(compat_urllib_request.Request):
+            def get_method(self):
+                return 'PUT'
+
+        self.assertEqual(
+            PUTrequest('http://127.0.0.1').get_method(),
+            'PUT')
+
 
 
 if __name__ == '__main__':
     unittest.main()
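
A sketch of what compat_casefold buys over lower() on Pythons without str.casefold(), matching the new test above:

    from youtube_dl.compat import compat_casefold

    def caseless_equal(a, b):
        # full Unicode case folding: e.g. 'ß' folds to 'ss'
        return compat_casefold(a) == compat_casefold(b)

    print(caseless_equal('Straße', 'STRASSE'))    # True
    print('Straße'.lower() == 'STRASSE'.lower())  # False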

+ 42 - 14
test/test_download.py

@@ -9,7 +9,6 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import (
-    assertGreaterEqual,
     expect_warnings,
     get_params,
     gettestcases,
@@ -20,26 +19,35 @@ from test.helper import (
 
 
 
 
 import hashlib
-import io
 import json
 import socket
 
 import youtube_dl.YoutubeDL
 from youtube_dl.compat import (
     compat_http_client,
-    compat_urllib_error,
     compat_HTTPError,
+    compat_open as open,
+    compat_urllib_error,
 )
 from youtube_dl.utils import (
     DownloadError,
     ExtractorError,
+    error_to_compat_str,
     format_bytes,
+    IDENTITY,
+    preferredencoding,
     UnavailableVideoError,
 )
 from youtube_dl.extractor import get_info_extractor
 
 RETRIES = 3
 
+# Some unittest APIs require actual str
+if not isinstance('TEST', str):
+    _encode_str = lambda s: s.encode(preferredencoding())
+else:
+    _encode_str = IDENTITY
+
 
 
 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
@@ -100,28 +108,31 @@ def generator(test_case, tname):
 
 
         def print_skipping(reason):
             print('Skipping %s: %s' % (test_case['name'], reason))
+            self.skipTest(_encode_str(reason))
+
         if not ie.working():
             print_skipping('IE marked as not _WORKING')
-            return
 
 
         for tc in test_cases:
             info_dict = tc.get('info_dict', {})
             if not (info_dict.get('id') and info_dict.get('ext')):
-                raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')
+                raise Exception('Test definition (%s) requires both \'id\' and \'ext\' keys present to define the output file' % (tname, ))
 
 
         if 'skip' in test_case:
             print_skipping(test_case['skip'])
-            return
+
         for other_ie in other_ies:
             if not other_ie.working():
                 print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
-                return
 
 
         params = get_params(test_case.get('params', {}))
         params['outtmpl'] = tname + '_' + params['outtmpl']
         if is_playlist and 'playlist' not in test_case:
             params.setdefault('extract_flat', 'in_playlist')
-            params.setdefault('playlistend', test_case.get('playlist_mincount'))
+            params.setdefault('playlistend',
+                              test_case['playlist_maxcount'] + 1
+                              if test_case.get('playlist_maxcount')
+                              else test_case.get('playlist_mincount'))
             params.setdefault('skip_download', True)
 
         ydl = YoutubeDL(params, auto_init=False)
@@ -147,6 +158,7 @@ def generator(test_case, tname):
                 try_rm(tc_filename)
                 try_rm(tc_filename + '.part')
                 try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
+
         try_rm_tcs_files()
         try:
             try_num = 1
@@ -161,7 +173,9 @@ def generator(test_case, tname):
                 except (DownloadError, ExtractorError) as err:
                     # Check if the exception is not a network related one
                     if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
-                        raise
+                        msg = getattr(err, 'msg', error_to_compat_str(err))
+                        err.msg = '%s (%s)' % (msg, tname, )
+                        raise err
 
 
                     if try_num == RETRIES:
                         report_warning('%s failed due to network errors, skipping...' % tname)
@@ -179,13 +193,19 @@ def generator(test_case, tname):
                 expect_info_dict(self, res_dict, test_case.get('info_dict', {}))
 
             if 'playlist_mincount' in test_case:
-                assertGreaterEqual(
-                    self,
+                self.assertGreaterEqual(
                     len(res_dict['entries']),
                     test_case['playlist_mincount'],
                     'Expected at least %d in playlist %s, but got only %d' % (
                         test_case['playlist_mincount'], test_case['url'],
                         len(res_dict['entries'])))
+            if 'playlist_maxcount' in test_case:
+                self.assertLessEqual(
+                    len(res_dict['entries']),
+                    test_case['playlist_maxcount'],
+                    'Expected at most %d in playlist %s, but got %d' % (
+                        test_case['playlist_maxcount'], test_case['url'],
+                        len(res_dict['entries'])))
             if 'playlist_count' in test_case:
                 self.assertEqual(
                     len(res_dict['entries']),
@@ -210,7 +230,15 @@ def generator(test_case, tname):
                 # First, check test cases' data against extracted data alone
                 expect_info_dict(self, tc_res_dict, tc.get('info_dict', {}))
                 # Now, check downloaded file consistency
+                # support test-case with volatile ID, signalled by regexp value
+                if tc.get('info_dict', {}).get('id', '').startswith('re:'):
+                    test_id = tc['info_dict']['id']
+                    tc['info_dict']['id'] = tc_res_dict['id']
+                else:
+                    test_id = None
                 tc_filename = get_tc_filename(tc)
+                if test_id:
+                    tc['info_dict']['id'] = test_id
                 if not test_case.get('params', {}).get('skip_download', False):
                     self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
                     self.assertTrue(tc_filename in finished_hook_called)
@@ -219,8 +247,8 @@ def generator(test_case, tname):
                         if params.get('test'):
                             expected_minsize = max(expected_minsize, 10000)
                         got_fsize = os.path.getsize(tc_filename)
-                        assertGreaterEqual(
-                            self, got_fsize, expected_minsize,
+                        self.assertGreaterEqual(
+                            got_fsize, expected_minsize,
                             'Expected %s to be at least %s, but it\'s only %s ' %
                             (tc_filename, format_bytes(expected_minsize),
                                 format_bytes(got_fsize)))
@@ -233,7 +261,7 @@ def generator(test_case, tname):
                 self.assertTrue(
                     os.path.exists(info_json_fn),
                     'Missing info file %s' % info_json_fn)
-                with io.open(info_json_fn, encoding='utf-8') as infof:
+                with open(info_json_fn, encoding='utf-8') as infof:
                     info_dict = json.load(infof)
                 expect_info_dict(self, info_dict, tc.get('info_dict', {}))
         finally:
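
The new playlist_maxcount key and the 're:' volatile-ID convention handled above combine in a test definition like this sketch (URL, pattern and counts are illustrative only):

    _TEST = {
        'url': 'https://example.com/playlist/123',
        'info_dict': {
            'id': 're:^pl[0-9a-f]+$',  # volatile ID, matched as a regexp
            'title': 'Example playlist',
        },
        'playlist_mincount': 5,   # at least 5 entries ...
        'playlist_maxcount': 50,  # ... and at most 50
    }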

+ 272 - 0
test/test_downloader_external.py

@@ -0,0 +1,272 @@
+#!/usr/bin/env python
+# coding: utf-8
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import re
+import sys
+import subprocess
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from test.helper import (
+    FakeLogger,
+    FakeYDL,
+    http_server_port,
+    try_rm,
+)
+from youtube_dl import YoutubeDL
+from youtube_dl.compat import (
+    compat_contextlib_suppress,
+    compat_http_cookiejar_Cookie,
+    compat_http_server,
+    compat_kwargs,
+)
+from youtube_dl.utils import (
+    encodeFilename,
+    join_nonempty,
+)
+from youtube_dl.downloader.external import (
+    Aria2cFD,
+    Aria2pFD,
+    AxelFD,
+    CurlFD,
+    FFmpegFD,
+    HttpieFD,
+    WgetFD,
+)
+from youtube_dl.postprocessor import (
+    FFmpegPostProcessor,
+)
+import threading
+
+TEST_SIZE = 10 * 1024
+
+TEST_COOKIE = {
+    'version': 0,
+    'name': 'test',
+    'value': 'ytdlp',
+    'port': None,
+    'port_specified': False,
+    'domain': '.example.com',
+    'domain_specified': True,
+    'domain_initial_dot': False,
+    'path': '/',
+    'path_specified': True,
+    'secure': False,
+    'expires': None,
+    'discard': False,
+    'comment': None,
+    'comment_url': None,
+    'rest': {},
+}
+
+TEST_COOKIE_VALUE = join_nonempty('name', 'value', delim='=', from_dict=TEST_COOKIE)
+
+TEST_INFO = {'url': 'http://www.example.com/'}
+
+
+def cookiejar_Cookie(**cookie_args):
+    return compat_http_cookiejar_Cookie(**compat_kwargs(cookie_args))
+
+
+def ifExternalFDAvailable(externalFD):
+    return unittest.skipUnless(externalFD.available(),
+                               externalFD.get_basename() + ' not found')
+
+
+class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+    def log_message(self, format, *args):
+        pass
+
+    def send_content_range(self, total=None):
+        range_header = self.headers.get('Range')
+        start = end = None
+        if range_header:
+            mobj = re.match(r'bytes=(\d+)-(\d+)', range_header)
+            if mobj:
+                start, end = (int(mobj.group(i)) for i in (1, 2))
+        valid_range = start is not None and end is not None
+        if valid_range:
+            content_range = 'bytes %d-%d' % (start, end)
+            if total:
+                content_range += '/%d' % total
+            self.send_header('Content-Range', content_range)
+        return (end - start + 1) if valid_range else total
+
+    def serve(self, range=True, content_length=True):
+        self.send_response(200)
+        self.send_header('Content-Type', 'video/mp4')
+        size = TEST_SIZE
+        if range:
+            size = self.send_content_range(TEST_SIZE)
+        if content_length:
+            self.send_header('Content-Length', size)
+        self.end_headers()
+        self.wfile.write(b'#' * size)
+
+    def do_GET(self):
+        if self.path == '/regular':
+            self.serve()
+        elif self.path == '/no-content-length':
+            self.serve(content_length=False)
+        elif self.path == '/no-range':
+            self.serve(range=False)
+        elif self.path == '/no-range-no-content-length':
+            self.serve(range=False, content_length=False)
+        else:
+            assert False, 'unrecognised server path'
+
+
+@ifExternalFDAvailable(Aria2pFD)
+class TestAria2pFD(unittest.TestCase):
+    def setUp(self):
+        self.httpd = compat_http_server.HTTPServer(
+            ('127.0.0.1', 0), HTTPTestRequestHandler)
+        self.port = http_server_port(self.httpd)
+        self.server_thread = threading.Thread(target=self.httpd.serve_forever)
+        self.server_thread.daemon = True
+        self.server_thread.start()
+
+    def download(self, params, ep):
+        with subprocess.Popen(
+            ['aria2c', '--enable-rpc'],
+            stdout=subprocess.DEVNULL,
+            stderr=subprocess.DEVNULL
+        ) as process:
+            if not process.poll():
+                filename = 'testfile.mp4'
+                params['logger'] = FakeLogger()
+                params['outtmpl'] = filename
+                ydl = YoutubeDL(params)
+                try_rm(encodeFilename(filename))
+                self.assertEqual(ydl.download(['http://127.0.0.1:%d/%s' % (self.port, ep)]), 0)
+                self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE)
+                try_rm(encodeFilename(filename))
+            process.kill()
+
+    def download_all(self, params):
+        for ep in ('regular', 'no-content-length', 'no-range', 'no-range-no-content-length'):
+            self.download(params, ep)
+
+    def test_regular(self):
+        self.download_all({'external_downloader': 'aria2p'})
+
+    def test_chunked(self):
+        self.download_all({
+            'external_downloader': 'aria2p',
+            'http_chunk_size': 1000,
+        })
+
+
+@ifExternalFDAvailable(HttpieFD)
+class TestHttpieFD(unittest.TestCase):
+    def test_make_cmd(self):
+        with FakeYDL() as ydl:
+            downloader = HttpieFD(ydl, {})
+            self.assertEqual(
+                downloader._make_cmd('test', TEST_INFO),
+                ['http', '--download', '--output', 'test', 'http://www.example.com/'])
+
+            # Test cookie header is added
+            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
+            self.assertEqual(
+                downloader._make_cmd('test', TEST_INFO),
+                ['http', '--download', '--output', 'test',
+                 'http://www.example.com/', 'Cookie:' + TEST_COOKIE_VALUE])
+
+
+@ifExternalFDAvailable(AxelFD)
+class TestAxelFD(unittest.TestCase):
+    def test_make_cmd(self):
+        with FakeYDL() as ydl:
+            downloader = AxelFD(ydl, {})
+            self.assertEqual(
+                downloader._make_cmd('test', TEST_INFO),
+                ['axel', '-o', 'test', '--', 'http://www.example.com/'])
+
+            # Test cookie header is added
+            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
+            self.assertEqual(
+                downloader._make_cmd('test', TEST_INFO),
+                ['axel', '-o', 'test', '-H', 'Cookie: ' + TEST_COOKIE_VALUE,
+                 '--max-redirect=0', '--', 'http://www.example.com/'])
+
+
+@ifExternalFDAvailable(WgetFD)
+class TestWgetFD(unittest.TestCase):
+    def test_make_cmd(self):
+        with FakeYDL() as ydl:
+            downloader = WgetFD(ydl, {})
+            self.assertNotIn('--load-cookies', downloader._make_cmd('test', TEST_INFO))
+            # Test cookiejar tempfile arg is added
+            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
+            self.assertIn('--load-cookies', downloader._make_cmd('test', TEST_INFO))
+
+
+@ifExternalFDAvailable(CurlFD)
+class TestCurlFD(unittest.TestCase):
+    def test_make_cmd(self):
+        with FakeYDL() as ydl:
+            downloader = CurlFD(ydl, {})
+            self.assertNotIn('--cookie', downloader._make_cmd('test', TEST_INFO))
+            # Test cookie header is added
+            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
+            self.assertIn('--cookie', downloader._make_cmd('test', TEST_INFO))
+            self.assertIn(TEST_COOKIE_VALUE, downloader._make_cmd('test', TEST_INFO))
+
+
+@ifExternalFDAvailable(Aria2cFD)
+class TestAria2cFD(unittest.TestCase):
+    def test_make_cmd(self):
+        with FakeYDL() as ydl:
+            downloader = Aria2cFD(ydl, {})
+            downloader._make_cmd('test', TEST_INFO)
+            self.assertFalse(hasattr(downloader, '_cookies_tempfile'))
+
+            # Test cookiejar tempfile arg is added
+            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
+            cmd = downloader._make_cmd('test', TEST_INFO)
+            self.assertIn('--load-cookies=%s' % downloader._cookies_tempfile, cmd)
+
+
+# Handle delegated availability
+def ifFFmpegFDAvailable(externalFD):
+    # raise SkipTest, or set False!
+    avail = ifExternalFDAvailable(externalFD) and False
+    with compat_contextlib_suppress(Exception):
+        avail = FFmpegPostProcessor(downloader=None).available
+    return unittest.skipUnless(
+        avail, externalFD.get_basename() + ' not found')
+
+
+@ifFFmpegFDAvailable(FFmpegFD)
+class TestFFmpegFD(unittest.TestCase):
+    _args = []
+
+    def _test_cmd(self, args):
+        self._args = args
+
+    def test_make_cmd(self):
+        with FakeYDL() as ydl:
+            downloader = FFmpegFD(ydl, {})
+            downloader._debug_cmd = self._test_cmd
+            info_dict = TEST_INFO.copy()
+            info_dict['ext'] = 'mp4'
+
+            downloader._call_downloader('test', info_dict)
+            self.assertEqual(self._args, [
+                'ffmpeg', '-y', '-i', 'http://www.example.com/',
+                '-c', 'copy', '-f', 'mp4', 'file:test'])
+
+            # Test cookies arg is added
+            ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
+            downloader._call_downloader('test', info_dict)
+            self.assertEqual(self._args, [
+                'ffmpeg', '-y', '-cookies', TEST_COOKIE_VALUE + '; path=/; domain=.example.com;\r\n',
+                '-i', 'http://www.example.com/', '-c', 'copy', '-f', 'mp4', 'file:test'])
+
+
+if __name__ == '__main__':
+    unittest.main()
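
A sketch of the pattern these new tests follow: probe for the external binary, then inspect the argv that _make_cmd() builds (output name and URL are placeholders):

    from test.helper import FakeYDL
    from youtube_dl.downloader.external import CurlFD

    if CurlFD.available():
        with FakeYDL() as ydl:
            cmd = CurlFD(ydl, {})._make_cmd('out.mp4', {'url': 'http://www.example.com/'})
            print(cmd)  # e.g. ['curl', ..., 'http://www.example.com/']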

+ 6 - 13
test/test_downloader_http.py

@@ -9,7 +9,11 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from test.helper import http_server_port, try_rm
+from test.helper import (
+    FakeLogger,
+    http_server_port,
+    try_rm,
+)
 from youtube_dl import YoutubeDL
 from youtube_dl.compat import compat_http_server
 from youtube_dl.downloader.http import HttpFD
@@ -66,17 +70,6 @@ class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
             assert False
 
 
 
 
-class FakeLogger(object):
-    def debug(self, msg):
-        pass
-
-    def warning(self, msg):
-        pass
-
-    def error(self, msg):
-        pass
-
-
 class TestHttpFD(unittest.TestCase):
     def setUp(self):
         self.httpd = compat_http_server.HTTPServer(
@@ -95,7 +88,7 @@ class TestHttpFD(unittest.TestCase):
         self.assertTrue(downloader.real_download(filename, {
             'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
         }))
-        self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE)
+        self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE, ep)
         try_rm(encodeFilename(filename))
 
     def download_all(self, params):
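
A sketch of driving HttpFD directly, as TestHttpFD does against its local server (the port, endpoint and filename are placeholders for the test server's values):

    from test.helper import FakeLogger
    from youtube_dl import YoutubeDL
    from youtube_dl.downloader.http import HttpFD

    params = {'logger': FakeLogger()}
    downloader = HttpFD(YoutubeDL(params), params)
    ok = downloader.real_download('testfile.mp4', {'url': 'http://127.0.0.1:8000/regular'})
    print(ok)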

+ 24 - 16
test/test_execution.py

@@ -8,46 +8,54 @@ import unittest
 import sys
 import os
 import subprocess
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 
+rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+sys.path.insert(0, rootDir)
+
+from youtube_dl.compat import compat_register_utf8, compat_subprocess_get_DEVNULL
 from youtube_dl.utils import encodeArgument
 
 
-rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+compat_register_utf8()
 
 
 
 
-try:
-    _DEV_NULL = subprocess.DEVNULL
-except AttributeError:
-    _DEV_NULL = open(os.devnull, 'wb')
+_DEV_NULL = compat_subprocess_get_DEVNULL()
 
 
 
 
 class TestExecution(unittest.TestCase):
 class TestExecution(unittest.TestCase):
+        self.module = 'youtube_dl'
+        if sys.version_info < (2, 7):
+            self.module += '.__main__'
+
     def test_import(self):
     def test_import(self):
         subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
 
     def test_module_exec(self):
     def test_module_exec(self):
-            subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
+        subprocess.check_call([sys.executable, '-m', self.module, '--version'], cwd=rootDir, stdout=_DEV_NULL)
 
 
     def test_main_exec(self):
-        subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL)
+        subprocess.check_call([sys.executable, os.path.normpath('youtube_dl/__main__.py'), '--version'], cwd=rootDir, stdout=_DEV_NULL)
 
 
     def test_cmdline_umlauts(self):
+        os.environ['PYTHONIOENCODING'] = 'utf-8'
         p = subprocess.Popen(
-            [sys.executable, 'youtube_dl/__main__.py', encodeArgument('ä'), '--version'],
+            [sys.executable, '-m', self.module, encodeArgument('ä'), '--version'],
             cwd=rootDir, stdout=_DEV_NULL, stderr=subprocess.PIPE)
         _, stderr = p.communicate()
         self.assertFalse(stderr)
 
 
     def test_lazy_extractors(self):
+        lazy_extractors = os.path.normpath('youtube_dl/extractor/lazy_extractors.py')
         try:
-            subprocess.check_call([sys.executable, 'devscripts/make_lazy_extractors.py', 'youtube_dl/extractor/lazy_extractors.py'], cwd=rootDir, stdout=_DEV_NULL)
-            subprocess.check_call([sys.executable, 'test/test_all_urls.py'], cwd=rootDir, stdout=_DEV_NULL)
+            subprocess.check_call([sys.executable, os.path.normpath('devscripts/make_lazy_extractors.py'), lazy_extractors], cwd=rootDir, stdout=_DEV_NULL)
+            subprocess.check_call([sys.executable, os.path.normpath('test/test_all_urls.py')], cwd=rootDir, stdout=_DEV_NULL)
         finally:
-            try:
-                os.remove('youtube_dl/extractor/lazy_extractors.py')
-            except (IOError, OSError):
-                pass
+            for x in ('', 'c') if sys.version_info[0] < 3 else ('',):
+                try:
+                    os.remove(lazy_extractors + x)
+                except OSError:
+                    pass
 
 
 
 
 if __name__ == '__main__':
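
A sketch of the package execution path the rewritten tests cover; the module fallback mirrors setUp() above (running this assumes a checkout on sys.path):

    import subprocess
    import sys

    module = 'youtube_dl' if sys.version_info >= (2, 7) else 'youtube_dl.__main__'
    subprocess.check_call([sys.executable, '-m', module, '--version'])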

+ 494 - 56
test/test_http.py

@@ -8,30 +8,163 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 
-from test.helper import http_server_port
-from youtube_dl import YoutubeDL
-from youtube_dl.compat import compat_http_server, compat_urllib_request
+import contextlib
+import gzip
+import io
 import ssl
+import tempfile
 import threading
+import zlib
+
+# avoid deprecated alias assertRaisesRegexp
+if hasattr(unittest.TestCase, 'assertRaisesRegex'):
+    unittest.TestCase.assertRaisesRegexp = unittest.TestCase.assertRaisesRegex
+
+try:
+    import brotli
+except ImportError:
+    brotli = None
+try:
+    from urllib.request import pathname2url
+except ImportError:
+    from urllib import pathname2url
+
+from youtube_dl.compat import (
+    compat_http_cookiejar_Cookie,
+    compat_http_server,
+    compat_str as str,
+    compat_urllib_error,
+    compat_urllib_HTTPError,
+    compat_urllib_parse,
+    compat_urllib_request,
+)
+
+from youtube_dl.utils import (
+    sanitized_Request,
+    update_Request,
+    urlencode_postdata,
+)
+
+from test.helper import (
+    expectedFailureIf,
+    FakeYDL,
+    FakeLogger,
+    http_server_port,
+)
+from youtube_dl import YoutubeDL
 
 
 TEST_DIR = os.path.dirname(os.path.abspath(__file__))
 
 
 
 
 class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+    protocol_version = 'HTTP/1.1'
+
+    # work-around old/new -style class inheritance
+    def super(self, meth_name, *args, **kwargs):
+        from types import MethodType
+        try:
+            super()
+            fn = lambda s, m, *a, **k: getattr(super(), m)(*a, **k)
+        except TypeError:
+            fn = lambda s, m, *a, **k: getattr(compat_http_server.BaseHTTPRequestHandler, m)(s, *a, **k)
+        self.super = MethodType(fn, self)
+        return self.super(meth_name, *args, **kwargs)
+
     def log_message(self, format, *args):
         pass
 
 
+    def _headers(self):
+        payload = str(self.headers).encode('utf-8')
+        self.send_response(200)
+        self.send_header('Content-Type', 'application/json')
+        self.send_header('Content-Length', str(len(payload)))
+        self.end_headers()
+        self.wfile.write(payload)
+
+    def _redirect(self):
+        self.send_response(int(self.path[len('/redirect_'):]))
+        self.send_header('Location', '/method')
+        self.send_header('Content-Length', '0')
+        self.end_headers()
+
+    def _method(self, method, payload=None):
+        self.send_response(200)
+        self.send_header('Content-Length', str(len(payload or '')))
+        self.send_header('Method', method)
+        self.end_headers()
+        if payload:
+            self.wfile.write(payload)
+
+    def _status(self, status):
+        payload = '<html>{0} NOT FOUND</html>'.format(status).encode('utf-8')
+        self.send_response(int(status))
+        self.send_header('Content-Type', 'text/html; charset=utf-8')
+        self.send_header('Content-Length', str(len(payload)))
+        self.end_headers()
+        self.wfile.write(payload)
+
+    def _read_data(self):
+        if 'Content-Length' in self.headers:
+            return self.rfile.read(int(self.headers['Content-Length']))
+
+    def _test_url(self, path, host='127.0.0.1', scheme='http', port=None):
+        return '{0}://{1}:{2}/{3}'.format(
+            scheme, host,
+            port if port is not None
+            else http_server_port(self.server), path)
+
+    def do_POST(self):
+        data = self._read_data()
+        if self.path.startswith('/redirect_'):
+            self._redirect()
+        elif self.path.startswith('/method'):
+            self._method('POST', data)
+        elif self.path.startswith('/headers'):
+            self._headers()
+        else:
+            self._status(404)
+
+    def do_HEAD(self):
+        if self.path.startswith('/redirect_'):
+            self._redirect()
+        elif self.path.startswith('/method'):
+            self._method('HEAD')
+        else:
+            self._status(404)
+
+    def do_PUT(self):
+        data = self._read_data()
+        if self.path.startswith('/redirect_'):
+            self._redirect()
+        elif self.path.startswith('/method'):
+            self._method('PUT', data)
+        else:
+            self._status(404)
+
     def do_GET(self):
-        if self.path == '/video.html':
-            self.send_response(200)
-            self.send_header('Content-Type', 'text/html; charset=utf-8')
+
+        def respond(payload=b'<html><video src="/vid.mp4" /></html>',
+                    payload_type='text/html; charset=utf-8',
+                    payload_encoding=None,
+                    resp_code=200):
+            self.send_response(resp_code)
+            self.send_header('Content-Type', payload_type)
+            if payload_encoding:
+                self.send_header('Content-Encoding', payload_encoding)
+            self.send_header('Content-Length', str(len(payload)))  # required for persistent connections
             self.end_headers()
-            self.wfile.write(b'<html><video src="/vid.mp4" /></html>')
+            self.wfile.write(payload)
+
+        def gzip_compress(p):
+            buf = io.BytesIO()
+            with contextlib.closing(gzip.GzipFile(fileobj=buf, mode='wb')) as f:
+                f.write(p)
+            return buf.getvalue()
+
+        if self.path == '/video.html':
+            respond()
         elif self.path == '/vid.mp4':
-            self.send_response(200)
-            self.send_header('Content-Type', 'video/mp4')
-            self.end_headers()
-            self.wfile.write(b'\x00\x00\x00\x00\x20\x66\x74[video]')
+            respond(b'\x00\x00\x00\x00\x20\x66\x74[video]', 'video/mp4')
         elif self.path == '/302':
             if sys.version_info[0] == 3:
                 # XXX: Python 3 http server does not allow non-ASCII header values
@@ -39,71 +172,336 @@ class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
                 self.end_headers()
                 return
 
-            new_url = 'http://127.0.0.1:%d/中文.html' % http_server_port(self.server)
+            new_url = self._test_url('中文.html')
             self.send_response(302)
             self.send_header(b'Location', new_url.encode('utf-8'))
             self.end_headers()
         elif self.path == '/%E4%B8%AD%E6%96%87.html':
-            self.send_response(200)
-            self.send_header('Content-Type', 'text/html; charset=utf-8')
+            respond()
+        elif self.path == '/%c7%9f':
+            respond()
+        elif self.path == '/redirect_dotsegments':
+            self.send_response(301)
+            # redirect to /headers but with dot segments before
+            self.send_header('Location', '/a/b/./../../headers')
+            self.send_header('Content-Length', '0')
             self.end_headers()
-            self.wfile.write(b'<html><video src="/vid.mp4" /></html>')
+        elif self.path.startswith('/redirect_'):
+            self._redirect()
+        elif self.path.startswith('/method'):
+            self._method('GET')
+        elif self.path.startswith('/headers'):
+            self._headers()
+        elif self.path.startswith('/308-to-headers'):
+            self.send_response(308)
+            self.send_header('Location', '/headers')
+            self.send_header('Content-Length', '0')
+            self.end_headers()
+        elif self.path == '/trailing_garbage':
+            payload = b'<html><video src="/vid.mp4" /></html>'
+            compressed = gzip_compress(payload) + b'trailing garbage'
+            respond(compressed, payload_encoding='gzip')
+        elif self.path == '/302-non-ascii-redirect':
+            new_url = self._test_url('中文.html')
+            # actually respond with permanent redirect
+            self.send_response(301)
+            self.send_header('Location', new_url)
+            self.send_header('Content-Length', '0')
+            self.end_headers()
+        elif self.path == '/content-encoding':
+            encodings = self.headers.get('ytdl-encoding', '')
+            payload = b'<html><video src="/vid.mp4" /></html>'
+            for encoding in filter(None, (e.strip() for e in encodings.split(','))):
+                if encoding == 'br' and brotli:
+                    payload = brotli.compress(payload)
+                elif encoding == 'gzip':
+                    payload = gzip_compress(payload)
+                elif encoding == 'deflate':
+                    payload = zlib.compress(payload)
+                elif encoding == 'unsupported':
+                    payload = b'raw'
+                    break
+                else:
+                    self._status(415)
+                    return
+            respond(payload, payload_encoding=encodings)
+
         else:
-            assert False
+            self._status(404)
 
+    def send_header(self, keyword, value):
+        """
+        Forcibly allow HTTP server to send non percent-encoded non-ASCII characters in headers.
+        This is against what is defined in RFC 3986: but we need to test that we support this
+        since some sites incorrectly do this.
+        """
+        if keyword.lower() == 'connection':
+            return self.super('send_header', keyword, value)
 
-class FakeLogger(object):
-    def debug(self, msg):
-        pass
+        if not hasattr(self, '_headers_buffer'):
+            self._headers_buffer = []
 
-    def warning(self, msg):
-        pass
+        self._headers_buffer.append('{0}: {1}\r\n'.format(keyword, value).encode('utf-8'))
 
-    def error(self, msg):
-        pass
+    def end_headers(self):
+        if hasattr(self, '_headers_buffer'):
+            self.wfile.write(b''.join(self._headers_buffer))
+            self._headers_buffer = []
+        self.super('end_headers')
 
 
 class TestHTTP(unittest.TestCase):
+    # when does it make sense to check the SSL certificate?
+    _check_cert = (
+        sys.version_info >= (3, 2)
+        or (sys.version_info[0] == 2 and sys.version_info[1:] >= (7, 19)))
+
     def setUp(self):
-        self.httpd = compat_http_server.HTTPServer(
+        # HTTP server
+        self.http_httpd = compat_http_server.HTTPServer(
             ('127.0.0.1', 0), HTTPTestRequestHandler)
-        self.port = http_server_port(self.httpd)
-        self.server_thread = threading.Thread(target=self.httpd.serve_forever)
-        self.server_thread.daemon = True
-        self.server_thread.start()
+        self.http_port = http_server_port(self.http_httpd)
 
-    def test_unicode_path_redirection(self):
-        # XXX: Python 3 http server does not allow non-ASCII header values
-        if sys.version_info[0] == 3:
-            return
+        self.http_server_thread = threading.Thread(target=self.http_httpd.serve_forever)
+        self.http_server_thread.daemon = True
+        self.http_server_thread.start()
 
-        ydl = YoutubeDL({'logger': FakeLogger()})
-        r = ydl.extract_info('http://127.0.0.1:%d/302' % self.port)
-        self.assertEqual(r['entries'][0]['url'], 'http://127.0.0.1:%d/vid.mp4' % self.port)
+        try:
+            from http.server import ThreadingHTTPServer
+        except ImportError:
+            try:
+                from socketserver import ThreadingMixIn
+            except ImportError:
+                from SocketServer import ThreadingMixIn
 
+            class ThreadingHTTPServer(ThreadingMixIn, compat_http_server.HTTPServer):
+                pass
 
-class TestHTTPS(unittest.TestCase):
-    def setUp(self):
+        # HTTPS server
         certfn = os.path.join(TEST_DIR, 'testcert.pem')
-        self.httpd = compat_http_server.HTTPServer(
+        self.https_httpd = ThreadingHTTPServer(
             ('127.0.0.1', 0), HTTPTestRequestHandler)
-        self.httpd.socket = ssl.wrap_socket(
-            self.httpd.socket, certfile=certfn, server_side=True)
-        self.port = http_server_port(self.httpd)
-        self.server_thread = threading.Thread(target=self.httpd.serve_forever)
-        self.server_thread.daemon = True
-        self.server_thread.start()
+        try:
+            sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+            sslctx.verify_mode = ssl.CERT_NONE
+            sslctx.check_hostname = False
+            sslctx.load_cert_chain(certfn, None)
+            self.https_httpd.socket = sslctx.wrap_socket(
+                self.https_httpd.socket, server_side=True)
+        except AttributeError:
+            self.https_httpd.socket = ssl.wrap_socket(
+                self.https_httpd.socket, certfile=certfn, server_side=True)
+
+        self.https_port = http_server_port(self.https_httpd)
+        self.https_server_thread = threading.Thread(target=self.https_httpd.serve_forever)
+        self.https_server_thread.daemon = True
+        self.https_server_thread.start()
 
+    def tearDown(self):
+
+        def closer(svr):
+            def _closer():
+                svr.shutdown()
+                svr.server_close()
+            return _closer
+
+        shutdown_thread = threading.Thread(target=closer(self.http_httpd))
+        shutdown_thread.start()
+        self.http_server_thread.join(2.0)
+
+        shutdown_thread = threading.Thread(target=closer(self.https_httpd))
+        shutdown_thread.start()
+        self.https_server_thread.join(2.0)
+
+    def _test_url(self, path, host='127.0.0.1', scheme='http', port=None):
+        return '{0}://{1}:{2}/{3}'.format(
+            scheme, host,
+            port if port is not None
+            else self.https_port if scheme == 'https'
+            else self.http_port, path)
+
+    @unittest.skipUnless(_check_cert, 'No support for certificate check in SSL')
     def test_nocheckcertificate(self):
-        if sys.version_info >= (2, 7, 9):  # No certificate checking anyways
-            ydl = YoutubeDL({'logger': FakeLogger()})
-            self.assertRaises(
-                Exception,
-                ydl.extract_info, 'https://127.0.0.1:%d/video.html' % self.port)
+        with FakeYDL({'logger': FakeLogger()}) as ydl:
+            with self.assertRaises(compat_urllib_error.URLError):
+                ydl.urlopen(sanitized_Request(self._test_url('headers', scheme='https')))
+
+        with FakeYDL({'logger': FakeLogger(), 'nocheckcertificate': True}) as ydl:
+            r = ydl.urlopen(sanitized_Request(self._test_url('headers', scheme='https')))
+            self.assertEqual(r.getcode(), 200)
+            r.close()
+
+    def test_percent_encode(self):
+        with FakeYDL() as ydl:
+            # Unicode characters should be encoded with uppercase percent-encoding
+            res = ydl.urlopen(sanitized_Request(self._test_url('中文.html')))
+            self.assertEqual(res.getcode(), 200)
+            res.close()
+            # don't normalize existing percent encodings
+            res = ydl.urlopen(sanitized_Request(self._test_url('%c7%9f')))
+            self.assertEqual(res.getcode(), 200)
+            res.close()
+
+    def test_unicode_path_redirection(self):
+        with FakeYDL() as ydl:
+            r = ydl.urlopen(sanitized_Request(self._test_url('302-non-ascii-redirect')))
+            self.assertEqual(r.url, self._test_url('%E4%B8%AD%E6%96%87.html'))
+            r.close()
+
+    def test_redirect(self):
+        with FakeYDL() as ydl:
+            def do_req(redirect_status, method, check_no_content=False):
+                data = b'testdata' if method in ('POST', 'PUT') else None
+                res = ydl.urlopen(sanitized_Request(
+                    self._test_url('redirect_{0}'.format(redirect_status)),
+                    method=method, data=data))
+                if check_no_content:
+                    self.assertNotIn('Content-Type', res.headers)
+                return res.read().decode('utf-8'), res.headers.get('method', '')
+            # A 303 must either use GET or HEAD for subsequent request
+            self.assertEqual(do_req(303, 'POST'), ('', 'GET'))
+            self.assertEqual(do_req(303, 'HEAD'), ('', 'HEAD'))
+
+            self.assertEqual(do_req(303, 'PUT'), ('', 'GET'))
+
+            # 301 and 302 turn POST only into a GET, with no Content-Type
+            self.assertEqual(do_req(301, 'POST', True), ('', 'GET'))
+            self.assertEqual(do_req(301, 'HEAD'), ('', 'HEAD'))
+            self.assertEqual(do_req(302, 'POST', True), ('', 'GET'))
+            self.assertEqual(do_req(302, 'HEAD'), ('', 'HEAD'))
+
+            self.assertEqual(do_req(301, 'PUT'), ('testdata', 'PUT'))
+            self.assertEqual(do_req(302, 'PUT'), ('testdata', 'PUT'))
+
+            # 307 and 308 should not change method
+            for m in ('POST', 'PUT'):
+                self.assertEqual(do_req(307, m), ('testdata', m))
+                self.assertEqual(do_req(308, m), ('testdata', m))
+
+            self.assertEqual(do_req(307, 'HEAD'), ('', 'HEAD'))
+            self.assertEqual(do_req(308, 'HEAD'), ('', 'HEAD'))
+
+            # These should not redirect and instead raise an HTTPError
+            for code in (300, 304, 305, 306):
+                with self.assertRaises(compat_urllib_HTTPError):
+                    do_req(code, 'GET')
+
+    # Jython 2.7.1 times out for some reason
+    @expectedFailureIf(sys.platform.startswith('java') and sys.version_info < (2, 7, 2))
+    def test_content_type(self):
+        # https://github.com/yt-dlp/yt-dlp/commit/379a4f161d4ad3e40932dcf5aca6e6fb9715ab28
+        with FakeYDL({'nocheckcertificate': True}) as ydl:
+            # method should be auto-detected as POST
+            r = sanitized_Request(self._test_url('headers', scheme='https'), data=urlencode_postdata({'test': 'test'}))
+
+            headers = ydl.urlopen(r).read().decode('utf-8')
+            self.assertIn('Content-Type: application/x-www-form-urlencoded', headers)
+
+            # test http
+            r = sanitized_Request(self._test_url('headers'), data=urlencode_postdata({'test': 'test'}))
+            headers = ydl.urlopen(r).read().decode('utf-8')
+            self.assertIn('Content-Type: application/x-www-form-urlencoded', headers)
+
+    def test_update_req(self):
+        req = sanitized_Request('http://example.com')
+        assert req.data is None
+        assert req.get_method() == 'GET'
+        assert not req.has_header('Content-Type')
+        # Test that zero-byte payloads will be sent
+        req = update_Request(req, data=b'')
+        assert req.data == b''
+        assert req.get_method() == 'POST'
+        # yt-dl expects data to be encoded and Content-Type to be added by sender
+        # assert req.get_header('Content-Type') == 'application/x-www-form-urlencoded'
 
-        ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
-        r = ydl.extract_info('https://127.0.0.1:%d/video.html' % self.port)
-        self.assertEqual(r['entries'][0]['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)
+    def test_cookiejar(self):
+        with FakeYDL() as ydl:
+            ydl.cookiejar.set_cookie(compat_http_cookiejar_Cookie(
+                0, 'test', 'ytdl', None, False, '127.0.0.1', True,
+                False, '/headers', True, False, None, False, None, None, {}))
+            data = ydl.urlopen(sanitized_Request(
+                self._test_url('headers'))).read().decode('utf-8')
+            self.assertIn('Cookie: test=ytdl', data)
+
+    def test_passed_cookie_header(self):
+        # We should accept a Cookie header being passed as in normal headers and handle it appropriately.
+        with FakeYDL() as ydl:
+            # Specified Cookie header should be used
+            res = ydl.urlopen(sanitized_Request(
+                self._test_url('headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8')
+            self.assertIn('Cookie: test=test', res)
+
+            # Specified Cookie header should be removed on any redirect
+            res = ydl.urlopen(sanitized_Request(
+                self._test_url('308-to-headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8')
+            self.assertNotIn('Cookie: test=test', res)
+
+            # Specified Cookie header should override global cookiejar for that request
+            ydl.cookiejar.set_cookie(compat_http_cookiejar_Cookie(
+                0, 'test', 'ytdlp', None, False, '127.0.0.1', True,
+                False, '/headers', True, False, None, False, None, None, {}))
+            data = ydl.urlopen(sanitized_Request(
+                self._test_url('headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8')
+            self.assertNotIn('Cookie: test=ytdlp', data)
+            self.assertIn('Cookie: test=test', data)
+
+    def test_no_compression_compat_header(self):
+        with FakeYDL() as ydl:
+            data = ydl.urlopen(
+                sanitized_Request(
+                    self._test_url('headers'),
+                    headers={'Youtubedl-no-compression': True})).read()
+            self.assertIn(b'Accept-Encoding: identity', data)
+            self.assertNotIn(b'youtubedl-no-compression', data.lower())
+
+    def test_gzip_trailing_garbage(self):
+        # https://github.com/ytdl-org/youtube-dl/commit/aa3e950764337ef9800c936f4de89b31c00dfcf5
+        # https://github.com/ytdl-org/youtube-dl/commit/6f2ec15cee79d35dba065677cad9da7491ec6e6f
+        with FakeYDL() as ydl:
+            data = ydl.urlopen(sanitized_Request(self._test_url('trailing_garbage'))).read().decode('utf-8')
+            self.assertEqual(data, '<html><video src="/vid.mp4" /></html>')
+
+    def __test_compression(self, encoding):
+        with FakeYDL() as ydl:
+            res = ydl.urlopen(
+                sanitized_Request(
+                    self._test_url('content-encoding'),
+                    headers={'ytdl-encoding': encoding}))
+            # decoded encodings are removed: only check for valid decompressed data
+            self.assertEqual(res.read(), b'<html><video src="/vid.mp4" /></html>')
+
+    @unittest.skipUnless(brotli, 'brotli support is not installed')
+    def test_brotli(self):
+        self.__test_compression('br')
+
+    def test_deflate(self):
+        self.__test_compression('deflate')
+
+    def test_gzip(self):
+        self.__test_compression('gzip')
+
+    def test_multiple_encodings(self):
+        # https://www.rfc-editor.org/rfc/rfc9110.html#section-8.4
+        for pair in ('gzip,deflate', 'deflate, gzip', 'gzip, gzip', 'deflate, deflate'):
+            self.__test_compression(pair)
+
+    def test_unsupported_encoding(self):
+        # it should return the raw content
+        with FakeYDL() as ydl:
+            res = ydl.urlopen(
+                sanitized_Request(
+                    self._test_url('content-encoding'),
+                    headers={'ytdl-encoding': 'unsupported'}))
+            self.assertEqual(res.headers.get('Content-Encoding'), 'unsupported')
+            self.assertEqual(res.read(), b'raw')
+
+    def test_remove_dot_segments(self):
+        with FakeYDL() as ydl:
+            res = ydl.urlopen(sanitized_Request(self._test_url('a/b/./../../headers')))
+            self.assertEqual(compat_urllib_parse.urlparse(res.geturl()).path, '/headers')
+
+            res = ydl.urlopen(sanitized_Request(self._test_url('redirect_dotsegments')))
+            self.assertEqual(compat_urllib_parse.urlparse(res.geturl()).path, '/headers')
 
 
 def _build_proxy_handler(name):
@@ -117,7 +515,7 @@ def _build_proxy_handler(name):
             self.send_response(200)
             self.send_header('Content-Type', 'text/plain; charset=utf-8')
             self.end_headers()
-            self.wfile.write('{self.proxy_name}: {self.path}'.format(self=self).encode('utf-8'))
+            self.wfile.write('{0}: {1}'.format(self.proxy_name, self.path).encode('utf-8'))
     return HTTPTestRequestHandler
 
 
@@ -137,10 +535,30 @@ class TestProxy(unittest.TestCase):
         self.geo_proxy_thread.daemon = True
         self.geo_proxy_thread.start()
 
+    def tearDown(self):
+
+        def closer(svr):
+            def _closer():
+                svr.shutdown()
+                svr.server_close()
+            return _closer
+
+        shutdown_thread = threading.Thread(target=closer(self.proxy))
+        shutdown_thread.start()
+        self.proxy_thread.join(2.0)
+
+        shutdown_thread = threading.Thread(target=closer(self.geo_proxy))
+        shutdown_thread.start()
+        self.geo_proxy_thread.join(2.0)
+
+    def _test_proxy(self, host='127.0.0.1', port=None):
+        return '{0}:{1}'.format(
+            host, port if port is not None else self.port)
+
     def test_proxy(self):
-        geo_proxy = '127.0.0.1:{0}'.format(self.geo_port)
+        geo_proxy = self._test_proxy(port=self.geo_port)
         ydl = YoutubeDL({
-            'proxy': '127.0.0.1:{0}'.format(self.port),
+            'proxy': self._test_proxy(),
             'geo_verification_proxy': geo_proxy,
         })
         url = 'http://foo.com/bar'
@@ -154,7 +572,7 @@
 
     def test_proxy_with_idn(self):
         ydl = YoutubeDL({
-            'proxy': '127.0.0.1:{0}'.format(self.port),
+            'proxy': self._test_proxy(),
         })
         url = 'http://中文.tw/'
         response = ydl.urlopen(url).read().decode('utf-8')
@@ -162,5 +580,25 @@
         self.assertEqual(response, 'normal: http://xn--fiq228c.tw/')
 
 
+class TestFileURL(unittest.TestCase):
+    # See https://github.com/ytdl-org/youtube-dl/issues/8227
+    def test_file_urls(self):
+        tf = tempfile.NamedTemporaryFile(delete=False)
+        tf.write(b'foobar')
+        tf.close()
+        url = compat_urllib_parse.urljoin('file://', pathname2url(tf.name))
+        with FakeYDL() as ydl:
+            self.assertRaisesRegexp(
+                compat_urllib_error.URLError, 'file:// scheme is explicitly disabled in youtube-dl for security reasons', ydl.urlopen, url)
+        # not yet implemented
+        """
+        with FakeYDL({'enable_file_urls': True}) as ydl:
+            res = ydl.urlopen(url)
+            self.assertEqual(res.read(), b'foobar')
+            res.close()
+        """
+        os.unlink(tf.name)
+
+
 if __name__ == '__main__':
     unittest.main()

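The new test_redirect above pins down the method-rewriting rules of RFC 9110: 301/302 turn POST into GET (dropping the body), 303 turns everything except HEAD into GET, and 307/308 preserve both method and body. A self-contained sketch of that decision table (illustrative names, not part of the diff):

# Methods rewritten to GET per redirect status, as asserted by test_redirect.
REWRITE_TO_GET = {
    301: ('POST',),
    302: ('POST',),
    303: ('POST', 'PUT'),  # 303: anything but HEAD becomes GET
    307: (),               # method and body preserved
    308: (),
}

def redirected_method(status, method):
    if method == 'HEAD':
        return 'HEAD'
    return 'GET' if method in REWRITE_TO_GET.get(status, ()) else method

assert redirected_method(303, 'PUT') == 'GET'
assert redirected_method(301, 'PUT') == 'PUT'
assert redirected_method(307, 'POST') == 'POST'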
+ 332 - 113
test/test_jsinterp.py

The diff for this file is too large to display, so it has been collapsed.

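For orientation while that diff is collapsed: test_jsinterp.py exercises youtube_dl.jsinterp.JSInterpreter, the pure-Python JavaScript interpreter used for YouTube signature code, roughly along these lines (the function body is illustrative, not taken from the suppressed diff):

from youtube_dl.jsinterp import JSInterpreter

jsi = JSInterpreter('function f(x) { return x * 2 + 1; }')
assert jsi.call_function('f', 20) == 41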
+ 73 - 27
test/test_subtitles.py

@@ -38,6 +38,9 @@ class BaseTestSubtitles(unittest.TestCase):
         self.DL = FakeYDL()
         self.ie = self.IE()
         self.DL.add_info_extractor(self.ie)
+        if not self.IE.working():
+            print('Skipping: %s marked as not _WORKING' % self.IE.ie_key())
+            self.skipTest('IE marked as not _WORKING')
 
     def getInfoDict(self):
         info_dict = self.DL.extract_info(self.url, download=False)
@@ -56,6 +59,21 @@
 
 
 class TestYoutubeSubtitles(BaseTestSubtitles):
+    # Available subtitles for QRS8MkLhQmM:
+    # Language formats
+    # ru       vtt, ttml, srv3, srv2, srv1, json3
+    # fr       vtt, ttml, srv3, srv2, srv1, json3
+    # en       vtt, ttml, srv3, srv2, srv1, json3
+    # nl       vtt, ttml, srv3, srv2, srv1, json3
+    # de       vtt, ttml, srv3, srv2, srv1, json3
+    # ko       vtt, ttml, srv3, srv2, srv1, json3
+    # it       vtt, ttml, srv3, srv2, srv1, json3
+    # zh-Hant  vtt, ttml, srv3, srv2, srv1, json3
+    # hi       vtt, ttml, srv3, srv2, srv1, json3
+    # pt-BR    vtt, ttml, srv3, srv2, srv1, json3
+    # es-MX    vtt, ttml, srv3, srv2, srv1, json3
+    # ja       vtt, ttml, srv3, srv2, srv1, json3
+    # pl       vtt, ttml, srv3, srv2, srv1, json3
     url = 'QRS8MkLhQmM'
     IE = YoutubeIE
 
@@ -64,41 +82,60 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(len(subtitles.keys()), 13)
-        self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
-        self.assertEqual(md5(subtitles['it']), '6d752b98c31f1cf8d597050c7a2cb4b5')
+        self.assertEqual(md5(subtitles['en']), 'ae1bd34126571a77aabd4d276b28044d')
+        self.assertEqual(md5(subtitles['it']), '0e0b667ba68411d88fd1c5f4f4eab2f9')
         for lang in ['fr', 'de']:
             self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
-    def test_youtube_subtitles_ttml_format(self):
+    def _test_subtitles_format(self, fmt, md5_hash, lang='en'):
         self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitlesformat'] = 'ttml'
+        self.DL.params['subtitlesformat'] = fmt
         subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), 'e306f8c42842f723447d9f63ad65df54')
+        self.assertEqual(md5(subtitles[lang]), md5_hash)
+
+    def test_youtube_subtitles_ttml_format(self):
+        self._test_subtitles_format('ttml', 'c97ddf1217390906fa9fbd34901f3da2')
 
     def test_youtube_subtitles_vtt_format(self):
-        self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitlesformat'] = 'vtt'
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
+        self._test_subtitles_format('vtt', 'ae1bd34126571a77aabd4d276b28044d')
 
-    def test_youtube_automatic_captions(self):
-        self.url = '8YoUxe5ncPo'
+    def test_youtube_subtitles_json3_format(self):
+        self._test_subtitles_format('json3', '688dd1ce0981683867e7fe6fde2a224b')
+
+    def _test_automatic_captions(self, url, lang):
+        self.url = url
         self.DL.params['writeautomaticsub'] = True
-        self.DL.params['subtitleslangs'] = ['it']
+        self.DL.params['subtitleslangs'] = [lang]
         subtitles = self.getSubtitles()
-        self.assertTrue(subtitles['it'] is not None)
+        self.assertTrue(subtitles[lang] is not None)
 
+    def test_youtube_automatic_captions(self):
+        # Available automatic captions for 8YoUxe5ncPo:
+        # Language formats (all in vtt, ttml, srv3, srv2, srv1, json3)
+        # gu, zh-Hans, zh-Hant, gd, ga, gl, lb, la, lo, tt, tr,
+        # lv, lt, tk, th, tg, te, fil, haw, yi, ceb, yo, de, da,
+        # el, eo, en, eu, et, es, ru, rw, ro, bn, be, bg, uk, jv,
+        # bs, ja, or, xh, co, ca, cy, cs, ps, pt, pa, vi, pl, hy,
+        # hr, ht, hu, hmn, hi, ha, mg, uz, ml, mn, mi, mk, ur,
+        # mt, ms, mr, ug, ta, my, af, sw, is, am,
+        #                                         *it*, iw, sv, ar,
+        # su, zu, az, id, ig, nl, no, ne, ny, fr, ku, fy, fa, fi,
+        # ka, kk, sr, sq, ko, kn, km, st, sk, si, so, sn, sm, sl,
+        # ky, sd
+        # ...
+        self._test_automatic_captions('8YoUxe5ncPo', 'it')
+
+    @unittest.skip('ASR subs all in all supported langs now')
     def test_youtube_translated_subtitles(self):
-        # This video has a subtitles track, which can be translated
-        self.url = 'Ky9eprVWzlI'
-        self.DL.params['writeautomaticsub'] = True
-        self.DL.params['subtitleslangs'] = ['it']
-        subtitles = self.getSubtitles()
-        self.assertTrue(subtitles['it'] is not None)
+        # This video has a subtitles track, which can be translated (#4555)
+        self._test_automatic_captions('Ky9eprVWzlI', 'it')
 
     def test_youtube_nosubtitles(self):
         self.DL.expect_warning('video doesn\'t have subtitles')
-        self.url = 'n5BB19UTcdA'
+        # Available automatic captions for 8YoUxe5ncPo:
+        # ...
+        # 8YoUxe5ncPo has no subtitles
+        self.url = '8YoUxe5ncPo'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
@@ -128,6 +165,7 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.assertFalse(subtitles)
 
 
+@unittest.skip('IE broken')
 class TestTedSubtitles(BaseTestSubtitles):
     url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
     IE = TEDIE
@@ -152,18 +190,19 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
-        self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
-        self.assertEqual(md5(subtitles['fr']), 'b6191146a6c5d3a452244d853fde6dc8')
+        self.assertEqual(md5(subtitles['en']), '386cbc9320b94e25cb364b97935e5dd1')
+        self.assertEqual(md5(subtitles['fr']), 'c9b69eef35bc6641c0d4da8a04f9dfac')
 
     def test_nosubtitles(self):
         self.DL.expect_warning('video doesn\'t have subtitles')
-        self.url = 'http://vimeo.com/56015672'
+        self.url = 'http://vimeo.com/68093876'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertFalse(subtitles)
 
 
+@unittest.skip('IE broken')
 class TestWallaSubtitles(BaseTestSubtitles):
     url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
     IE = WallaIE
@@ -185,6 +224,7 @@ class TestWallaSubtitles(BaseTestSubtitles):
         self.assertFalse(subtitles)
 
 
+@unittest.skip('IE broken')
 class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
     url = 'http://www.ceskatelevize.cz/ivysilani/10600540290-u6-uzasny-svet-techniky'
     IE = CeskaTelevizeIE
@@ -206,6 +246,7 @@ class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
         self.assertFalse(subtitles)
 
 
+@unittest.skip('IE broken')
 class TestLyndaSubtitles(BaseTestSubtitles):
     url = 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html'
     IE = LyndaIE
@@ -218,6 +259,7 @@ class TestLyndaSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '09bbe67222259bed60deaa26997d73a7')
 
 
+@unittest.skip('IE broken')
 class TestNPOSubtitles(BaseTestSubtitles):
     url = 'http://www.npo.nl/nos-journaal/28-08-2014/POW_00722860'
     IE = NPOIE
@@ -230,6 +272,7 @@ class TestNPOSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4')
 
 
+@unittest.skip('IE broken')
 class TestMTVSubtitles(BaseTestSubtitles):
     url = 'http://www.cc.com/video-clips/p63lk0/adam-devine-s-house-party-chasing-white-swans'
     IE = ComedyCentralIE
@@ -252,9 +295,10 @@ class TestNRKSubtitles(BaseTestSubtitles):
     def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
+        self.DL.params['format'] = 'best/bestvideo'
         subtitles = self.getSubtitles()
-        self.assertEqual(set(subtitles.keys()), set(['no']))
-        self.assertEqual(md5(subtitles['no']), '544fa917d3197fcbee64634559221cc2')
+        self.assertEqual(set(subtitles.keys()), set(['nb-ttv']))
+        self.assertEqual(md5(subtitles['nb-ttv']), '67e06ff02d0deaf975e68f6cb8f6a149')
 
 
 class TestRaiPlaySubtitles(BaseTestSubtitles):
@@ -277,6 +321,7 @@ class TestRaiPlaySubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['it']), '4b3264186fbb103508abe5311cfcb9cd')
 
 
+@unittest.skip('IE broken - DRM only')
 class TestVikiSubtitles(BaseTestSubtitles):
     url = 'http://www.viki.com/videos/1060846v-punch-episode-18'
     IE = VikiIE
@@ -303,6 +348,7 @@ class TestThePlatformSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')
 
 
+@unittest.skip('IE broken')
 class TestThePlatformFeedSubtitles(BaseTestSubtitles):
     url = 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207'
     IE = ThePlatformFeedIE
@@ -338,7 +384,7 @@ class TestDemocracynowSubtitles(BaseTestSubtitles):
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(set(subtitles.keys()), set(['en']))
-        self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
+        self.assertEqual(md5(subtitles['en']), 'a3cc4c0b5eadd74d9974f1c1f5101045')
 
     def test_subtitles_in_page(self):
         self.url = 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree'
@@ -346,7 +392,7 @@ class TestDemocracynowSubtitles(BaseTestSubtitles):
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(set(subtitles.keys()), set(['en']))
-        self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
+        self.assertEqual(md5(subtitles['en']), 'a3cc4c0b5eadd74d9974f1c1f5101045')
 
 
 if __name__ == '__main__':

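Most of these subtitle tests need network access, so it can be useful to run a single refactored check in isolation; a sketch using the standard unittest API, run from the repository root (expected md5 hashes drift whenever a site re-renders its captions, which is why several were updated above):

import unittest

from test.test_subtitles import TestYoutubeSubtitles

suite = unittest.TestSuite([
    TestYoutubeSubtitles('test_youtube_subtitles_vtt_format'),
])
unittest.TextTestRunner(verbosity=2).run(suite)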
+ 6 - 4
test/test_swfinterp.py

@@ -5,16 +5,18 @@ from __future__ import unicode_literals
 import os
 import sys
 import unittest
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
+dirn = os.path.dirname
+
+sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
 
 import errno
-import io
 import json
 import re
 import subprocess
 
 from youtube_dl.swfinterp import SWFInterpreter
+from youtube_dl.compat import compat_open as open
 
 
 TEST_DIR = os.path.join(
@@ -43,7 +45,7 @@ def _make_testfunc(testfile):
                     '-static-link-runtime-shared-libraries', as_file])
             except OSError as ose:
                 if ose.errno == errno.ENOENT:
-                    print('mxmlc not found! Skipping test.')
+                    self.skipTest('mxmlc not found!')
                     return
                 raise
 
@@ -51,7 +53,7 @@ def _make_testfunc(testfile):
             swf_content = swf_f.read()
         swfi = SWFInterpreter(swf_content)
 
-        with io.open(as_file, 'r', encoding='utf-8') as as_f:
+        with open(as_file, 'r', encoding='utf-8') as as_f:
             as_content = as_f.read()
 
         def _find_spec(key):

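The new test/test_traversal.py below covers traverse_obj, which walks nested dict/list data along a path of keys and indices, branching on Ellipsis and returning a default instead of raising. A quick usage sketch distilled from the tests that follow:

from youtube_dl.traversal import traverse_obj

data = {'urls': [
    {'index': 0, 'url': 'https://www.example.com/0'},
    {'index': 1, 'url': 'https://www.example.com/1'},
]}
# plain path of keys/indices
assert traverse_obj(data, ('urls', 0, 'url')) == 'https://www.example.com/0'
# Ellipsis branches over all items
assert traverse_obj(data, ('urls', Ellipsis, 'url')) == [
    'https://www.example.com/0', 'https://www.example.com/1']
# missing keys yield None (the default) instead of raising
assert traverse_obj(data, ('urls', 2, 'url')) is None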
+ 509 - 0
test/test_traversal.py

@@ -0,0 +1,509 @@
+#!/usr/bin/env python
+# coding: utf-8
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+import re
+
+from youtube_dl.traversal import (
+    dict_get,
+    get_first,
+    T,
+    traverse_obj,
+)
+from youtube_dl.compat import (
+    compat_etree_fromstring,
+    compat_http_cookies,
+    compat_str,
+)
+from youtube_dl.utils import (
+    int_or_none,
+    str_or_none,
+)
+
+_TEST_DATA = {
+    100: 100,
+    1.2: 1.2,
+    'str': 'str',
+    'None': None,
+    '...': Ellipsis,
+    'urls': [
+        {'index': 0, 'url': 'https://www.example.com/0'},
+        {'index': 1, 'url': 'https://www.example.com/1'},
+    ],
+    'data': (
+        {'index': 2},
+        {'index': 3},
+    ),
+    'dict': {},
+}
+
+
+if sys.version_info < (3, 0):
+    class _TestCase(unittest.TestCase):
+
+        def assertCountEqual(self, *args, **kwargs):
+            return self.assertItemsEqual(*args, **kwargs)
+else:
+    _TestCase = unittest.TestCase
+
+
+class TestTraversal(_TestCase):
+    def assertMaybeCountEqual(self, *args, **kwargs):
+        if sys.version_info < (3, 7):
+            # random dict order
+            return self.assertCountEqual(*args, **kwargs)
+        else:
+            return self.assertEqual(*args, **kwargs)
+
+    def test_traverse_obj(self):
+        # instant compat
+        str = compat_str
+
+        # define a pukka Iterable
+        def iter_range(stop):
+            for from_ in range(stop):
+                yield from_
+
+        # Test base functionality
+        self.assertEqual(traverse_obj(_TEST_DATA, ('str',)), 'str',
+                         msg='allow tuple path')
+        self.assertEqual(traverse_obj(_TEST_DATA, ['str']), 'str',
+                         msg='allow list path')
+        self.assertEqual(traverse_obj(_TEST_DATA, (value for value in ("str",))), 'str',
+                         msg='allow iterable path')
+        self.assertEqual(traverse_obj(_TEST_DATA, 'str'), 'str',
+                         msg='single items should be treated as a path')
+        self.assertEqual(traverse_obj(_TEST_DATA, None), _TEST_DATA)
+        self.assertEqual(traverse_obj(_TEST_DATA, 100), 100)
+        self.assertEqual(traverse_obj(_TEST_DATA, 1.2), 1.2)
+
+        # Test Ellipsis behavior
+        self.assertCountEqual(traverse_obj(_TEST_DATA, Ellipsis),
+                              (item for item in _TEST_DATA.values() if item not in (None, {})),
+                              msg='`...` should give all non-discarded values')
+        self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', 0, Ellipsis)), _TEST_DATA['urls'][0].values(),
+                              msg='`...` selection for dicts should select all values')
+        self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'url')),
+                         ['https://www.example.com/0', 'https://www.example.com/1'],
+                         msg='nested `...` queries should work')
+        self.assertCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'index')), iter_range(4),
+                              msg='`...` query result should be flattened')
+        self.assertEqual(traverse_obj(iter(range(4)), Ellipsis), list(range(4)),
+                         msg='`...` should accept iterables')
+
+        # Test function as key
+        self.assertEqual(traverse_obj(_TEST_DATA, lambda x, y: x == 'urls' and isinstance(y, list)),
+                         [_TEST_DATA['urls']],
+                         msg='function as query key should perform a filter based on (key, value)')
+        self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)), set(('str',)),
+                              msg='exceptions in the query function should be caught')
+        self.assertEqual(traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0), [0, 2],
+                         msg='function key should accept iterables')
+        if __debug__:
+            with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
+                traverse_obj(_TEST_DATA, lambda a: Ellipsis)
+            with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
+                traverse_obj(_TEST_DATA, lambda a, b, c: Ellipsis)
+
+        # Test set as key (transformation/type, like `expected_type`)
+        self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper), )), ['STR'],
+                         msg='Function in set should be a transformation')
+        self.assertEqual(traverse_obj(_TEST_DATA, ('fail', T(lambda _: 'const'))), 'const',
+                         msg='Function in set should always be called')
+        self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str))), ['str'],
+                         msg='Type in set should be a type filter')
+        self.assertMaybeCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str, int))), [100, 'str'],
+                                   msg='Multiple types in set should be a type filter')
+        self.assertEqual(traverse_obj(_TEST_DATA, T(dict)), _TEST_DATA,
+                         msg='A single set should be wrapped into a path')
+        self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper))), ['STR'],
+                         msg='Transformation function should not raise')
+        self.assertMaybeCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str_or_none))),
+                                   [item for item in map(str_or_none, _TEST_DATA.values()) if item is not None],
+                                   msg='Function in set should be a transformation')
+        if __debug__:
+            with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
+                traverse_obj(_TEST_DATA, set())
+            with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
+                traverse_obj(_TEST_DATA, set((str.upper, str)))
+
+        # Test `slice` as a key
+        _SLICE_DATA = [0, 1, 2, 3, 4]
+        self.assertEqual(traverse_obj(_TEST_DATA, ('dict', slice(1))), None,
+                         msg='slice on a dictionary should not throw')
+        self.assertEqual(traverse_obj(_SLICE_DATA, slice(1)), _SLICE_DATA[:1],
+                         msg='slice key should apply slice to sequence')
+        self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 2)), _SLICE_DATA[1:2],
+                         msg='slice key should apply slice to sequence')
+        self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 4, 2)), _SLICE_DATA[1:4:2],
+                         msg='slice key should apply slice to sequence')
+
+        # Test alternative paths
+        self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'str'), 'str',
+                         msg='multiple `paths` should be treated as alternative paths')
+        self.assertEqual(traverse_obj(_TEST_DATA, 'str', 100), 'str',
+                         msg='alternatives should exit early')
+        self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'fail'), None,
+                         msg='alternatives should return `default` if exhausted')
+        self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, 'fail'), 100), 100,
+                         msg='alternatives should track their own branching return')
+        self.assertEqual(traverse_obj(_TEST_DATA, ('dict', Ellipsis), ('data', Ellipsis)), list(_TEST_DATA['data']),
+                         msg='alternatives on empty objects should search further')
+
+        # Test branch and path nesting
+        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', (3, 0), 'url')), ['https://www.example.com/0'],
+                         msg='tuple as key should be treated as branches')
+        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', [3, 0], 'url')), ['https://www.example.com/0'],
+                         msg='list as key should be treated as branches')
+        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ((1, 'fail'), (0, 'url')))), ['https://www.example.com/0'],
+                         msg='double nesting in path should be treated as paths')
+        self.assertEqual(traverse_obj(['0', [1, 2]], [(0, 1), 0]), [1],
+                         msg='do not fail early on branching')
+        self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', ((1, ('fail', 'url')), (0, 'url')))),
+                              ['https://www.example.com/0', 'https://www.example.com/1'],
+                              msg='triple nesting in path should be treated as branches')
+        self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ('fail', (Ellipsis, 'url')))),
+                         ['https://www.example.com/0', 'https://www.example.com/1'],
+                         msg='ellipsis as branch path start gets flattened')
+
+        # Test dictionary as key
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}), {0: 100, 1: 1.2},
+                         msg='dict key should result in a dict with the same keys')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', 0, 'url')}),
+                         {0: 'https://www.example.com/0'},
+                         msg='dict key should allow paths')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', (3, 0), 'url')}),
+                         {0: ['https://www.example.com/0']},
+                         msg='tuple in dict path should be treated as branches')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, 'fail'), (0, 'url')))}),
+                         {0: ['https://www.example.com/0']},
+                         msg='double nesting in dict path should be treated as paths')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, ('fail', 'url')), (0, 'url')))}),
+                         {0: ['https://www.example.com/1', 'https://www.example.com/0']},
+                         msg='triple nesting in dict path should be treated as branches')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}), {},
+                         msg='remove `None` values when top level dict key fails')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}, default=Ellipsis), {0: Ellipsis},
+                         msg='use `default` if key fails and `default`')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}), {},
+                         msg='remove empty values when dict key')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}, default=Ellipsis), {0: Ellipsis},
+                         msg='use `default` when dict key and a default')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}), {},
+                         msg='remove empty values when nested dict key fails')
+        self.assertEqual(traverse_obj(None, {0: 'fail'}), {},
+                         msg='default to dict if pruned')
+        self.assertEqual(traverse_obj(None, {0: 'fail'}, default=Ellipsis), {0: Ellipsis},
+                         msg='default to dict if pruned and default is given')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=Ellipsis), {0: {0: Ellipsis}},
+                         msg='use nested `default` when nested dict key fails and `default`')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: ('dict', Ellipsis)}), {},
+                         msg='remove key if branch in dict key not successful')
+
+        # Testing default parameter behavior
+        _DEFAULT_DATA = {'None': None, 'int': 0, 'list': []}
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail'), None,
+                         msg='default value should be `None`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', 'fail', default=Ellipsis), Ellipsis,
+                         msg='chained fails should result in default')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', 'int'), 0,
+                         msg='should not short circuit on `None`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', default=1), 1,
+                         msg='invalid dict key should result in `default`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', default=1), 1,
+                         msg='`None` is a deliberate sentinel and should become `default`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', 10)), None,
+                         msg='`IndexError` should result in `default`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail'), default=1), 1,
+                         msg='if branched but not successful return `default` if defined, not `[]`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail'), default=None), None,
+                         msg='if branched but not successful return `default` even if `default` is `None`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, (Ellipsis, 'fail')), [],
+                         msg='if branched but not successful return `[]`, not `default`')
+        self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', Ellipsis)), [],
+                         msg='if branched but object is empty return `[]`, not `default`')
+        self.assertEqual(traverse_obj(None, Ellipsis), [],
+                         msg='if branched but object is `None` return `[]`, not `default`')
+        self.assertEqual(traverse_obj({0: None}, (0, Ellipsis)), [],
+                         msg='if branched but state is `None` return `[]`, not `default`')
+
+        branching_paths = [
+            ('fail', Ellipsis),
+            (Ellipsis, 'fail'),
+            100 * ('fail',) + (Ellipsis,),
+            (Ellipsis,) + 100 * ('fail',),
+        ]
+        for branching_path in branching_paths:
+            self.assertEqual(traverse_obj({}, branching_path), [],
+                             msg='if branched but state is `None`, return `[]` (not `default`)')
+            self.assertEqual(traverse_obj({}, 'fail', branching_path), [],
+                             msg='if branching in last alternative and previous did not match, return `[]` (not `default`)')
+            self.assertEqual(traverse_obj({0: 'x'}, 0, branching_path), 'x',
+                             msg='if branching in last alternative and previous did match, return single value')
+            self.assertEqual(traverse_obj({0: 'x'}, branching_path, 0), 'x',
+                             msg='if branching in first alternative and non-branching path does match, return single value')
+            self.assertEqual(traverse_obj({}, branching_path, 'fail'), None,
+                             msg='if branching in first alternative and non-branching path does not match, return `default`')
+
+        # Testing expected_type behavior
+        _EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0}
+        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str),
+                         'str', msg='accept matching `expected_type` type')
+        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int),
+                         None, msg='reject non-matching `expected_type` type')
+        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)),
+                         '0', msg='transform type using type function')
+        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0),
+                         None, msg='wrap expected_type function in try_call')
+        self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, Ellipsis, expected_type=str),
+                         ['str'], msg='eliminate items that expected_type fails on')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int),
+                         {0: 100}, msg='type as expected_type should filter dict values')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none),
+                         {0: '100', 1: '1.2'}, msg='function as expected_type should transform dict values')
+        self.assertEqual(traverse_obj(_TEST_DATA, ({0: 1.2}, 0, set((int_or_none,))), expected_type=int),
+                         1, msg='expected_type should not filter non-final dict values')
+        self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int),
+                         {0: {0: 100}}, msg='expected_type should transform deep dict values')
+        self.assertEqual(traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(Ellipsis)),
+                         [{0: Ellipsis}, {0: Ellipsis}], msg='expected_type should transform branched dict values')
+        self.assertEqual(traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int),
+                         [4], msg='expected_type regression for type matching in tuple branching')
+        self.assertEqual(traverse_obj(_TEST_DATA, ['data', Ellipsis], expected_type=int),
+                         [], msg='expected_type regression for type matching in dict result')
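+
+        # As exercised above, a type passed as expected_type acts as an
+        # isinstance() filter, while a callable is applied through try_call,
+        # so a raising transform quietly becomes `default`, e.g.:
+        #   traverse_obj({'k': '1'}, 'k', expected_type=int_or_none)  # -> 1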
+
+        # Test get_all behavior
+        _GET_ALL_DATA = {'key': [0, 1, 2]}
+        self.assertEqual(traverse_obj(_GET_ALL_DATA, ('key', Ellipsis), get_all=False), 0,
+                         msg='if not `get_all`, return only first matching value')
+        self.assertEqual(traverse_obj(_GET_ALL_DATA, Ellipsis, get_all=False), [0, 1, 2],
+                         msg='do not overflatten if not `get_all`')
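+        # get_all=False stops after the first successful branch; it does not
+        # flatten a matched value that already is a list, per the second case.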
+
+        # Test casesense behavior
+        _CASESENSE_DATA = {
+            'KeY': 'value0',
+            0: {
+                'KeY': 'value1',
+                0: {'KeY': 'value2'},
+            },
+            # FULLWIDTH LATIN CAPITAL LETTER K
+            '\uff2bey': 'value3',
+        }
+        self.assertEqual(traverse_obj(_CASESENSE_DATA, 'key'), None,
+                         msg='dict keys should be case-sensitive unless `casesense` is disabled')
+        self.assertEqual(traverse_obj(_CASESENSE_DATA, 'keY',
+                                      casesense=False), 'value0',
+                         msg='allow non-matching key case if not `casesense`')
+        self.assertEqual(traverse_obj(_CASESENSE_DATA, '\uff4bey',  # FULLWIDTH LATIN SMALL LETTER K
+                                      casesense=False), 'value3',
+                         msg='allow non-matching Unicode key case if not `casesense`')
+        self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ('keY',)),
+                                      casesense=False), ['value1'],
+                         msg='allow non-matching key case in branch if not `casesense`')
+        self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ((0, 'keY'),)),
+                                      casesense=False), ['value2'],
+                         msg='allow non-matching key case in branch path if not `casesense`')
+
+        # Test traverse_string behavior
+        _TRAVERSE_STRING_DATA = {'str': 'str', 1.2: 1.2}
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0)), None,
+                         msg='do not traverse into string if not `traverse_string`')
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0),
+                                      _traverse_string=True), 's',
+                         msg='traverse into string if `traverse_string`')
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, (1.2, 1),
+                                      _traverse_string=True), '.',
+                         msg='traverse into converted data if `traverse_string`')
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', Ellipsis),
+                                      _traverse_string=True), 'str',
+                         msg='`...` should result in string (same value) if `traverse_string`')
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)),
+                                      _traverse_string=True), 'sr',
+                         msg='`slice` should result in string if `traverse_string`')
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == 's'),
+                                      _traverse_string=True), 'str',
+                         msg='function should result in string if `traverse_string`')
+        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)),
+                                      _traverse_string=True), ['s', 'r'],
+                         msg='branching should result in list if `traverse_string`')
+        self.assertEqual(traverse_obj({}, (0, Ellipsis), _traverse_string=True), [],
+                         msg='branching should result in list if `traverse_string`')
+        self.assertEqual(traverse_obj({}, (0, lambda x, y: True), _traverse_string=True), [],
+                         msg='branching should result in list if `traverse_string`')
+        self.assertEqual(traverse_obj({}, (0, slice(1)), _traverse_string=True), [],
+                         msg='branching should result in list if `traverse_string`')
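+        # With _traverse_string=True the current object is coerced to str and
+        # then indexed or sliced like one; only branching (tuples of keys)
+        # produces a list of characters rather than a string.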
+
+        # Test re.Match as input obj
+        mobj = re.match(r'^0(12)(?P<group>3)(4)?$', '0123')
+        self.assertEqual(traverse_obj(mobj, Ellipsis), [x for x in mobj.groups() if x is not None],
+                         msg='`...` on a `re.Match` should give its `groups()`')
+        self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 2)), ['0123', '3'],
+                         msg='function on a `re.Match` should be passed group index (starting at 0) and group value')
+        self.assertEqual(traverse_obj(mobj, 'group'), '3',
+                         msg='str key on a `re.Match` should give group with that name')
+        self.assertEqual(traverse_obj(mobj, 2), '3',
+                         msg='int key on a `re.Match` should give the group at that index')
+        self.assertEqual(traverse_obj(mobj, 'gRoUp', casesense=False), '3',
+                         msg='str key on a `re.Match` should respect casesense')
+        self.assertEqual(traverse_obj(mobj, 'fail'), None,
+                         msg='failing str key on a `re.Match` should return `default`')
+        self.assertEqual(traverse_obj(mobj, 'gRoUpS', casesense=False), None,
+                         msg='failing str key on a `re.Match` should return `default`')
+        self.assertEqual(traverse_obj(mobj, 8), None,
+                         msg='failing int key on a `re.Match` should return `default`')
+        self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 'group')), ['0123', '3'],
+                         msg='function on a `re.Match` should give group name as well')
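+        # A re.Match thus traverses like a dict keyed by both group indices
+        # (0 = full match) and group names, with `...` yielding only the
+        # groups that actually participated in the match.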
+
+        # Test xml.etree.ElementTree.Element as input obj
+        etree = compat_etree_fromstring('''<?xml version="1.0"?>
+        <data>
+            <country name="Liechtenstein">
+                <rank>1</rank>
+                <year>2008</year>
+                <gdppc>141100</gdppc>
+                <neighbor name="Austria" direction="E"/>
+                <neighbor name="Switzerland" direction="W"/>
+            </country>
+            <country name="Singapore">
+                <rank>4</rank>
+                <year>2011</year>
+                <gdppc>59900</gdppc>
+                <neighbor name="Malaysia" direction="N"/>
+            </country>
+            <country name="Panama">
+                <rank>68</rank>
+                <year>2011</year>
+                <gdppc>13600</gdppc>
+                <neighbor name="Costa Rica" direction="W"/>
+                <neighbor name="Colombia" direction="E"/>
+            </country>
+        </data>''')
+        self.assertEqual(traverse_obj(etree, ''), etree,
+                         msg='empty str key should return the element itself')
+        self.assertEqual(traverse_obj(etree, 'country'), list(etree),
+                         msg='str key should return all children with that tag name')
+        self.assertEqual(traverse_obj(etree, Ellipsis), list(etree),
+                         msg='`...` as key should return all children')
+        self.assertEqual(traverse_obj(etree, lambda _, x: x[0].text == '4'), [etree[1]],
+                         msg='function as key should get element as value')
+        self.assertEqual(traverse_obj(etree, lambda i, _: i == 1), [etree[1]],
+                         msg='function as key should get index as key')
+        self.assertEqual(traverse_obj(etree, 0), etree[0],
+                         msg='int key should return the nth child')
+        self.assertEqual(traverse_obj(etree, './/neighbor/@name'),
+                         ['Austria', 'Switzerland', 'Malaysia', 'Costa Rica', 'Colombia'],
+                         msg='`@<attribute>` at end of path should give that attribute')
+        self.assertEqual(traverse_obj(etree, '//neighbor/@fail'), [None, None, None, None, None],
+                         msg='`@<nonexistent>` at end of path should give `None`')
+        self.assertEqual(traverse_obj(etree, ('//neighbor/@', 2)), {'name': 'Malaysia', 'direction': 'N'},
+                         msg='`@` should give the full attribute dict')
+        self.assertEqual(traverse_obj(etree, '//year/text()'), ['2008', '2011', '2011'],
+                         msg='`text()` at end of path should give the inner text')
+        self.assertEqual(traverse_obj(etree, '//*[@direction]/@direction'), ['E', 'W', 'N', 'W', 'E'],
+                         msg='full Python XPath features should be supported')
+        self.assertEqual(traverse_obj(etree, (0, '@name')), 'Liechtenstein',
+                         msg='special transformations should act on current element')
+        self.assertEqual(traverse_obj(etree, ('country', 0, Ellipsis, 'text()', T(int_or_none))), [1, 2008, 141100],
+                         msg='special transformations should act on current element')
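+
+        # So ElementTree nodes traverse like sequences of children, with the
+        # special '@attr', '@', and 'text()' steps plus full XPath expressions
+        # layered on top of the ordinary path syntax.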
+
+    def test_traversal_unbranching(self):
+        self.assertEqual(traverse_obj(_TEST_DATA, [(100, 1.2), all]), [100, 1.2],
+                         msg='`all` should give all results as list')
+        self.assertEqual(traverse_obj(_TEST_DATA, [(100, 1.2), any]), 100,
+                         msg='`any` should give the first result')
+        self.assertEqual(traverse_obj(_TEST_DATA, [100, all]), [100],
+                         msg='`all` should give list if non branching')
+        self.assertEqual(traverse_obj(_TEST_DATA, [100, any]), 100,
+                         msg='`any` should give single item if non branching')
+        self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 100), all]), [100],
+                         msg='`all` should filter `None` and empty dict')
+        self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 100), any]), 100,
+                         msg='`any` should filter `None` and empty dict')
+        self.assertEqual(traverse_obj(_TEST_DATA, [{
+            'all': [('dict', 'None', 100, 1.2), all],
+            'any': [('dict', 'None', 100, 1.2), any],
+        }]), {'all': [100, 1.2], 'any': 100},
+            msg='`all`/`any` should apply to each dict path separately')
+        self.assertEqual(traverse_obj(_TEST_DATA, [{
+            'all': [('dict', 'None', 100, 1.2), all],
+            'any': [('dict', 'None', 100, 1.2), any],
+        }], get_all=False), {'all': [100, 1.2], 'any': 100},
+            msg='`all`/`any` should apply to dict regardless of `get_all`')
+        self.assertIs(traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, T(float)]), None,
+                      msg='`all` should reset branching status')
+        self.assertIs(traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), any, T(float)]), None,
+                      msg='`any` should reset branching status')
+        self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, Ellipsis, T(float)]), [1.2],
+                         msg='`all` should allow further branching')
+        self.assertEqual(traverse_obj(_TEST_DATA, [('dict', 'None', 'urls', 'data'), any, Ellipsis, 'index']), [0, 1],
+                         msg='`any` should allow further branching')
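+
+        # `all` and `any` collapse a branched traversal back into a single
+        # value (the whole list, or its first element), after which the path
+        # may branch again, as the last two cases show.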
+
+    def test_traversal_morsel(self):
+        values = {
+            'expires': 'a',
+            'path': 'b',
+            'comment': 'c',
+            'domain': 'd',
+            'max-age': 'e',
+            'secure': 'f',
+            'httponly': 'g',
+            'version': 'h',
+            'samesite': 'i',
+        }
+        # SameSite added in Py3.8, breaks .update for 3.5-3.7
+        if sys.version_info < (3, 8):
+            del values['samesite']
+        morsel = compat_http_cookies.Morsel()
+        morsel.set(str('item_key'), 'item_value', 'coded_value')
+        morsel.update(values)
+        values['key'] = str('item_key')
+        values['value'] = 'item_value'
+        values = dict((str(k), v) for k, v in values.items())
+        # make test pass even without ordered dict
+        value_set = set(values.values())
+
+        for key, value in values.items():
+            self.assertEqual(traverse_obj(morsel, key), value,
+                             msg='Morsel should provide access to all values')
+        self.assertEqual(set(traverse_obj(morsel, Ellipsis)), value_set,
+                         msg='`...` should yield all values')
+        self.assertEqual(set(traverse_obj(morsel, lambda k, v: True)), value_set,
+                         msg='function key should yield all values')
+        self.assertIs(traverse_obj(morsel, [(None,), any]), morsel,
+                      msg='Morsel should not be implicitly changed to dict on usage')
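+
+        # http.cookies.Morsel is dict-like, so traversal reads its keys and
+        # attributes directly without first converting it to a plain dict.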
+
+    def test_get_first(self):
+        self.assertEqual(get_first([{'a': None}, {'a': 'spam'}], 'a'), 'spam')
+
+    def test_dict_get(self):
+        FALSE_VALUES = {
+            'none': None,
+            'false': False,
+            'zero': 0,
+            'empty_string': '',
+            'empty_list': [],
+        }
+        d = FALSE_VALUES.copy()
+        d['a'] = 42
+        self.assertEqual(dict_get(d, 'a'), 42)
+        self.assertEqual(dict_get(d, 'b'), None)
+        self.assertEqual(dict_get(d, 'b', 42), 42)
+        self.assertEqual(dict_get(d, ('a', )), 42)
+        self.assertEqual(dict_get(d, ('b', 'a', )), 42)
+        self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
+        self.assertEqual(dict_get(d, ('b', 'c', )), None)
+        self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
+        for key, false_value in FALSE_VALUES.items():
+            self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
+            self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
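+
+        # Unlike traverse_obj, dict_get takes flat key alternatives and (by
+        # default) skips false-ish values; for the dict above, for example,
+        # dict_get(d, ('b', 'a')) and traverse_obj(d, 'b', 'a') both give 42.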
+
+
+if __name__ == '__main__':
+    unittest.main()

+ 8 - 5
test/test_unicode_literals.py

@@ -2,19 +2,21 @@ from __future__ import unicode_literals
 
 # Allow direct execution
 import os
+import re
 import sys
 import unittest
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import io
-import re
+dirn = os.path.dirname
+
+rootDir = dirn(dirn(os.path.abspath(__file__)))
 
-rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.insert(0, rootDir)
 
 IGNORED_FILES = [
     'setup.py',  # http://bugs.python.org/issue13943
     'conf.py',
     'buildserver.py',
+    'get-pip.py',
 ]
 
 IGNORED_DIRS = [
@@ -23,6 +25,7 @@ IGNORED_DIRS = [
 ]
 
 from test.helper import assertRegexpMatches
+from youtube_dl.compat import compat_open as open
 
 
 class TestUnicodeLiterals(unittest.TestCase):
@@ -40,7 +43,7 @@ class TestUnicodeLiterals(unittest.TestCase):
                     continue
 
                 fn = os.path.join(dirpath, basename)
-                with io.open(fn, encoding='utf-8') as inf:
+                with open(fn, encoding='utf-8') as inf:
                     code = inf.read()
 
                 if "'" not in code and '"' not in code:

+ 309 - 61
test/test_utils.py

@@ -12,13 +12,16 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 # Various small unit tests
 import io
+import itertools
 import json
+import types
 import xml.etree.ElementTree
 
 from youtube_dl.utils import (
+    _UnsafeExtensionError,
     age_restricted,
     args_to_str,
-    encode_base_n,
+    base_url,
     caesar,
     clean_html,
     clean_podcast_url,
@@ -26,11 +29,12 @@ from youtube_dl.utils import (
     DateRange,
     detect_exe_version,
     determine_ext,
-    dict_get,
+    encode_base_n,
     encode_compat_str,
     encodeFilename,
     escape_rfc3986,
     escape_url,
+    expand_path,
     extract_attributes,
     ExtractorError,
     find_xpath_attr,
@@ -44,8 +48,11 @@ from youtube_dl.utils import (
     int_or_none,
     intlist_to_bytes,
     is_html,
+    join_nonempty,
     js_to_json,
+    LazyList,
     limit_length,
+    lowercase_escape,
     merge_dicts,
     mimetype2ext,
     month_by_name,
@@ -54,24 +61,26 @@ from youtube_dl.utils import (
     OnDemandPagedList,
     orderedSet,
     parse_age_limit,
+    parse_bitrate,
     parse_duration,
     parse_filesize,
+    parse_codecs,
     parse_count,
     parse_iso8601,
     parse_resolution,
-    parse_bitrate,
+    parse_qs,
     pkcs1pad,
-    read_batch_urls,
-    sanitize_filename,
-    sanitize_path,
-    sanitize_url,
-    expand_path,
     prepend_extension,
-    replace_extension,
+    read_batch_urls,
     remove_start,
     remove_end,
     remove_quotes,
+    replace_extension,
     rot47,
+    sanitize_filename,
+    sanitize_path,
+    sanitize_url,
+    sanitized_Request,
     shell_quote,
     smuggle_url,
     str_to_int,
@@ -79,19 +88,19 @@ from youtube_dl.utils import (
     strip_or_none,
     subtitles_filename,
     timeconvert,
+    try_call,
     unescapeHTML,
     unified_strdate,
     unified_timestamp,
     unsmuggle_url,
     uppercase_escape,
-    lowercase_escape,
     url_basename,
     url_or_none,
-    base_url,
     urljoin,
     urlencode_postdata,
     urshift,
     update_url_query,
+    variadic,
     version_tuple,
     xpath_with_ns,
     xpath_element,
@@ -104,7 +113,7 @@ from youtube_dl.utils import (
     cli_option,
     cli_valueless_option,
     cli_bool_option,
-    parse_codecs,
+    YoutubeDLHandler,
 )
 from youtube_dl.compat import (
     compat_chr,
@@ -112,12 +121,13 @@ from youtube_dl.compat import (
     compat_getenv,
     compat_os_name,
     compat_setenv,
+    compat_str,
     compat_urlparse,
-    compat_parse_qs,
 )
 
 
 class TestUtil(unittest.TestCase):
+
     def test_timeconvert(self):
         self.assertTrue(timeconvert('') is None)
         self.assertTrue(timeconvert('bougrg') is None)
@@ -236,6 +246,19 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(sanitize_url('httpss://foo.bar'), 'https://foo.bar')
         self.assertEqual(sanitize_url('rmtps://foo.bar'), 'rtmps://foo.bar')
         self.assertEqual(sanitize_url('https://foo.bar'), 'https://foo.bar')
+        self.assertEqual(sanitize_url('foo bar'), 'foo bar')
+
+    def test_sanitized_Request(self):
+        self.assertFalse(sanitized_Request('http://foo.bar').has_header('Authorization'))
+        self.assertFalse(sanitized_Request('http://:foo.bar').has_header('Authorization'))
+        self.assertEqual(sanitized_Request('http://@foo.bar').get_header('Authorization'),
+                         'Basic Og==')
+        self.assertEqual(sanitized_Request('http://:pass@foo.bar').get_header('Authorization'),
+                         'Basic OnBhc3M=')
+        self.assertEqual(sanitized_Request('http://user:@foo.bar').get_header('Authorization'),
+                         'Basic dXNlcjo=')
+        self.assertEqual(sanitized_Request('http://user:pass@foo.bar').get_header('Authorization'),
+                         'Basic dXNlcjpwYXNz')
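+        # The expected headers are just 'Basic ' + base64(userinfo), e.g.
+        # base64.b64encode(b'user:pass') == b'dXNlcjpwYXNz'.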
 
     def test_expand_path(self):
         def env(var):
@@ -249,6 +272,27 @@ class TestUtil(unittest.TestCase):
             expand_path('~/%s' % env('YOUTUBE_DL_EXPATH_PATH')),
             '%s/expanded' % compat_getenv('HOME'))
 
+    _uncommon_extensions = [
+        ('exe', 'abc.exe.ext'),
+        ('de', 'abc.de.ext'),
+        ('../.mp4', None),
+        ('..\\.mp4', None),
+    ]
+
+    def assertUnsafeExtension(self, ext=None):
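+        # Context manager wrapping assertRaises(_UnsafeExtensionError) which,
+        # on exit, also checks the extension reported by the raised error.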
+        assert_raises = self.assertRaises(_UnsafeExtensionError)
+        assert_raises.ext = ext
+        orig_exit = assert_raises.__exit__
+
+        def my_exit(self_, exc_type, exc_val, exc_tb):
+            did_raise = orig_exit(exc_type, exc_val, exc_tb)
+            if did_raise and assert_raises.ext is not None:
+                self.assertEqual(assert_raises.ext, assert_raises.exception.extension, 'Unsafe extension not as expected')
+            return did_raise
+
+        assert_raises.__exit__ = types.MethodType(my_exit, assert_raises)
+        return assert_raises
+
     def test_prepend_extension(self):
         self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
         self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext')
@@ -257,6 +301,19 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp')
         self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext')
 
+        # Test uncommon extensions
+        self.assertEqual(prepend_extension('abc.ext', 'bin'), 'abc.bin.ext')
+        for ext, result in self._uncommon_extensions:
+            with self.assertUnsafeExtension(ext):
+                prepend_extension('abc', ext)
+            if result:
+                self.assertEqual(prepend_extension('abc.ext', ext, 'ext'), result)
+            else:
+                with self.assertUnsafeExtension(ext):
+                    prepend_extension('abc.ext', ext, 'ext')
+            with self.assertUnsafeExtension(ext):
+                prepend_extension('abc.unexpected_ext', ext, 'ext')
+
     def test_replace_extension(self):
         self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp')
         self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp')
@@ -265,6 +322,16 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
         self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')
 
+        # Test uncommon extensions
+        self.assertEqual(replace_extension('abc.ext', 'bin'), 'abc.unknown_video')
+        for ext, _ in self._uncommon_extensions:
+            with self.assertUnsafeExtension(ext):
+                replace_extension('abc', ext)
+            with self.assertUnsafeExtension(ext):
+                replace_extension('abc.ext', ext, 'ext')
+            with self.assertUnsafeExtension(ext):
+                replace_extension('abc.unexpected_ext', ext, 'ext')
+
     def test_subtitles_filename(self):
         self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt'), 'abc.en.vtt')
         self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt', 'ext'), 'abc.en.vtt')
@@ -370,6 +437,9 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540)
         self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140)
         self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363)
+        self.assertEqual(unified_timestamp('December 31 1969 20:00:01 EDT'), 1)
+        self.assertEqual(unified_timestamp('Wednesday 31 December 1969 18:01:26 MDT'), 86)
+        self.assertEqual(unified_timestamp('12/31/1969 20:01:18 EDT', False), 78)
 
     def test_determine_ext(self):
         self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
@@ -491,11 +561,14 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(float_or_none(set()), None)
 
     def test_int_or_none(self):
+        self.assertEqual(int_or_none(42), 42)
         self.assertEqual(int_or_none('42'), 42)
         self.assertEqual(int_or_none(''), None)
         self.assertEqual(int_or_none(None), None)
         self.assertEqual(int_or_none([]), None)
         self.assertEqual(int_or_none(set()), None)
+        self.assertEqual(int_or_none('42', base=8), 34)
+        self.assertRaises(TypeError, int_or_none(42, base=8))
 
     def test_str_to_int(self):
         self.assertEqual(str_to_int('123,456'), 123456)
@@ -662,38 +735,36 @@ class TestUtil(unittest.TestCase):
         self.assertTrue(isinstance(data, bytes))
 
     def test_update_url_query(self):
-        def query_dict(url):
-            return compat_parse_qs(compat_urlparse.urlparse(url).query)
-        self.assertEqual(query_dict(update_url_query(
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
-            query_dict('http://example.com/path?quality=HD&format=mp4'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?quality=HD&format=mp4'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
-            query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?system=LINUX&system=WINDOWS'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': 'id,formats,subtitles'})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path?manifest=f4m', {'manifest': []})),
-            query_dict('http://example.com/path'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
-            query_dict('http://example.com/path?system=LINUX'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?system=LINUX'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': b'id,formats,subtitles'})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'width': 1080, 'height': 720})),
-            query_dict('http://example.com/path?width=1080&height=720'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?width=1080&height=720'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'bitrate': 5020.43})),
-            query_dict('http://example.com/path?bitrate=5020.43'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?bitrate=5020.43'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'test': '第二行тест'})),
-            query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
+            parse_qs('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
 
     def test_multipart_encode(self):
         self.assertEqual(
@@ -705,28 +776,6 @@ class TestUtil(unittest.TestCase):
         self.assertRaises(
             ValueError, multipart_encode, {b'field': b'value'}, boundary='value')
 
-    def test_dict_get(self):
-        FALSE_VALUES = {
-            'none': None,
-            'false': False,
-            'zero': 0,
-            'empty_string': '',
-            'empty_list': [],
-        }
-        d = FALSE_VALUES.copy()
-        d['a'] = 42
-        self.assertEqual(dict_get(d, 'a'), 42)
-        self.assertEqual(dict_get(d, 'b'), None)
-        self.assertEqual(dict_get(d, 'b', 42), 42)
-        self.assertEqual(dict_get(d, ('a', )), 42)
-        self.assertEqual(dict_get(d, ('b', 'a', )), 42)
-        self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
-        self.assertEqual(dict_get(d, ('b', 'c', )), None)
-        self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
-        for key, false_value in FALSE_VALUES.items():
-            self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
-            self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
-
     def test_merge_dicts(self):
         self.assertEqual(merge_dicts({'a': 1}, {'b': 2}), {'a': 1, 'b': 2})
         self.assertEqual(merge_dicts({'a': 1}, {'a': 2}), {'a': 1})
@@ -885,6 +934,111 @@ class TestUtil(unittest.TestCase):
         )
         self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
 
+    def test_remove_dot_segments(self):
+
+        def remove_dot_segments(p):
+            q = '' if p.startswith('/') else '/'
+            p = 'http://example.com' + q + p
+            p = compat_urlparse.urlsplit(YoutubeDLHandler._fix_path(p)).path
+            return p[1:] if q else p
+
+        self.assertEqual(remove_dot_segments('/a/b/c/./../../g'), '/a/g')
+        self.assertEqual(remove_dot_segments('mid/content=5/../6'), 'mid/6')
+        self.assertEqual(remove_dot_segments('/ad/../cd'), '/cd')
+        self.assertEqual(remove_dot_segments('/ad/../cd/'), '/cd/')
+        self.assertEqual(remove_dot_segments('/..'), '/')
+        self.assertEqual(remove_dot_segments('/./'), '/')
+        self.assertEqual(remove_dot_segments('/./a'), '/a')
+        self.assertEqual(remove_dot_segments('/abc/./.././d/././e/.././f/./../../ghi'), '/ghi')
+        self.assertEqual(remove_dot_segments('/'), '/')
+        self.assertEqual(remove_dot_segments('/t'), '/t')
+        self.assertEqual(remove_dot_segments('t'), 't')
+        self.assertEqual(remove_dot_segments(''), '')
+        self.assertEqual(remove_dot_segments('/../a/b/c'), '/a/b/c')
+        self.assertEqual(remove_dot_segments('../a'), 'a')
+        self.assertEqual(remove_dot_segments('./a'), 'a')
+        self.assertEqual(remove_dot_segments('.'), '')
+        self.assertEqual(remove_dot_segments('////'), '////')
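+
+        # This exercises the remove_dot_segments algorithm of RFC 3986
+        # section 5.2.4, reached here via YoutubeDLHandler._fix_path on a
+        # synthetic absolute URL.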
+
+    def test_js_to_json_vars_strings(self):
+        self.assertDictEqual(
+            json.loads(js_to_json(
+                '''{
+                    'null': a,
+                    'nullStr': b,
+                    'true': c,
+                    'trueStr': d,
+                    'false': e,
+                    'falseStr': f,
+                    'unresolvedVar': g,
+                }''',
+                {
+                    'a': 'null',
+                    'b': '"null"',
+                    'c': 'true',
+                    'd': '"true"',
+                    'e': 'false',
+                    'f': '"false"',
+                    'g': 'var',
+                }
+            )),
+            {
+                'null': None,
+                'nullStr': 'null',
+                'true': True,
+                'trueStr': 'true',
+                'false': False,
+                'falseStr': 'false',
+                'unresolvedVar': 'var'
+            }
+        )
+
+        self.assertDictEqual(
+            json.loads(js_to_json(
+                '''{
+                    'int': a,
+                    'intStr': b,
+                    'float': c,
+                    'floatStr': d,
+                }''',
+                {
+                    'a': '123',
+                    'b': '"123"',
+                    'c': '1.23',
+                    'd': '"1.23"',
+                }
+            )),
+            {
+                'int': 123,
+                'intStr': '123',
+                'float': 1.23,
+                'floatStr': '1.23',
+            }
+        )
+
+        self.assertDictEqual(
+            json.loads(js_to_json(
+                '''{
+                    'object': a,
+                    'objectStr': b,
+                    'array': c,
+                    'arrayStr': d,
+                }''',
+                {
+                    'a': '{}',
+                    'b': '"{}"',
+                    'c': '[]',
+                    'd': '"[]"',
+                }
+            )),
+            {
+                'object': {},
+                'objectStr': '{}',
+                'array': [],
+                'arrayStr': '[]',
+            }
+        )
+
     def test_js_to_json_realworld(self):
         inp = '''{
             'clip':{'provider':'pseudo'}
@@ -955,10 +1109,10 @@ class TestUtil(unittest.TestCase):
             !42: 42
         }''')
         self.assertEqual(json.loads(on), {
-            'a': 0,
-            'b': 1,
-            'c': 0,
-            'd': 42.42,
+            'a': True,
+            'b': False,
+            'c': False,
+            'd': True,
             'e': [],
             'f': "abc",
             'g': "",
@@ -1028,10 +1182,26 @@ class TestUtil(unittest.TestCase):
         on = js_to_json('{ "040": "040" }')
         on = js_to_json('{ "040": "040" }')
         self.assertEqual(json.loads(on), {'040': '040'})
         self.assertEqual(json.loads(on), {'040': '040'})
 
 
+        on = js_to_json('[1,//{},\n2]')
+        self.assertEqual(json.loads(on), [1, 2])
+
+        on = js_to_json(r'"\^\$\#"')
+        self.assertEqual(json.loads(on), R'^$#', msg='Unnecessary escapes should be stripped')
+
+        on = js_to_json('\'"\\""\'')
+        self.assertEqual(json.loads(on), '"""', msg='Unnecessary quote escape should be escaped')
+
     def test_js_to_json_malformed(self):
         self.assertEqual(js_to_json('42a1'), '42"a1"')
         self.assertEqual(js_to_json('42a-1'), '42"a"-1')
 
+    def test_js_to_json_template_literal(self):
+        self.assertEqual(js_to_json('`Hello ${name}`', {'name': '"world"'}), '"Hello world"')
+        self.assertEqual(js_to_json('`${name}${name}`', {'name': '"X"'}), '"XX"')
+        self.assertEqual(js_to_json('`${name}${name}`', {'name': '5'}), '"55"')
+        self.assertEqual(js_to_json('`${name}"${name}"`', {'name': '5'}), '"5\\"5\\""')
+        self.assertEqual(js_to_json('`${name}`', {}), '"name"')
+
     def test_extract_attributes(self):
         self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
         self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
@@ -1475,6 +1645,84 @@ Line 1
         self.assertEqual(clean_podcast_url('https://www.podtrac.com/pts/redirect.mp3/chtbl.com/track/5899E/traffic.megaphone.fm/HSW7835899191.mp3'), 'https://traffic.megaphone.fm/HSW7835899191.mp3')
         self.assertEqual(clean_podcast_url('https://play.podtrac.com/npr-344098539/edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3'), 'https://edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3')
 
+    def test_LazyList(self):
+        it = list(range(10))
+
+        self.assertEqual(list(LazyList(it)), it)
+        self.assertEqual(LazyList(it).exhaust(), it)
+        self.assertEqual(LazyList(it)[5], it[5])
+
+        self.assertEqual(LazyList(it)[5:], it[5:])
+        self.assertEqual(LazyList(it)[:5], it[:5])
+        self.assertEqual(LazyList(it)[::2], it[::2])
+        self.assertEqual(LazyList(it)[1::2], it[1::2])
+        self.assertEqual(LazyList(it)[5::-1], it[5::-1])
+        self.assertEqual(LazyList(it)[6:2:-2], it[6:2:-2])
+        self.assertEqual(LazyList(it)[::-1], it[::-1])
+
+        self.assertTrue(LazyList(it))
+        self.assertFalse(LazyList(range(0)))
+        self.assertEqual(len(LazyList(it)), len(it))
+        self.assertEqual(repr(LazyList(it)), repr(it))
+        self.assertEqual(compat_str(LazyList(it)), compat_str(it))
+
+        self.assertEqual(list(LazyList(it, reverse=True)), it[::-1])
+        self.assertEqual(list(reversed(LazyList(it))[::-1]), it)
+        self.assertEqual(list(reversed(LazyList(it))[1:3:7]), it[::-1][1:3:7])
+
+    def test_LazyList_laziness(self):
+
+        def test(ll, idx, val, cache):
+            self.assertEqual(ll[idx], val)
+            self.assertEqual(ll._cache, list(cache))
+
+        ll = LazyList(range(10))
+        test(ll, 0, 0, range(1))
+        test(ll, 5, 5, range(6))
+        test(ll, -3, 7, range(10))
+
+        ll = LazyList(range(10), reverse=True)
+        test(ll, -1, 0, range(1))
+        test(ll, 3, 6, range(10))
+
+        ll = LazyList(itertools.count())
+        test(ll, 10, 10, range(11))
+        ll = reversed(ll)
+        test(ll, -15, 14, range(15))
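+
+        # A LazyList consumes its iterator only as far as the requested index;
+        # the _cache comparisons above pin down exactly how much is realised,
+        # and reversal simply flips indexing over the same cached items.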
+
+    def test_try_call(self):
+        def total(*x, **kwargs):
+            return sum(x) + sum(kwargs.values())
+
+        self.assertEqual(try_call(None), None,
+                         msg='not a fn should give None')
+        self.assertEqual(try_call(lambda: 1), 1,
+                         msg='int fn with no expected_type should give int')
+        self.assertEqual(try_call(lambda: 1, expected_type=int), 1,
+                         msg='int fn with expected_type int should give int')
+        self.assertEqual(try_call(lambda: 1, expected_type=dict), None,
+                         msg='int fn with wrong expected_type should give None')
+        self.assertEqual(try_call(total, args=(0, 1, 0, ), expected_type=int), 1,
+                         msg='fn should accept arglist')
+        self.assertEqual(try_call(total, kwargs={'a': 0, 'b': 1, 'c': 0}, expected_type=int), 1,
+                         msg='fn should accept kwargs')
+        self.assertEqual(try_call(lambda: 1, expected_type=dict), None,
+                         msg='int fn with wrong expected_type should give None')
+        self.assertEqual(try_call(lambda x: {}, total, args=(42, ), expected_type=int), 42,
+                         msg='expect first int result with expected_type int')
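+        # try_call returns the first candidate's result that passes
+        # expected_type, skipping candidates that raise or mismatch.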
+
+    def test_variadic(self):
+        self.assertEqual(variadic(None), (None, ))
+        self.assertEqual(variadic('spam'), ('spam', ))
+        self.assertEqual(variadic('spam', allowed_types=dict), 'spam')
+        self.assertEqual(variadic('spam', allowed_types=[dict]), 'spam')
+
+    def test_join_nonempty(self):
+        self.assertEqual(join_nonempty('a', 'b'), 'a-b')
+        self.assertEqual(join_nonempty(
+            'a', 'b', 'c', 'd',
+            from_dict={'a': 'c', 'c': [], 'b': 'd', 'd': None}), 'c-d')
+
 
 if __name__ == '__main__':
     unittest.main()

+ 2 - 3
test/test_write_annotations.py

@@ -11,12 +11,11 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from test.helper import get_params, try_rm
 
 
-import io
-
 import xml.etree.ElementTree
 
 import youtube_dl.YoutubeDL
 import youtube_dl.extractor
+from youtube_dl.compat import compat_open as open
 
 
 class YoutubeDL(youtube_dl.YoutubeDL):
@@ -51,7 +50,7 @@ class TestAnnotations(unittest.TestCase):
         ydl.download([TEST_ID])
         self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
         annoxml = None
-        with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
+        with open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
             annoxml = xml.etree.ElementTree.parse(annof)
         self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
         root = annoxml.getroot()

+ 96 - 3
test/test_youtube_signature.py

@@ -8,14 +8,18 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import io
 import re
 import string
 
+from youtube_dl.compat import (
+    compat_open as open,
+    compat_str,
+    compat_urlretrieve,
+)
+
 from test.helper import FakeYDL
 from youtube_dl.extractor import YoutubeIE
 from youtube_dl.jsinterp import JSInterpreter
-from youtube_dl.compat import compat_str, compat_urlretrieve
 
 _SIG_TESTS = [
     (
@@ -66,6 +70,10 @@ _SIG_TESTS = [
 ]
 
 _NSIG_TESTS = [
+    (
+        'https://www.youtube.com/s/player/7862ca1f/player_ias.vflset/en_US/base.js',
+        'X_LCxVDjAavgE5t', 'yxJ1dM6iz5ogUg',
+    ),
     (
         'https://www.youtube.com/s/player/9216d1f7/player_ias.vflset/en_US/base.js',
         'SLp9F5bwjAdhE9F-', 'gWnb9IK2DJ8Q1w',
@@ -90,12 +98,97 @@ _NSIG_TESTS = [
         'https://www.youtube.com/s/player/e06dea74/player_ias.vflset/en_US/base.js',
         'AiuodmaDDYw8d3y4bf', 'ankd8eza2T6Qmw',
     ),
+    (
+        'https://www.youtube.com/s/player/5dd88d1d/player-plasma-ias-phone-en_US.vflset/base.js',
+        'kSxKFLeqzv_ZyHSAt', 'n8gS8oRlHOxPFA',
+    ),
+    (
+        'https://www.youtube.com/s/player/324f67b9/player_ias.vflset/en_US/base.js',
+        'xdftNy7dh9QGnhW', '22qLGxrmX8F1rA',
+    ),
+    (
+        'https://www.youtube.com/s/player/4c3f79c5/player_ias.vflset/en_US/base.js',
+        'TDCstCG66tEAO5pR9o', 'dbxNtZ14c-yWyw',
+    ),
+    (
+        'https://www.youtube.com/s/player/c81bbb4a/player_ias.vflset/en_US/base.js',
+        'gre3EcLurNY2vqp94', 'Z9DfGxWP115WTg',
+    ),
+    (
+        'https://www.youtube.com/s/player/1f7d5369/player_ias.vflset/en_US/base.js',
+        'batNX7sYqIJdkJ', 'IhOkL_zxbkOZBw',
+    ),
+    (
+        'https://www.youtube.com/s/player/009f1d77/player_ias.vflset/en_US/base.js',
+        '5dwFHw8aFWQUQtffRq', 'audescmLUzI3jw',
+    ),
+    (
+        'https://www.youtube.com/s/player/dc0c6770/player_ias.vflset/en_US/base.js',
+        '5EHDMgYLV6HPGk_Mu-kk', 'n9lUJLHbxUI0GQ',
+    ),
+    (
+        'https://www.youtube.com/s/player/c2199353/player_ias.vflset/en_US/base.js',
+        '5EHDMgYLV6HPGk_Mu-kk', 'AD5rgS85EkrE7',
+    ),
+    (
+        'https://www.youtube.com/s/player/113ca41c/player_ias.vflset/en_US/base.js',
+        'cgYl-tlYkhjT7A', 'hI7BBr2zUgcmMg',
+    ),
+    (
+        'https://www.youtube.com/s/player/c57c113c/player_ias.vflset/en_US/base.js',
+        '-Txvy6bT5R6LqgnQNx', 'dcklJCnRUHbgSg',
+    ),
+    (
+        'https://www.youtube.com/s/player/5a3b6271/player_ias.vflset/en_US/base.js',
+        'B2j7f_UPT4rfje85Lu_e', 'm5DmNymaGQ5RdQ',
+    ),
+    (
+        'https://www.youtube.com/s/player/dac945fd/player_ias.vflset/en_US/base.js',
+        'o8BkRxXhuYsBCWi6RplPdP', '3Lx32v_hmzTm6A',
+    ),
+    (
+        'https://www.youtube.com/s/player/6f20102c/player_ias.vflset/en_US/base.js',
+        'lE8DhoDmKqnmJJ', 'pJTTX6XyJP2BYw',
+    ),
+    (
+        'https://www.youtube.com/s/player/cfa9e7cb/player_ias.vflset/en_US/base.js',
+        'qO0NiMtYQ7TeJnfFG2', 'k9cuJDHNS5O7kQ',
+    ),
+    (
+        'https://www.youtube.com/s/player/b7910ca8/player_ias.vflset/en_US/base.js',
+        '_hXMCwMt9qE310D', 'LoZMgkkofRMCZQ',
+    ),
+    (
+        'https://www.youtube.com/s/player/590f65a6/player_ias.vflset/en_US/base.js',
+        '1tm7-g_A9zsI8_Lay_', 'xI4Vem4Put_rOg',
+    ),
+    (
+        'https://www.youtube.com/s/player/b22ef6e7/player_ias.vflset/en_US/base.js',
+        'b6HcntHGkvBLk_FRf', 'kNPW6A7FyP2l8A',
+    ),
+    (
+        'https://www.youtube.com/s/player/3400486c/player_ias.vflset/en_US/base.js',
+        'lL46g3XifCKUZn1Xfw', 'z767lhet6V2Skl',
+    ),
+    (
+        'https://www.youtube.com/s/player/5604538d/player_ias.vflset/en_US/base.js',
+        '7X-he4jjvMx7BCX', 'sViSydX8IHtdWA',
+    ),
+    (
+        'https://www.youtube.com/s/player/20dfca59/player_ias.vflset/en_US/base.js',
+        '-fLCxedkAk4LUTK2', 'O8kfRq1y1eyHGw',
+    ),
+    (
+        'https://www.youtube.com/s/player/b12cc44b/player_ias.vflset/en_US/base.js',
+        'keLa5R2U00sR9SQK', 'N1OGyujjEwMnLw',
+    ),
 ]
 
 
 class TestPlayerInfo(unittest.TestCase):
     def test_youtube_extract_player_info(self):
         PLAYER_URLS = (
+            ('https://www.youtube.com/s/player/4c3f79c5/player_ias.vflset/en_US/base.js', '4c3f79c5'),
             ('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/en_US/base.js', '64dddad9'),
             ('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/fr_FR/base.js', '64dddad9'),
             ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-en_US.vflset/base.js', '64dddad9'),
@@ -142,7 +235,7 @@ def t_factory(name, sig_func, url_pattern):
 
             if not os.path.exists(fn):
                 compat_urlretrieve(url, fn)
-            with io.open(fn, encoding='utf-8') as testf:
+            with open(fn, encoding='utf-8') as testf:
                 jscode = testf.read()
             self.assertEqual(sig_func(jscode, sig_input), expected_sig)
 

+ 35 - 0
test/testdata/mpd/range_only.mpd

@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<!-- MPD file Generated with GPAC version 1.0.1-revrelease at 2021-11-27T20:53:11.690Z -->
+<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" minBufferTime="PT1.500S" type="static" mediaPresentationDuration="PT0H0M30.196S" maxSegmentDuration="PT0H0M10.027S" profiles="urn:mpeg:dash:profile:full:2011">
+ <ProgramInformation moreInformationURL="http://gpac.io">
+  <Title>manifest.mpd generated by GPAC</Title>
+ </ProgramInformation>
+
+ <Period duration="PT0H0M30.196S">
+  <AdaptationSet segmentAlignment="true" maxWidth="768" maxHeight="432" maxFrameRate="30000/1001" par="16:9" lang="und" startWithSAP="1">
+   <Representation id="1" mimeType="video/mp4" codecs="avc1.4D401E" width="768" height="432" frameRate="30000/1001" sar="1:1" bandwidth="526987">
+    <BaseURL>video_dashinit.mp4</BaseURL>
+    <SegmentList timescale="90000" duration="900000">
+     <Initialization range="0-881"/>
+     <SegmentURL mediaRange="882-876094" indexRange="882-925"/>
+     <SegmentURL mediaRange="876095-1466732" indexRange="876095-876138"/>
+     <SegmentURL mediaRange="1466733-1953615" indexRange="1466733-1466776"/>
+     <SegmentURL mediaRange="1953616-1994211" indexRange="1953616-1953659"/>
+    </SegmentList>
+   </Representation>
+  </AdaptationSet>
+  <AdaptationSet segmentAlignment="true" lang="und" startWithSAP="1">
+   <Representation id="2" mimeType="audio/mp4" codecs="mp4a.40.2" audioSamplingRate="48000" bandwidth="98096">
+    <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
+    <BaseURL>audio_dashinit.mp4</BaseURL>
+    <SegmentList timescale="48000" duration="480000">
+     <Initialization range="0-752"/>
+     <SegmentURL mediaRange="753-124129" indexRange="753-796"/>
+     <SegmentURL mediaRange="124130-250544" indexRange="124130-124173"/>
+     <SegmentURL mediaRange="250545-374929" indexRange="250545-250588"/>
+    </SegmentList>
+   </Representation>
+  </AdaptationSet>
+ </Period>
+</MPD>
+

+ 351 - 0
test/testdata/mpd/subtitles.mpd

@@ -0,0 +1,351 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Created with Unified Streaming Platform (version=1.10.18-20255) -->
+<MPD
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xmlns="urn:mpeg:dash:schema:mpd:2011"
+  xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-DASH_schema_files/DASH-MPD.xsd"
+  type="static"
+  mediaPresentationDuration="PT14M48S"
+  maxSegmentDuration="PT1M"
+  minBufferTime="PT10S"
+  profiles="urn:mpeg:dash:profile:isoff-live:2011">
+  <Period
+    id="1"
+    duration="PT14M48S">
+    <BaseURL>dash/</BaseURL>
+    <AdaptationSet
+      id="1"
+      group="1"
+      contentType="audio"
+      segmentAlignment="true"
+      audioSamplingRate="48000"
+      mimeType="audio/mp4"
+      codecs="mp4a.40.2"
+      startWithSAP="1">
+      <AudioChannelConfiguration
+        schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011"
+        value="2" />
+      <Role schemeIdUri="urn:mpeg:dash:role:2011" value="main" />
+      <SegmentTemplate
+        timescale="48000"
+        initialization="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$.dash"
+        media="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$-$Time$.dash">
+        <SegmentTimeline>
+          <S t="0" d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="96256" r="2" />
+          <S d="95232" />
+          <S d="3584" />
+        </SegmentTimeline>
+      </SegmentTemplate>
+      <Representation
+        id="audio=128001"
+        bandwidth="128001">
+      </Representation>
+    </AdaptationSet>
+    <AdaptationSet
+      id="2"
+      group="3"
+      contentType="text"
+      lang="en"
+      mimeType="application/mp4"
+      codecs="stpp"
+      startWithSAP="1">
+      <Role schemeIdUri="urn:mpeg:dash:role:2011" value="subtitle" />
+      <SegmentTemplate
+        timescale="1000"
+        initialization="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$.dash"
+        media="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$-$Time$.dash">
+        <SegmentTimeline>
+          <S t="0" d="60000" r="9" />
+          <S d="24000" />
+        </SegmentTimeline>
+      </SegmentTemplate>
+      <Representation
+        id="textstream_eng=1000"
+        bandwidth="1000">
+      </Representation>
+    </AdaptationSet>
+    <AdaptationSet
+      id="3"
+      group="2"
+      contentType="video"
+      par="960:409"
+      minBandwidth="100000"
+      maxBandwidth="4482000"
+      maxWidth="1689"
+      maxHeight="720"
+      segmentAlignment="true"
+      mimeType="video/mp4"
+      codecs="avc1.4D401F"
+      startWithSAP="1">
+      <Role schemeIdUri="urn:mpeg:dash:role:2011" value="main" />
+      <SegmentTemplate
+        timescale="12288"
+        initialization="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$.dash"
+        media="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$-$Time$.dash">
+        <SegmentTimeline>
+          <S t="0" d="24576" r="443" />
+        </SegmentTimeline>
+      </SegmentTemplate>
+      <Representation
+        id="video=100000"
+        bandwidth="100000"
+        width="336"
+        height="144"
+        sar="2880:2863"
+        scanType="progressive">
+      </Representation>
+      <Representation
+        id="video=326000"
+        bandwidth="326000"
+        width="562"
+        height="240"
+        sar="115200:114929"
+        scanType="progressive">
+      </Representation>
+      <Representation
+        id="video=698000"
+        bandwidth="698000"
+        width="844"
+        height="360"
+        sar="86400:86299"
+        scanType="progressive">
+      </Representation>
+      <Representation
+        id="video=1493000"
+        bandwidth="1493000"
+        width="1126"
+        height="480"
+        sar="230400:230267"
+        scanType="progressive">
+      </Representation>
+      <Representation
+        id="video=4482000"
+        bandwidth="4482000"
+        width="1688"
+        height="720"
+        sar="86400:86299"
+        scanType="progressive">
+      </Representation>
+    </AdaptationSet>
+  </Period>
+</MPD>

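The long SegmentTimeline above is run-length encoded: each <S> entry contributes r+1 segments of duration d, starting at t or at the running total, and each start time is substituted into the $RepresentationID$/$Time$ placeholders of the media template. A small sketch of that expansion (illustrative only, not the extractor code):

    # Expand SegmentTimeline (t, d, r) entries into media URLs, using the
    # template convention shown in subtitles.mpd.
    def expand_timeline(entries, media_template, representation_id):
        # entries: dicts with keys 't' (optional), 'd', 'r' (optional)
        urls, t = [], 0
        for s in entries:
            t = s.get('t', t)                   # explicit start overrides running total
            for _ in range(s.get('r', 0) + 1):  # r counts extra repeats
                urls.append(media_template
                            .replace('$RepresentationID$', representation_id)
                            .replace('$Time$', str(t)))
                t += s['d']
        return urls

    urls = expand_timeline(
        [{'t': 0, 'd': 60000, 'r': 9}, {'d': 24000}],
        '3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$-$Time$.dash',
        'textstream_eng=1000')
    # 11 subtitle segments: ...-0.dash, ...-60000.dash, ..., ...-600000.dash
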
+ 32 - 0
test/testdata/mpd/url_and_range.mpd

@@ -0,0 +1,32 @@
+<?xml version="1.0" ?>
+<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" profiles="urn:mpeg:dash:profile:isoff-live:2011" minBufferTime="PT10.01S" mediaPresentationDuration="PT30.097S" type="static">
+  <!-- Created with Bento4 mp4-dash.py, VERSION=2.0.0-639 -->
+  <Period>
+    <!-- Video -->
+    <AdaptationSet mimeType="video/mp4" segmentAlignment="true" startWithSAP="1" maxWidth="768" maxHeight="432">
+      <Representation id="video-avc1" codecs="avc1.4D401E" width="768" height="432" scanType="progressive" frameRate="30000/1001" bandwidth="699597">
+        <SegmentList timescale="1000" duration="10010">
+          <Initialization sourceURL="video-frag.mp4" range="36-746"/>
+          <SegmentURL media="video-frag.mp4" mediaRange="747-876117"/>
+          <SegmentURL media="video-frag.mp4" mediaRange="876118-1466913"/>
+          <SegmentURL media="video-frag.mp4" mediaRange="1466914-1953954"/>
+          <SegmentURL media="video-frag.mp4" mediaRange="1953955-1994652"/>
+        </SegmentList>
+      </Representation>
+    </AdaptationSet>
+    <!-- Audio -->
+    <AdaptationSet mimeType="audio/mp4" startWithSAP="1" segmentAlignment="true">
+      <Representation id="audio-und-mp4a.40.2" codecs="mp4a.40.2" bandwidth="98808" audioSamplingRate="48000">
+        <AudioChannelConfiguration schemeIdUri="urn:mpeg:mpegB:cicp:ChannelConfiguration" value="2"/>
+        <SegmentList timescale="1000" duration="10010">
+          <Initialization sourceURL="audio-frag.mp4" range="32-623"/>
+          <SegmentURL media="audio-frag.mp4" mediaRange="624-124199"/>
+          <SegmentURL media="audio-frag.mp4" mediaRange="124200-250303"/>
+          <SegmentURL media="audio-frag.mp4" mediaRange="250304-374365"/>
+          <SegmentURL media="audio-frag.mp4" mediaRange="374366-374836"/>
+        </SegmentList>
+      </Representation>
+    </AdaptationSet>
+  </Period>
+</MPD>
+

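url_and_range.mpd combines per-segment media URLs with mediaRange byte ranges, so a fragment downloader must issue ranged requests against the named files. A hedged stdlib illustration (the base URL is a placeholder; the fixture does not ship a server):

    # Fetch one fragment of url_and_range.mpd with an HTTP Range request.
    import urllib.request

    BASE = 'http://localhost:8000/'             # hypothetical test server
    req = urllib.request.Request(
        BASE + 'video-frag.mp4',
        headers={'Range': 'bytes=747-876117'})  # first media segment
    with urllib.request.urlopen(req) as resp:
        assert resp.status in (206, 200)        # 206 Partial Content when ranges are honoured
        fragment = resp.read()
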
+ 351 - 105
youtube_dl/YoutubeDL.py

@@ -4,11 +4,10 @@
 from __future__ import absolute_import, unicode_literals

 import collections
-import contextlib
 import copy
 import datetime
 import errno
-import fileinput
+import functools
 import io
 import itertools
 import json
@@ -26,25 +25,39 @@ import tokenize
 import traceback
 import random

+try:
+    from ssl import OPENSSL_VERSION
+except ImportError:
+    # Must be Python 2.6, should be built against 1.0.2
+    OPENSSL_VERSION = 'OpenSSL 1.0.2(?)'
 from string import ascii_letters

 from .compat import (
     compat_basestring,
-    compat_cookiejar,
+    compat_collections_chain_map as ChainMap,
+    compat_filter as filter,
     compat_get_terminal_size,
     compat_http_client,
+    compat_http_cookiejar_Cookie,
+    compat_http_cookies_SimpleCookie,
+    compat_integer_types,
     compat_kwargs,
+    compat_map as map,
     compat_numeric_types,
+    compat_open as open,
     compat_os_name,
     compat_str,
     compat_tokenize_tokenize,
     compat_urllib_error,
+    compat_urllib_parse,
     compat_urllib_request,
     compat_urllib_request_DataHandler,
 )
 from .utils import (
+    _UnsafeExtensionError,
     age_restricted,
     args_to_str,
+    bug_reports_message,
     ContentTooShortError,
     date_from_str,
     DateRange,
@@ -62,7 +75,9 @@ from .utils import (
     GeoRestrictedError,
     int_or_none,
     ISO3166Utils,
+    join_nonempty,
     locked_file,
+    LazyList,
     make_HTTPS_handler,
     MaxDownloadsReached,
     orderedSet,
@@ -73,6 +88,7 @@ from .utils import (
     PostProcessingError,
     preferredencoding,
     prepend_extension,
+    process_communicate_or_kill,
     register_socks_protocols,
     render_table,
     replace_extension,
@@ -84,6 +100,7 @@ from .utils import (
     std_headers,
     str_or_none,
     subtitles_filename,
+    traverse_obj,
     UnavailableVideoError,
     url_basename,
     version_tuple,
@@ -93,6 +110,7 @@ from .utils import (
     YoutubeDLCookieProcessor,
     YoutubeDLHandler,
     YoutubeDLRedirectHandler,
+    ytdl_is_updateable,
 )
 from .cache import Cache
 from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
@@ -113,6 +131,20 @@ if compat_os_name == 'nt':
     import ctypes


+def _catch_unsafe_file_extension(func):
+    @functools.wraps(func)
+    def wrapper(self, *args, **kwargs):
+        try:
+            return func(self, *args, **kwargs)
+        except _UnsafeExtensionError as error:
+            self.report_error(
+                '{0} found; to avoid damaging your system, this value is disallowed.'
+                ' If you believe this is an error{1}'.format(
+                    error_to_compat_str(error), bug_reports_message(',')))
+
+    return wrapper
+
+
 class YoutubeDL(object):
     """YoutubeDL class.

@@ -362,6 +394,9 @@ class YoutubeDL(object):
         self.params.update(params)
         self.cache = Cache(self)

+        self._header_cookies = []
+        self._load_cookies_from_headers(self.params.get('http_headers'))
+
         def check_deprecated(param, option, suggestion):
             if self.params.get(param) is not None:
                 self.report_warning(
@@ -568,7 +603,7 @@ class YoutubeDL(object):
         if self.params.get('cookiefile') is not None:
             self.cookiejar.save(ignore_discard=True, ignore_expires=True)

-    def trouble(self, message=None, tb=None):
+    def trouble(self, *args, **kwargs):
         """Determine action to take when a download problem appears.
         """Determine action to take when a download problem appears.
 
 
         Depending on if the downloader has been configured to ignore
         Depending on if the downloader has been configured to ignore
@@ -577,6 +612,11 @@

         tb, if given, is additional traceback information.
         """
+        # message=None, tb=None, is_error=True
+        message = args[0] if len(args) > 0 else kwargs.get('message', None)
+        tb = args[1] if len(args) > 1 else kwargs.get('tb', None)
+        is_error = args[2] if len(args) > 2 else kwargs.get('is_error', True)
+
         if message is not None:
             self.to_stderr(message)
         if self.params.get('verbose'):
@@ -589,7 +629,10 @@ class YoutubeDL(object):
                 else:
                     tb_data = traceback.format_list(traceback.extract_stack())
                     tb = ''.join(tb_data)
-            self.to_stderr(tb)
+            if tb:
+                self.to_stderr(tb)
+        if not is_error:
+            return
         if not self.params.get('ignoreerrors', False):
             if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                 exc_info = sys.exc_info()[1].exc_info
@@ -598,11 +641,18 @@ class YoutubeDL(object):
             raise DownloadError(message, exc_info)
         self._download_retcode = 1

-    def report_warning(self, message):
+    def report_warning(self, message, only_once=False, _cache={}):
         '''
         Print the message to stderr, it will be prefixed with 'WARNING:'
         If stderr is a tty file the 'WARNING:' will be colored
         '''
+        if only_once:
+            m_hash = hash((self, message))
+            m_cnt = _cache.setdefault(m_hash, 0)
+            _cache[m_hash] = m_cnt + 1
+            if m_cnt > 0:
+                return
+
         if self.params.get('logger') is not None:
             self.params['logger'].warning(message)
         else:
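
The only_once flag leans on a mutable default argument: _cache persists across calls, keyed by hash((self, message)), so a repeated warning is printed a single time per process. Stripped to its essentials:

    # Deduplicate messages with a function-level cache (the shared mutable
    # default is exactly what makes the memo work here).
    def warn(message, only_once=False, _cache={}):
        if only_once:
            count = _cache.setdefault(message, 0)
            _cache[message] = count + 1
            if count > 0:
                return
        print('WARNING: %s' % message)

    warn('cookies passed as header', only_once=True)  # printed
    warn('cookies passed as header', only_once=True)  # suppressed
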
@@ -615,7 +665,7 @@ class YoutubeDL(object):
             warning_message = '%s %s' % (_msg_header, message)
             self.to_stderr(warning_message)

-    def report_error(self, message, tb=None):
+    def report_error(self, message, *args, **kwargs):
         '''
         Do the same as trouble, but prefixes the message with 'ERROR:', colored
         in red if stderr is a tty file.
@@ -624,8 +674,18 @@ class YoutubeDL(object):
             _msg_header = '\033[0;31mERROR:\033[0m'
         else:
             _msg_header = 'ERROR:'
-        error_message = '%s %s' % (_msg_header, message)
-        self.trouble(error_message, tb)
+        kwargs['message'] = '%s %s' % (_msg_header, message)
+        self.trouble(*args, **kwargs)
+
+    def report_unscoped_cookies(self, *args, **kwargs):
+        # message=None, tb=False, is_error=False
+        if len(args) <= 2:
+            kwargs.setdefault('is_error', False)
+            if len(args) <= 0:
+                kwargs.setdefault(
+                    'message',
+                    'Unscoped cookies are not allowed: please specify some sort of scoping')
+        self.report_error(*args, **kwargs)

     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
@@ -720,7 +780,7 @@ class YoutubeDL(object):
                 filename = encodeFilename(filename, True).decode(preferredencoding())
             return sanitize_path(filename)
         except ValueError as err:
-            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
+            self.report_error('Error in output template: ' + error_to_compat_str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
             return None

     def _match_entry(self, info_dict, incomplete):
@@ -821,7 +881,7 @@ class YoutubeDL(object):
                 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                 self.report_error(msg)
             except ExtractorError as e:  # An error we somewhat expected
-                self.report_error(compat_str(e), e.format_traceback())
+                self.report_error(compat_str(e), tb=e.format_traceback())
             except MaxDownloadsReached:
                 raise
             except Exception as e:
@@ -831,8 +891,83 @@ class YoutubeDL(object):
                     raise
         return wrapper

+    def _remove_cookie_header(self, http_headers):
+        """Filters out `Cookie` header from an `http_headers` dict
+        The `Cookie` header is removed to prevent leaks as a result of unscoped cookies.
+        See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
+
+        @param http_headers     An `http_headers` dict from which any `Cookie` header
+                                should be removed, or None
+        """
+        return dict(filter(lambda pair: pair[0].lower() != 'cookie', (http_headers or {}).items()))
+
+    def _load_cookies(self, data, **kwargs):
+        """Loads cookies from a `Cookie` header
+
+        This tries to work around the security vulnerability of passing cookies to every domain.
+
+        @param data         The Cookie header as a string to load the cookies from
+        @param autoscope    If `False`, scope cookies using Set-Cookie syntax and error for cookie without domains
+                            If `True`, save cookies for later to be stored in the jar with a limited scope
+                            If a URL, save cookies in the jar with the domain of the URL
+        """
+        # autoscope=True (kw-only)
+        autoscope = kwargs.get('autoscope', True)
+
+        for cookie in compat_http_cookies_SimpleCookie(data).values() if data else []:
+            if autoscope and any(cookie.values()):
+                raise ValueError('Invalid syntax in Cookie Header')
+
+            domain = cookie.get('domain') or ''
+            expiry = cookie.get('expires')
+            if expiry == '':  # 0 is valid so we check for `''` explicitly
+                expiry = None
+            prepared_cookie = compat_http_cookiejar_Cookie(
+                cookie.get('version') or 0, cookie.key, cookie.value, None, False,
+                domain, True, True, cookie.get('path') or '', bool(cookie.get('path')),
+                bool(cookie.get('secure')), expiry, False, None, None, {})
+
+            if domain:
+                self.cookiejar.set_cookie(prepared_cookie)
+            elif autoscope is True:
+                self.report_warning(
+                    'Passing cookies as a header is a potential security risk; '
+                    'they will be scoped to the domain of the downloaded urls. '
+                    'Please consider loading cookies from a file or browser instead.',
+                    only_once=True)
+                self._header_cookies.append(prepared_cookie)
+            elif autoscope:
+                self.report_warning(
+                    'The extractor result contains an unscoped cookie as an HTTP header. '
+                    'If you are specifying an input URL, ' + bug_reports_message(),
+                    only_once=True)
+                self._apply_header_cookies(autoscope, [prepared_cookie])
+            else:
+                self.report_unscoped_cookies()
+
+    def _load_cookies_from_headers(self, headers):
+        self._load_cookies(traverse_obj(headers, 'cookie', casesense=False))
+
+    def _apply_header_cookies(self, url, cookies=None):
+        """This method applies stray header cookies to the provided url
+
+        This loads header cookies and scopes them to the domain provided in `url`.
+        While this is not ideal, it helps reduce the risk of them being sent to
+        an unintended destination.
+        """
+        parsed = compat_urllib_parse.urlparse(url)
+        if not parsed.hostname:
+            return
+
+        for cookie in map(copy.copy, cookies or self._header_cookies):
+            cookie.domain = '.' + parsed.hostname
+            self.cookiejar.set_cookie(cookie)
+
     @__handle_extraction_exceptions
     def __extract_info(self, url, ie, download, extra_info, process):
+        # Compat with passing cookies in http headers
+        self._apply_header_cookies(url)
+
         ie_result = ie.extract(url)
         if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
             return
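
Taken together, the three helpers above implement the mitigation for yt-dlp advisory GHSA-v8mc-9377-rwjj: Cookie headers are stripped from http_headers, parsed once, and re-added to the jar scoped to the host of each URL actually downloaded. A sketch of that flow using the Python 3 stdlib equivalents of the compat_http_* names (an illustration, not the class's actual code path):

    # Scope an unscoped header cookie to the host of the downloaded URL.
    from http.cookiejar import Cookie, CookieJar
    from http.cookies import SimpleCookie
    from urllib.parse import urlparse

    jar = CookieJar()
    header = 'session=abc123'                   # unscoped Cookie header value
    url = 'https://example.com/watch?v=xyz'     # URL actually being downloaded

    for morsel in SimpleCookie(header).values():
        host = urlparse(url).hostname
        jar.set_cookie(Cookie(
            0, morsel.key, morsel.value, None, False,
            '.' + host, True, True,             # scoped to the target host only
            '/', False, False, None, False, None, None, {}))

    print([(c.domain, c.name) for c in jar])    # [('.example.com', 'session')]
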
@@ -858,7 +993,7 @@

     def process_ie_result(self, ie_result, download=True, extra_info={}):
         """
-        Take the result of the ie(may be modified) and resolve all unresolved
+        Take the result of the ie (may be modified) and resolve all unresolved
         references (URLs, playlist items).

         It will also download the videos if 'download'.
@@ -920,8 +1055,8 @@ class YoutubeDL(object):
         elif result_type in ('playlist', 'multi_video'):
             # Protect from infinite recursion due to recursively nested playlists
             # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
-            webpage_url = ie_result['webpage_url']
-            if webpage_url in self._playlist_urls:
+            webpage_url = ie_result.get('webpage_url')  # not all pl/mv have this
+            if webpage_url and webpage_url in self._playlist_urls:
                 self.to_screen(
                     '[download] Skipping already downloaded playlist: %s'
                     % ie_result.get('title') or ie_result.get('id'))
@@ -929,6 +1064,10 @@

             self._playlist_level += 1
             self._playlist_urls.add(webpage_url)
+            new_result = dict((k, v) for k, v in extra_info.items() if k not in ie_result)
+            if new_result:
+                new_result.update(ie_result)
+                ie_result = new_result
             try:
                 return self.__process_playlist(ie_result, download)
             finally:
@@ -1385,17 +1524,16 @@ class YoutubeDL(object):
                         'abr': formats_info[1].get('abr'),
                         'ext': output_ext,
                     }
-                video_selector, audio_selector = map(_build_selector_function, selector.selector)

                 def selector_function(ctx):
-                    for pair in itertools.product(
-                            video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
+                    selector_fn = lambda x: _build_selector_function(x)(ctx)
+                    for pair in itertools.product(*map(selector_fn, selector.selector)):
                         yield _merge(pair)

             filters = [self._build_format_filter(f) for f in selector.filters]

             def final_selector(ctx):
-                ctx_copy = copy.deepcopy(ctx)
+                ctx_copy = dict(ctx)
                 for _filter in filters:
                     ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                 return selector_function(ctx_copy)
@@ -1430,29 +1568,73 @@ class YoutubeDL(object):
         parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
         return _build_selector_function(parsed_selector)

-    def _calc_headers(self, info_dict):
-        res = std_headers.copy()
-
-        add_headers = info_dict.get('http_headers')
-        if add_headers:
-            res.update(add_headers)
+    def _calc_headers(self, info_dict, load_cookies=False):
+        if load_cookies:  # For --load-info-json
+            # load cookies from http_headers in legacy info.json
+            self._load_cookies(traverse_obj(info_dict, ('http_headers', 'Cookie'), casesense=False),
+                               autoscope=info_dict['url'])
+            # load scoped cookies from info.json
+            self._load_cookies(info_dict.get('cookies'), autoscope=False)

-        cookies = self._calc_cookies(info_dict)
+        cookies = self.cookiejar.get_cookies_for_url(info_dict['url'])
         if cookies:
-            res['Cookie'] = cookies
+            # Make a string like name1=val1; attr1=a_val1; ...name2=val2; ...
+            # By convention a cookie name can't be a well-known attribute name
+            # so this syntax is unambiguous and can be parsed by (eg) SimpleCookie
+            encoder = compat_http_cookies_SimpleCookie()
+            values = []
+            attributes = (('Domain', '='), ('Path', '='), ('Secure',), ('Expires', '='), ('Version', '='))
+            attributes = tuple([x[0].lower()] + list(x) for x in attributes)
+            for cookie in cookies:
+                _, value = encoder.value_encode(cookie.value)
+                # Py 2 '' --> '', Py 3 '' --> '""'
+                if value == '':
+                    value = '""'
+                values.append('='.join((cookie.name, value)))
+                for attr in attributes:
+                    value = getattr(cookie, attr[0], None)
+                    if value:
+                        values.append('%s%s' % (''.join(attr[1:]), value if len(attr) == 3 else ''))
+            info_dict['cookies'] = '; '.join(values)
+
+        res = std_headers.copy()
+        res.update(info_dict.get('http_headers') or {})
+        res = self._remove_cookie_header(res)

         if 'X-Forwarded-For' not in res:
             x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
             if x_forwarded_for_ip:
                 res['X-Forwarded-For'] = x_forwarded_for_ip

-        return res
+        return res or None

     def _calc_cookies(self, info_dict):
         pr = sanitized_Request(info_dict['url'])
         self.cookiejar.add_cookie_header(pr)
         return pr.get_header('Cookie')

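_calc_headers() now exports matching jar cookies as a single info_dict['cookies'] string in Set-Cookie-like syntax (name=value; Domain=...; Path=...; Secure); since attribute names cannot collide with cookie names, the string can be parsed back with SimpleCookie, as the comment in the code notes. A quick round-trip check of that convention (stdlib only):

    # The 'cookies' field convention: Set-Cookie-like attribute syntax that
    # SimpleCookie can parse back into scoped cookies.
    from http.cookies import SimpleCookie

    serialized = 'session=abc123; Domain=.example.com; Path=/; Secure'
    morsel = SimpleCookie(serialized)['session']

    print(morsel.value)            # abc123
    print(morsel['domain'])        # .example.com
    print(morsel['path'])          # /
    print(bool(morsel['secure']))  # True (flag attribute)
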
+    def _fill_common_fields(self, info_dict, final=True):
+
+        for ts_key, date_key in (
+                ('timestamp', 'upload_date'),
+                ('release_timestamp', 'release_date'),
+        ):
+            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
+                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
+                # see http://bugs.python.org/issue1646728)
+                try:
+                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
+                    info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
+                except (ValueError, OverflowError, OSError):
+                    pass
+
+        # Auto generate title fields corresponding to the *_number fields when missing
+        # in order to always have clean titles. This is very common for TV series.
+        if final:
+            for field in ('chapter', 'season', 'episode'):
+                if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
+                    info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
+
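_fill_common_fields() consolidates two fixups that previously lived inline in process_video_result(): deriving upload_date/release_date from epoch timestamps (guarded against out-of-range values such as negative epochs on Windows), and, when final is true, synthesizing titles like 'Episode 3' from the *_number fields. Its observable effect on a bare dict:

    # What _fill_common_fields() computes, shown on a toy info dict.
    import datetime

    info = {'timestamp': 1638046391, 'episode_number': 3}

    try:
        info['upload_date'] = datetime.datetime.utcfromtimestamp(
            info['timestamp']).strftime('%Y%m%d')
    except (ValueError, OverflowError, OSError):  # e.g. negative epochs on Windows
        pass

    if info.get('episode_number') is not None and not info.get('episode'):
        info['episode'] = 'Episode %d' % info['episode_number']

    print(info['upload_date'], info['episode'])   # 20211127 Episode 3
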
     def process_video_result(self, info_dict, download=True):
         assert info_dict.get('_type', 'video') == 'video'

@@ -1520,24 +1702,7 @@ class YoutubeDL(object):
         if 'display_id' not in info_dict and 'id' in info_dict:
             info_dict['display_id'] = info_dict['id']

-        for ts_key, date_key in (
-                ('timestamp', 'upload_date'),
-                ('release_timestamp', 'release_date'),
-        ):
-            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
-                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
-                # see http://bugs.python.org/issue1646728)
-                try:
-                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
-                    info_dict[date_key] = upload_date.strftime('%Y%m%d')
-                except (ValueError, OverflowError, OSError):
-                    pass
-
-        # Auto generate title fields corresponding to the *_number fields when missing
-        # in order to always have clean titles. This is very common for TV series.
-        for field in ('chapter', 'season', 'episode'):
-            if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
-                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
+        self._fill_common_fields(info_dict)

         for cc_kind in ('subtitles', 'automatic_captions'):
             cc = info_dict.get(cc_kind)
@@ -1569,9 +1734,6 @@ class YoutubeDL(object):
         else:
             formats = info_dict['formats']

-        if not formats:
-            raise ExtractorError('No video formats found!')
-
         def is_wellformed(f):
             url = f.get('url')
             if not url:
@@ -1584,7 +1746,10 @@ class YoutubeDL(object):
             return True

         # Filter out malformed formats for better extraction robustness
-        formats = list(filter(is_wellformed, formats))
+        formats = list(filter(is_wellformed, formats or []))
+
+        if not formats:
+            raise ExtractorError('No video formats found!')

         formats_dict = {}

@@ -1625,10 +1790,13 @@ class YoutubeDL(object):
                 format['protocol'] = determine_protocol(format)
             # Add HTTP headers, so that external programs can use them from the
             # json output
-            full_format_info = info_dict.copy()
-            full_format_info.update(format)
-            format['http_headers'] = self._calc_headers(full_format_info)
-        # Remove private housekeeping stuff
+            format['http_headers'] = self._calc_headers(ChainMap(format, info_dict), load_cookies=True)
+
+        # Safeguard against old/insecure infojson when using --load-info-json
+        info_dict['http_headers'] = self._remove_cookie_header(
+            info_dict.get('http_headers') or {}) or None
+
+        # Remove private housekeeping stuff (copied to http_headers in _calc_headers())
         if '__x_forwarded_for_ip' in info_dict:
             del info_dict['__x_forwarded_for_ip']

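
Per-format headers are now computed over ChainMap(format, info_dict) instead of copying the whole info dict for every format: lookups hit the format first and fall back to the video-level fields, with no copying. The stdlib behaviour being relied on (youtube-dl routes this through compat_collections_chain_map for Python 2):

    # ChainMap lookup precedence: format-level keys shadow video-level ones.
    from collections import ChainMap

    info_dict = {'id': 'xyz', 'http_headers': {'Referer': 'https://example.com/'}}
    fmt = {'format_id': '22', 'url': 'https://cdn.example.com/22.mp4'}

    merged = ChainMap(fmt, info_dict)
    print(merged['url'])           # from fmt
    print(merged['http_headers'])  # falls back to info_dict
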
@@ -1771,17 +1939,17 @@ class YoutubeDL(object):
             self.to_stdout(formatSeconds(info_dict['duration']))
         print_mandatory('format')
         if self.params.get('forcejson', False):
-            self.to_stdout(json.dumps(info_dict))
+            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))

+    @_catch_unsafe_file_extension
     def process_info(self, info_dict):
         """Process a single resolved IE result."""

         assert info_dict.get('_type', 'video') == 'video'

-        max_downloads = self.params.get('max_downloads')
-        if max_downloads is not None:
-            if self._num_downloads >= int(max_downloads):
-                raise MaxDownloadsReached()
+        max_downloads = int_or_none(self.params.get('max_downloads')) or float('inf')
+        if self._num_downloads >= max_downloads:
+            raise MaxDownloadsReached()

         # TODO: backward compatibility, to be removed
         info_dict['fulltitle'] = info_dict['title']
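
The limit check is simplified by normalizing the option once: int_or_none(...) or float('inf') maps an unset or unparseable --max-downloads to infinity, so a single comparison suffices, and the same bound is re-checked after each completed download to stop playlist processing early (PR #26638). The idiom by itself, with a simplified stand-in for youtube_dl.utils.int_or_none:

    # Normalize an optional numeric limit so one comparison also handles 'unset'.
    def int_or_none(v):
        try:
            return int(v)
        except (TypeError, ValueError):
            return None

    for raw in (None, '3', 'not-a-number'):
        max_downloads = int_or_none(raw) or float('inf')
        print(raw, 5 >= max_downloads)   # only '3' caps a count of 5
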
@@ -1832,7 +2000,7 @@ class YoutubeDL(object):
             else:
                 try:
                     self.to_screen('[info] Writing video description to: ' + descfn)
-                    with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
+                    with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                         descfile.write(info_dict['description'])
                 except (OSError, IOError):
                     self.report_error('Cannot write description file ' + descfn)
@@ -1847,7 +2015,7 @@ class YoutubeDL(object):
             else:
                 try:
                     self.to_screen('[info] Writing video annotations to: ' + annofn)
-                    with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
+                    with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                         annofile.write(info_dict['annotations'])
                 except (KeyError, TypeError):
                     self.report_warning('There are no annotations to write.')
@@ -1874,7 +2042,7 @@ class YoutubeDL(object):
                         try:
                             # Use newline='' to prevent conversion of newline characters
                             # See https://github.com/ytdl-org/youtube-dl/issues/10268
-                            with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
+                            with open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
                                 subfile.write(sub_info['data'])
                         except (OSError, IOError):
                             self.report_error('Cannot write subtitles file ' + sub_filename)
@@ -1883,36 +2051,41 @@ class YoutubeDL(object):
                         try:
                             sub_data = ie._request_webpage(
                                 sub_info['url'], info_dict['id'], note=False).read()
-                            with io.open(encodeFilename(sub_filename), 'wb') as subfile:
+                            with open(encodeFilename(sub_filename), 'wb') as subfile:
                                 subfile.write(sub_data)
                         except (ExtractorError, IOError, OSError, ValueError) as err:
                             self.report_warning('Unable to download subtitle for "%s": %s' %
                                                 (sub_lang, error_to_compat_str(err)))
                             continue

-        if self.params.get('writeinfojson', False):
-            infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
-            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
-                self.to_screen('[info] Video description metadata is already present')
-            else:
-                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
-                try:
-                    write_json_file(self.filter_requested_info(info_dict), infofn)
-                except (OSError, IOError):
-                    self.report_error('Cannot write metadata to JSON file ' + infofn)
-                    return
+        self._write_info_json(
+            'video description', info_dict,
+            replace_extension(filename, 'info.json', info_dict.get('ext')))

         self._write_thumbnails(info_dict, filename)

         if not self.params.get('skip_download', False):
             try:
+                def checked_get_suitable_downloader(info_dict, params):
+                    ed_args = params.get('external_downloader_args')
+                    dler = get_suitable_downloader(info_dict, params)
+                    if ed_args and not params.get('external_downloader_args'):
+                        # external_downloader_args was cleared because external_downloader was rejected
+                        self.report_warning('Requested external downloader cannot be used: '
+                                            'ignoring --external-downloader-args.')
+                    return dler
+
                 def dl(name, info):
-                    fd = get_suitable_downloader(info, self.params)(self, self.params)
+                    fd = checked_get_suitable_downloader(info, self.params)(self, self.params)
                     for ph in self._progress_hooks:
                         fd.add_progress_hook(ph)
                     if self.params.get('verbose'):
                         self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
-                    return fd.download(name, info)
+
+                    new_info = dict((k, v) for k, v in info.items() if not k.startswith('__p'))
+                    new_info['http_headers'] = self._calc_headers(new_info)
+
+                    return fd.download(name, new_info)

                 if info_dict.get('requested_formats') is not None:
                     downloaded = []
@@ -1941,18 +2114,26 @@ class YoutubeDL(object):
                         # TODO: Check acodec/vcodec
                         return False

-                    filename_real_ext = os.path.splitext(filename)[1][1:]
-                    filename_wo_ext = (
-                        os.path.splitext(filename)[0]
-                        if filename_real_ext == info_dict['ext']
-                        else filename)
+                    exts = [info_dict['ext']]
                     requested_formats = info_dict['requested_formats']
                     if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
                         info_dict['ext'] = 'mkv'
                         self.report_warning(
                             'Requested formats are incompatible for merge and will be merged into mkv.')
+                    exts.append(info_dict['ext'])
+
                     # Ensure filename always has a correct extension for successful merge
-                    filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
+                    def correct_ext(filename, ext=exts[1]):
+                        if filename == '-':
+                            return filename
+                        f_name, f_real_ext = os.path.splitext(filename)
+                        f_real_ext = f_real_ext[1:]
+                        filename_wo_ext = f_name if f_real_ext in exts else filename
+                        if ext is None:
+                            ext = f_real_ext or None
+                        return join_nonempty(filename_wo_ext, ext, delim='.')
+
+                    filename = correct_ext(filename)
                     if os.path.exists(encodeFilename(filename)):
                         self.to_screen(
                             '[download] %s has already been downloaded and '
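
correct_ext() strips the current extension only when it matches one of the known pre- or post-merge extensions, then reattaches the final one with join_nonempty, a utils helper (not shown in this diff) that joins only truthy parts. A self-contained approximation, with a minimal stand-in for youtube_dl.utils.join_nonempty:

    import os.path

    def join_nonempty(*parts, delim='.'):
        return delim.join(str(p) for p in parts if p)

    def correct_ext(filename, exts, ext):
        if filename == '-':               # stdout: leave untouched
            return filename
        f_name, f_real_ext = os.path.splitext(filename)
        f_real_ext = f_real_ext[1:]
        stem = f_name if f_real_ext in exts else filename
        if ext is None:
            ext = f_real_ext or None
        return join_nonempty(stem, ext, delim='.')

    # Incompatible formats get remuxed to mkv, and the name follows suit:
    print(correct_ext('clip.mp4', ['mp4', 'mkv'], 'mkv'))   # clip.mkv
    print(correct_ext('clip', ['mp4', 'mkv'], 'mkv'))       # clip.mkv
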
@@ -1962,8 +2143,9 @@ class YoutubeDL(object):
                             new_info = dict(info_dict)
                             new_info.update(f)
                             fname = prepend_extension(
-                                self.prepare_filename(new_info),
-                                'f%s' % f['format_id'], new_info['ext'])
+                                correct_ext(
+                                    self.prepare_filename(new_info), new_info['ext']),
+                                'f%s' % (f['format_id'],), new_info['ext'])
                             if not ensure_dir_exists(fname):
                                 return
                             downloaded.append(fname)
@@ -2049,9 +2231,12 @@ class YoutubeDL(object):
                 try:
                     self.post_process(filename, info_dict)
                 except (PostProcessingError) as err:
-                    self.report_error('postprocessing: %s' % str(err))
+                    self.report_error('postprocessing: %s' % error_to_compat_str(err))
                     return
                 self.record_download_archive(info_dict)
+                # avoid possible nugatory search for further items (PR #26638)
+                if self._num_downloads >= max_downloads:
+                    raise MaxDownloadsReached()

     def download(self, url_list):
         """Download a given list of URLs."""
@@ -2074,16 +2259,13 @@ class YoutubeDL(object):
                 raise
             else:
                 if self.params.get('dump_single_json', False):
-                    self.to_stdout(json.dumps(res))
+                    self.to_stdout(json.dumps(self.sanitize_info(res)))

         return self._download_retcode

     def download_with_info_file(self, info_filename):
-        with contextlib.closing(fileinput.FileInput(
-                [info_filename], mode='r',
-                openhook=fileinput.hook_encoded('utf-8'))) as f:
-            # FileInput doesn't have a read method, we can't call json.load
-            info = self.filter_requested_info(json.loads('\n'.join(f)))
+        with open(info_filename, encoding='utf-8') as f:
+            info = self.filter_requested_info(json.load(f))
         try:
             self.process_ie_result(info, download=True)
         except DownloadError:
@@ -2096,10 +2278,36 @@ class YoutubeDL(object):
         return self._download_retcode

     @staticmethod
-    def filter_requested_info(info_dict):
-        return dict(
-            (k, v) for k, v in info_dict.items()
-            if k not in ['requested_formats', 'requested_subtitles'])
+    def sanitize_info(info_dict, remove_private_keys=False):
+        ''' Sanitize the infodict for converting to json '''
+        if info_dict is None:
+            return info_dict
+
+        if remove_private_keys:
+            reject = lambda k, v: (v is None
+                                   or k.startswith('__')
+                                   or k in ('requested_formats',
+                                            'requested_subtitles'))
+        else:
+            reject = lambda k, v: False
+
+        def filter_fn(obj):
+            if isinstance(obj, dict):
+                return dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v))
+            elif isinstance(obj, (list, tuple, set, LazyList)):
+                return list(map(filter_fn, obj))
+            elif obj is None or any(isinstance(obj, c)
+                                    for c in (compat_integer_types,
+                                              (compat_str, float, bool))):
+                return obj
+            else:
+                return repr(obj)
+
+        return filter_fn(info_dict)
+
+    @classmethod
+    def filter_requested_info(cls, info_dict):
+        return cls.sanitize_info(info_dict, True)
 
     def post_process(self, filename, ie_info):
         """Run all the postprocessors on the given file."""
@@ -2306,18 +2514,21 @@ class YoutubeDL(object):
                 self.get_encoding()))
         write_string(encoding_str, encoding=None)
 
-        self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
+        writeln_debug = lambda *s: self._write_string('[debug] %s\n' % (''.join(s), ))
+        writeln_debug('youtube-dl version ', __version__)
         if _LAZY_LOADER:
-            self._write_string('[debug] Lazy loading extractors enabled' + '\n')
+            writeln_debug('Lazy loading extractors enabled')
+        if ytdl_is_updateable():
+            writeln_debug('Single file build')
         try:
             sp = subprocess.Popen(
                 ['git', 'rev-parse', '--short', 'HEAD'],
                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                 cwd=os.path.dirname(os.path.abspath(__file__)))
-            out, err = sp.communicate()
+            out, err = process_communicate_or_kill(sp)
             out = out.decode().strip()
             if re.match('[0-9a-f]+', out):
-                self._write_string('[debug] Git HEAD: ' + out + '\n')
+                writeln_debug('Git HEAD: ', out)
         except Exception:
             try:
                 sys.exc_clear()
@@ -2330,9 +2541,22 @@ class YoutubeDL(object):
                 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
             return impl_name
 
-        self._write_string('[debug] Python version %s (%s) - %s\n' % (
-            platform.python_version(), python_implementation(),
-            platform_name()))
+        def libc_ver():
+            try:
+                return platform.libc_ver()
+            except OSError:  # We may not have access to the executable
+                return []
+
+        libc = join_nonempty(*libc_ver(), delim=' ')
+        writeln_debug('Python %s (%s %s %s) - %s - %s%s' % (
+            platform.python_version(),
+            python_implementation(),
+            platform.machine(),
+            platform.architecture()[0],
+            platform_name(),
+            OPENSSL_VERSION,
+            (' - %s' % (libc, )) if libc else ''
+        ))
 
         exe_versions = FFmpegPostProcessor.get_versions(self)
         exe_versions['rtmpdump'] = rtmpdump_version()
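
The rewritten version line adds interpreter, machine, architecture, OpenSSL and (where readable) libc details. Roughly, with illustrative values plugged into the format string added above:

    print('[debug] Python %s (%s %s %s) - %s - %s%s' % (
        '3.8.10', 'CPython', 'x86_64', '64bit',
        'Linux-5.4.0-x86_64-with-glibc2.29',
        'OpenSSL 1.1.1f  31 Mar 2020', ' - glibc 2.31'))
    # [debug] Python 3.8.10 (CPython x86_64 64bit) - Linux-... - OpenSSL 1.1.1f ... - glibc 2.31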
@@ -2344,17 +2568,17 @@ class YoutubeDL(object):
         )
         if not exe_str:
             exe_str = 'none'
-        self._write_string('[debug] exe versions: %s\n' % exe_str)
+        writeln_debug('exe versions: %s' % (exe_str, ))
 
         proxy_map = {}
         for handler in self._opener.handlers:
             if hasattr(handler, 'proxies'):
                 proxy_map.update(handler.proxies)
-        self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
+        writeln_debug('Proxy map: ', compat_str(proxy_map))
 
         if self.params.get('call_home', False):
             ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
-            self._write_string('[debug] Public IP address: %s\n' % ipaddr)
+            writeln_debug('Public IP address: %s' % (ipaddr, ))
             latest_version = self.urlopen(
                 'https://yt-dl.org/latest/version').read().decode('utf-8')
             if version_tuple(latest_version) > version_tuple(__version__):
@@ -2371,7 +2595,7 @@ class YoutubeDL(object):
         opts_proxy = self.params.get('proxy')
 
         if opts_cookiefile is None:
-            self.cookiejar = compat_cookiejar.CookieJar()
+            self.cookiejar = YoutubeDLCookieJar()
         else:
             opts_cookiefile = expand_path(opts_cookiefile)
             self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
@@ -2432,6 +2656,28 @@ class YoutubeDL(object):
             encoding = preferredencoding()
         return encoding
 
+    def _write_info_json(self, label, info_dict, infofn, overwrite=None):
+        if not self.params.get('writeinfojson', False):
+            return False
+
+        def msg(fmt, lbl):
+            return fmt % (lbl + ' metadata',)
+
+        if overwrite is None:
+            overwrite = not self.params.get('nooverwrites', False)
+
+        if not overwrite and os.path.exists(encodeFilename(infofn)):
+            self.to_screen(msg('[info] %s is already present', label.title()))
+            return 'exists'
+        else:
+            self.to_screen(msg('[info] Writing %s as JSON to: ', label) + infofn)
+            try:
+                write_json_file(self.filter_requested_info(info_dict), infofn)
+                return True
+            except (OSError, IOError):
+                self.report_error(msg('Cannot write %s to JSON file ', label) + infofn)
+                return
+
     def _write_thumbnails(self, info_dict, filename):
         if self.params.get('writethumbnail', False):
             thumbnails = info_dict.get('thumbnails')

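The new _write_info_json() helper centralises the --write-info-json handling; it is internal, but its contract follows from the code above. A hedged sketch (the filename is a placeholder):

    from youtube_dl import YoutubeDL

    ydl = YoutubeDL({'writeinfojson': True, 'nooverwrites': True})
    # True on a successful write; 'exists' if the file is already present
    # and overwriting is disabled; None if the write failed; False when
    # the 'writeinfojson' option is not set at all
    ydl._write_info_json('video', {'id': 'x', 'title': 'example'},
                         'example.info.json')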
+ 7 - 5
youtube_dl/__init__.py

@@ -5,7 +5,6 @@ from __future__ import unicode_literals
 
 __license__ = 'Public Domain'
 
-import codecs
 import io
 import os
 import random
@@ -17,10 +16,12 @@ from .options import (
 )
 from .compat import (
     compat_getpass,
+    compat_register_utf8,
     compat_shlex_split,
     workaround_optparse_bug9161,
 )
 from .utils import (
+    _UnsafeExtensionError,
     DateRange,
     decodeOption,
     DEFAULT_OUTTMPL,
@@ -46,10 +47,8 @@ from .YoutubeDL import YoutubeDL
 
 
 def _real_main(argv=None):
-    # Compatibility fixes for Windows
-    if sys.platform == 'win32':
-        # https://github.com/ytdl-org/youtube-dl/issues/820
-        codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
+    # Compatibility fix for Windows
+    compat_register_utf8()
 
     workaround_optparse_bug9161()
 
@@ -175,6 +174,9 @@ def _real_main(argv=None):
     if opts.ap_mso and opts.ap_mso not in MSO_INFO:
         parser.error('Unsupported TV Provider, use --ap-list-mso to get a list of supported TV Providers')
 
+    if opts.no_check_extensions:
+        _UnsafeExtensionError.lenient = True
+
     def parse_retries(retries):
         if retries in ('inf', 'infinite'):
             parsed_retries = float('inf')

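compat_register_utf8() folds the old inline cp65001 workaround into compat.py; judging by the lines removed above, it amounts to:

    import codecs

    # register 'cp65001' (the Windows UTF-8 code page) as an alias of
    # utf-8, exactly as the removed inline registration did (issue #820)
    codecs.register(
        lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)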
+ 36 - 3
youtube_dl/aes.py

@@ -8,6 +8,18 @@ from .utils import bytes_to_intlist, intlist_to_bytes
 BLOCK_SIZE_BYTES = 16
 
 
+def pkcs7_padding(data):
+    """
+    PKCS#7 padding
+
+    @param {int[]} data        cleartext
+    @returns {int[]}           padding data
+    """
+
+    remaining_length = BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES
+    return data + [remaining_length] * remaining_length
+
+
 def aes_ctr_decrypt(data, key, counter):
     """
     Decrypt with aes in counter mode
@@ -76,8 +88,7 @@ def aes_cbc_encrypt(data, key, iv):
     previous_cipher_block = iv
     for i in range(block_count):
         block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
-        remaining_length = BLOCK_SIZE_BYTES - len(block)
-        block += [remaining_length] * remaining_length
+        block = pkcs7_padding(block)
         mixed_block = xor(block, previous_cipher_block)
 
         encrypted_block = aes_encrypt(mixed_block, expanded_key)
@@ -88,6 +99,28 @@ def aes_cbc_encrypt(data, key, iv):
     return encrypted_data
 
 
+def aes_ecb_encrypt(data, key):
+    """
+    Encrypt with aes in ECB mode. Using PKCS#7 padding
+
+    @param {int[]} data        cleartext
+    @param {int[]} key         16/24/32-Byte cipher key
+    @returns {int[]}           encrypted data
+    """
+    expanded_key = key_expansion(key)
+    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
+
+    encrypted_data = []
+    for i in range(block_count):
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block = pkcs7_padding(block)
+
+        encrypted_block = aes_encrypt(block, expanded_key)
+        encrypted_data += encrypted_block
+
+    return encrypted_data
+
+
 def key_expansion(data):
     """
     Generate key schedule
@@ -303,7 +336,7 @@ def xor(data1, data2):
 
 
 def rijndael_mul(a, b):
-    if(a == 0 or b == 0):
+    if (a == 0 or b == 0):
         return 0
     return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
 

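Both the padding helper and the new ECB mode operate on lists of ints, as their docstrings state. A small sanity check, assuming the module is importable as youtube_dl.aes:

    from youtube_dl.aes import pkcs7_padding

    # 13 cleartext bytes pad up to one 16-byte block with pad value 3;
    # an exact 16-byte block gains a whole extra padding block (PKCS#7)
    assert pkcs7_padding(list(range(13)))[-3:] == [3, 3, 3]
    assert len(pkcs7_padding(list(range(16)))) == 32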
+ 28 - 8
youtube_dl/cache.py

@@ -1,21 +1,32 @@
 from __future__ import unicode_literals
 
 import errno
-import io
 import json
 import os
 import re
 import shutil
 import traceback
 
-from .compat import compat_getenv
+from .compat import (
+    compat_getenv,
+    compat_open as open,
+)
 from .utils import (
+    error_to_compat_str,
     expand_path,
+    is_outdated_version,
+    try_get,
     write_json_file,
 )
+from .version import __version__
 
 
 class Cache(object):
+
+    _YTDL_DIR = 'youtube-dl'
+    _VERSION_KEY = _YTDL_DIR + '_version'
+    _DEFAULT_VERSION = '2021.12.17'
+
     def __init__(self, ydl):
         self._ydl = ydl
 
@@ -23,7 +34,7 @@ class Cache(object):
         res = self._ydl.params.get('cachedir')
         if res is None:
             cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
-            res = os.path.join(cache_root, 'youtube-dl')
+            res = os.path.join(cache_root, self._YTDL_DIR)
         return expand_path(res)
 
     def _get_cache_fn(self, section, key, dtype):
@@ -50,13 +61,22 @@ class Cache(object):
             except OSError as ose:
                 if ose.errno != errno.EEXIST:
                     raise
-            write_json_file(data, fn)
+            write_json_file({self._VERSION_KEY: __version__, 'data': data}, fn)
         except Exception:
             tb = traceback.format_exc()
             self._ydl.report_warning(
                 'Writing cache to %r failed: %s' % (fn, tb))
 
-    def load(self, section, key, dtype='json', default=None):
+    def _validate(self, data, min_ver):
+        version = try_get(data, lambda x: x[self._VERSION_KEY])
+        if not version:  # Backward compatibility
+            data, version = {'data': data}, self._DEFAULT_VERSION
+        if not is_outdated_version(version, min_ver or '0', assume_new=False):
+            return data['data']
+        self._ydl.to_screen(
+            'Discarding old cache from version {version} (needs {min_ver})'.format(**locals()))
+
+    def load(self, section, key, dtype='json', default=None, min_ver=None):
         assert dtype in ('json',)
 
         if not self.enabled:
@@ -65,13 +85,13 @@ class Cache(object):
         cache_fn = self._get_cache_fn(section, key, dtype)
         try:
             try:
-                with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
-                    return json.load(cachef)
+                with open(cache_fn, 'r', encoding='utf-8') as cachef:
+                    return self._validate(json.load(cachef), min_ver)
             except ValueError:
                 try:
                     file_size = os.path.getsize(cache_fn)
                 except (OSError, IOError) as oe:
-                    file_size = str(oe)
+                    file_size = error_to_compat_str(oe)
                 self._ydl.report_warning(
                     'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
         except IOError:

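Cache entries are now wrapped as {'youtube-dl_version': ..., 'data': ...}, and load() can discard entries written before min_ver; unversioned entries are assumed to date from 2021.12.17. A sketch (section and key are made up; ydl is a YoutubeDL instance; store() is assumed to keep its existing (section, key, data) signature):

    cache = Cache(ydl)
    cache.store('youtube-sigfuncs', 'js-abc', {'n': 42})
    # -> {'n': 42}; an entry written by a version older than min_ver would
    # be dropped with a 'Discarding old cache ...' notice instead
    cache.load('youtube-sigfuncs', 'js-abc', min_ver='2021.12.17')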
+ 1667 - 0
youtube_dl/casefold.py

@@ -0,0 +1,1667 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .compat import (
+    compat_str,
+    compat_chr,
+)
+
+# Below is included the text of icu/CaseFolding.txt retrieved from
+# https://github.com/unicode-org/icu/blob/main/icu4c/source/data/unidata/CaseFolding.txt
+# If newly foldable Unicode characters are defined, paste the new version
+# of the text inside the ''' marks.
+# The text is expected to contain only blank lines and lines whose first
+# character is #, all of which are ignored, plus fold definitions like this:
+# `from_hex_code; space_separated_to_hex_code_list; comment`
+
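A minimal sketch of a parser for such fold definitions (a hypothetical helper, not part of this module): blank lines and '#' comments are skipped; each remaining line splits on ';' into code, status and mapping fields:

    def _parse_folds(text):
        for line in text.splitlines():
            line = line.split('#', 1)[0].strip()
            if not line:
                continue
            code, status, mapping = (f.strip() for f in line.split(';')[:3])
            yield int(code, 16), status, [int(c, 16) for c in mapping.split()]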
+_map_str = '''
+# CaseFolding-15.0.0.txt
+# Date: 2022-02-02, 23:35:35 GMT
+# © 2022 Unicode®, Inc.
+# Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries.
+# For terms of use, see https://www.unicode.org/terms_of_use.html
+#
+# Unicode Character Database
+#   For documentation, see https://www.unicode.org/reports/tr44/
+#
+# Case Folding Properties
+#
+# This file is a supplement to the UnicodeData file.
+# It provides a case folding mapping generated from the Unicode Character Database.
+# If all characters are mapped according to the full mapping below, then
+# case differences (according to UnicodeData.txt and SpecialCasing.txt)
+# are eliminated.
+#
+# The data supports both implementations that require simple case foldings
+# (where string lengths don't change), and implementations that allow full case folding
+# (where string lengths may grow). Note that where they can be supported, the
+# full case foldings are superior: for example, they allow "MASSE" and "Maße" to match.
+#
+# All code points not listed in this file map to themselves.
+#
+# NOTE: case folding does not preserve normalization formats!
+#
+# For information on case folding, including how to have case folding
+# preserve normalization formats, see Section 3.13 Default Case Algorithms in
+# The Unicode Standard.
+#
+# ================================================================================
+# Format
+# ================================================================================
+# The entries in this file are in the following machine-readable format:
+#
+# <code>; <status>; <mapping>; # <name>
+#
+# The status field is:
+# C: common case folding, common mappings shared by both simple and full mappings.
+# F: full case folding, mappings that cause strings to grow in length. Multiple characters are separated by spaces.
+# S: simple case folding, mappings to single characters where different from F.
+# T: special case for uppercase I and dotted uppercase I
+#    - For non-Turkic languages, this mapping is normally not used.
+#    - For Turkic languages (tr, az), this mapping can be used instead of the normal mapping for these characters.
+#      Note that the Turkic mappings do not maintain canonical equivalence without additional processing.
+#      See the discussions of case mapping in the Unicode Standard for more information.
+#
+# Usage:
+#  A. To do a simple case folding, use the mappings with status C + S.
+#  B. To do a full case folding, use the mappings with status C + F.
+#
+#    The mappings with status T can be used or omitted depending on the desired case-folding
+#    behavior. (The default option is to exclude them.)
+#
+# =================================================================
+
+# Property: Case_Folding
+
+#  All code points not explicitly listed for Case_Folding
+#  have the value C for the status field, and the code point itself for the mapping field.
+
+# =================================================================
+0041; C; 0061; # LATIN CAPITAL LETTER A
+0042; C; 0062; # LATIN CAPITAL LETTER B
+0043; C; 0063; # LATIN CAPITAL LETTER C
+0044; C; 0064; # LATIN CAPITAL LETTER D
+0045; C; 0065; # LATIN CAPITAL LETTER E
+0046; C; 0066; # LATIN CAPITAL LETTER F
+0047; C; 0067; # LATIN CAPITAL LETTER G
+0048; C; 0068; # LATIN CAPITAL LETTER H
+0049; C; 0069; # LATIN CAPITAL LETTER I
+0049; T; 0131; # LATIN CAPITAL LETTER I
+004A; C; 006A; # LATIN CAPITAL LETTER J
+004B; C; 006B; # LATIN CAPITAL LETTER K
+004C; C; 006C; # LATIN CAPITAL LETTER L
+004D; C; 006D; # LATIN CAPITAL LETTER M
+004E; C; 006E; # LATIN CAPITAL LETTER N
+004F; C; 006F; # LATIN CAPITAL LETTER O
+0050; C; 0070; # LATIN CAPITAL LETTER P
+0051; C; 0071; # LATIN CAPITAL LETTER Q
+0052; C; 0072; # LATIN CAPITAL LETTER R
+0053; C; 0073; # LATIN CAPITAL LETTER S
+0054; C; 0074; # LATIN CAPITAL LETTER T
+0055; C; 0075; # LATIN CAPITAL LETTER U
+0056; C; 0076; # LATIN CAPITAL LETTER V
+0057; C; 0077; # LATIN CAPITAL LETTER W
+0058; C; 0078; # LATIN CAPITAL LETTER X
+0059; C; 0079; # LATIN CAPITAL LETTER Y
+005A; C; 007A; # LATIN CAPITAL LETTER Z
+00B5; C; 03BC; # MICRO SIGN
+00C0; C; 00E0; # LATIN CAPITAL LETTER A WITH GRAVE
+00C1; C; 00E1; # LATIN CAPITAL LETTER A WITH ACUTE
+00C2; C; 00E2; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+00C3; C; 00E3; # LATIN CAPITAL LETTER A WITH TILDE
+00C4; C; 00E4; # LATIN CAPITAL LETTER A WITH DIAERESIS
+00C5; C; 00E5; # LATIN CAPITAL LETTER A WITH RING ABOVE
+00C6; C; 00E6; # LATIN CAPITAL LETTER AE
+00C7; C; 00E7; # LATIN CAPITAL LETTER C WITH CEDILLA
+00C8; C; 00E8; # LATIN CAPITAL LETTER E WITH GRAVE
+00C9; C; 00E9; # LATIN CAPITAL LETTER E WITH ACUTE
+00CA; C; 00EA; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+00CB; C; 00EB; # LATIN CAPITAL LETTER E WITH DIAERESIS
+00CC; C; 00EC; # LATIN CAPITAL LETTER I WITH GRAVE
+00CD; C; 00ED; # LATIN CAPITAL LETTER I WITH ACUTE
+00CE; C; 00EE; # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+00CF; C; 00EF; # LATIN CAPITAL LETTER I WITH DIAERESIS
+00D0; C; 00F0; # LATIN CAPITAL LETTER ETH
+00D1; C; 00F1; # LATIN CAPITAL LETTER N WITH TILDE
+00D2; C; 00F2; # LATIN CAPITAL LETTER O WITH GRAVE
+00D3; C; 00F3; # LATIN CAPITAL LETTER O WITH ACUTE
+00D4; C; 00F4; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+00D5; C; 00F5; # LATIN CAPITAL LETTER O WITH TILDE
+00D6; C; 00F6; # LATIN CAPITAL LETTER O WITH DIAERESIS
+00D8; C; 00F8; # LATIN CAPITAL LETTER O WITH STROKE
+00D9; C; 00F9; # LATIN CAPITAL LETTER U WITH GRAVE
+00DA; C; 00FA; # LATIN CAPITAL LETTER U WITH ACUTE
+00DB; C; 00FB; # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+00DC; C; 00FC; # LATIN CAPITAL LETTER U WITH DIAERESIS
+00DD; C; 00FD; # LATIN CAPITAL LETTER Y WITH ACUTE
+00DE; C; 00FE; # LATIN CAPITAL LETTER THORN
+00DF; F; 0073 0073; # LATIN SMALL LETTER SHARP S
+0100; C; 0101; # LATIN CAPITAL LETTER A WITH MACRON
+0102; C; 0103; # LATIN CAPITAL LETTER A WITH BREVE
+0104; C; 0105; # LATIN CAPITAL LETTER A WITH OGONEK
+0106; C; 0107; # LATIN CAPITAL LETTER C WITH ACUTE
+0108; C; 0109; # LATIN CAPITAL LETTER C WITH CIRCUMFLEX
+010A; C; 010B; # LATIN CAPITAL LETTER C WITH DOT ABOVE
+010C; C; 010D; # LATIN CAPITAL LETTER C WITH CARON
+010E; C; 010F; # LATIN CAPITAL LETTER D WITH CARON
+0110; C; 0111; # LATIN CAPITAL LETTER D WITH STROKE
+0112; C; 0113; # LATIN CAPITAL LETTER E WITH MACRON
+0114; C; 0115; # LATIN CAPITAL LETTER E WITH BREVE
+0116; C; 0117; # LATIN CAPITAL LETTER E WITH DOT ABOVE
+0118; C; 0119; # LATIN CAPITAL LETTER E WITH OGONEK
+011A; C; 011B; # LATIN CAPITAL LETTER E WITH CARON
+011C; C; 011D; # LATIN CAPITAL LETTER G WITH CIRCUMFLEX
+011E; C; 011F; # LATIN CAPITAL LETTER G WITH BREVE
+0120; C; 0121; # LATIN CAPITAL LETTER G WITH DOT ABOVE
+0122; C; 0123; # LATIN CAPITAL LETTER G WITH CEDILLA
+0124; C; 0125; # LATIN CAPITAL LETTER H WITH CIRCUMFLEX
+0126; C; 0127; # LATIN CAPITAL LETTER H WITH STROKE
+0128; C; 0129; # LATIN CAPITAL LETTER I WITH TILDE
+012A; C; 012B; # LATIN CAPITAL LETTER I WITH MACRON
+012C; C; 012D; # LATIN CAPITAL LETTER I WITH BREVE
+012E; C; 012F; # LATIN CAPITAL LETTER I WITH OGONEK
+0130; F; 0069 0307; # LATIN CAPITAL LETTER I WITH DOT ABOVE
+0130; T; 0069; # LATIN CAPITAL LETTER I WITH DOT ABOVE
+0132; C; 0133; # LATIN CAPITAL LIGATURE IJ
+0134; C; 0135; # LATIN CAPITAL LETTER J WITH CIRCUMFLEX
+0136; C; 0137; # LATIN CAPITAL LETTER K WITH CEDILLA
+0139; C; 013A; # LATIN CAPITAL LETTER L WITH ACUTE
+013B; C; 013C; # LATIN CAPITAL LETTER L WITH CEDILLA
+013D; C; 013E; # LATIN CAPITAL LETTER L WITH CARON
+013F; C; 0140; # LATIN CAPITAL LETTER L WITH MIDDLE DOT
+0141; C; 0142; # LATIN CAPITAL LETTER L WITH STROKE
+0143; C; 0144; # LATIN CAPITAL LETTER N WITH ACUTE
+0145; C; 0146; # LATIN CAPITAL LETTER N WITH CEDILLA
+0147; C; 0148; # LATIN CAPITAL LETTER N WITH CARON
+0149; F; 02BC 006E; # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
+014A; C; 014B; # LATIN CAPITAL LETTER ENG
+014C; C; 014D; # LATIN CAPITAL LETTER O WITH MACRON
+014E; C; 014F; # LATIN CAPITAL LETTER O WITH BREVE
+0150; C; 0151; # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+0152; C; 0153; # LATIN CAPITAL LIGATURE OE
+0154; C; 0155; # LATIN CAPITAL LETTER R WITH ACUTE
+0156; C; 0157; # LATIN CAPITAL LETTER R WITH CEDILLA
+0158; C; 0159; # LATIN CAPITAL LETTER R WITH CARON
+015A; C; 015B; # LATIN CAPITAL LETTER S WITH ACUTE
+015C; C; 015D; # LATIN CAPITAL LETTER S WITH CIRCUMFLEX
+015E; C; 015F; # LATIN CAPITAL LETTER S WITH CEDILLA
+0160; C; 0161; # LATIN CAPITAL LETTER S WITH CARON
+0162; C; 0163; # LATIN CAPITAL LETTER T WITH CEDILLA
+0164; C; 0165; # LATIN CAPITAL LETTER T WITH CARON
+0166; C; 0167; # LATIN CAPITAL LETTER T WITH STROKE
+0168; C; 0169; # LATIN CAPITAL LETTER U WITH TILDE
+016A; C; 016B; # LATIN CAPITAL LETTER U WITH MACRON
+016C; C; 016D; # LATIN CAPITAL LETTER U WITH BREVE
+016E; C; 016F; # LATIN CAPITAL LETTER U WITH RING ABOVE
+0170; C; 0171; # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+0172; C; 0173; # LATIN CAPITAL LETTER U WITH OGONEK
+0174; C; 0175; # LATIN CAPITAL LETTER W WITH CIRCUMFLEX
+0176; C; 0177; # LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
+0178; C; 00FF; # LATIN CAPITAL LETTER Y WITH DIAERESIS
+0179; C; 017A; # LATIN CAPITAL LETTER Z WITH ACUTE
+017B; C; 017C; # LATIN CAPITAL LETTER Z WITH DOT ABOVE
+017D; C; 017E; # LATIN CAPITAL LETTER Z WITH CARON
+017F; C; 0073; # LATIN SMALL LETTER LONG S
+0181; C; 0253; # LATIN CAPITAL LETTER B WITH HOOK
+0182; C; 0183; # LATIN CAPITAL LETTER B WITH TOPBAR
+0184; C; 0185; # LATIN CAPITAL LETTER TONE SIX
+0186; C; 0254; # LATIN CAPITAL LETTER OPEN O
+0187; C; 0188; # LATIN CAPITAL LETTER C WITH HOOK
+0189; C; 0256; # LATIN CAPITAL LETTER AFRICAN D
+018A; C; 0257; # LATIN CAPITAL LETTER D WITH HOOK
+018B; C; 018C; # LATIN CAPITAL LETTER D WITH TOPBAR
+018E; C; 01DD; # LATIN CAPITAL LETTER REVERSED E
+018F; C; 0259; # LATIN CAPITAL LETTER SCHWA
+0190; C; 025B; # LATIN CAPITAL LETTER OPEN E
+0191; C; 0192; # LATIN CAPITAL LETTER F WITH HOOK
+0193; C; 0260; # LATIN CAPITAL LETTER G WITH HOOK
+0194; C; 0263; # LATIN CAPITAL LETTER GAMMA
+0196; C; 0269; # LATIN CAPITAL LETTER IOTA
+0197; C; 0268; # LATIN CAPITAL LETTER I WITH STROKE
+0198; C; 0199; # LATIN CAPITAL LETTER K WITH HOOK
+019C; C; 026F; # LATIN CAPITAL LETTER TURNED M
+019D; C; 0272; # LATIN CAPITAL LETTER N WITH LEFT HOOK
+019F; C; 0275; # LATIN CAPITAL LETTER O WITH MIDDLE TILDE
+01A0; C; 01A1; # LATIN CAPITAL LETTER O WITH HORN
+01A2; C; 01A3; # LATIN CAPITAL LETTER OI
+01A4; C; 01A5; # LATIN CAPITAL LETTER P WITH HOOK
+01A6; C; 0280; # LATIN LETTER YR
+01A7; C; 01A8; # LATIN CAPITAL LETTER TONE TWO
+01A9; C; 0283; # LATIN CAPITAL LETTER ESH
+01AC; C; 01AD; # LATIN CAPITAL LETTER T WITH HOOK
+01AE; C; 0288; # LATIN CAPITAL LETTER T WITH RETROFLEX HOOK
+01AF; C; 01B0; # LATIN CAPITAL LETTER U WITH HORN
+01B1; C; 028A; # LATIN CAPITAL LETTER UPSILON
+01B2; C; 028B; # LATIN CAPITAL LETTER V WITH HOOK
+01B3; C; 01B4; # LATIN CAPITAL LETTER Y WITH HOOK
+01B5; C; 01B6; # LATIN CAPITAL LETTER Z WITH STROKE
+01B7; C; 0292; # LATIN CAPITAL LETTER EZH
+01B8; C; 01B9; # LATIN CAPITAL LETTER EZH REVERSED
+01BC; C; 01BD; # LATIN CAPITAL LETTER TONE FIVE
+01C4; C; 01C6; # LATIN CAPITAL LETTER DZ WITH CARON
+01C5; C; 01C6; # LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON
+01C7; C; 01C9; # LATIN CAPITAL LETTER LJ
+01C8; C; 01C9; # LATIN CAPITAL LETTER L WITH SMALL LETTER J
+01CA; C; 01CC; # LATIN CAPITAL LETTER NJ
+01CB; C; 01CC; # LATIN CAPITAL LETTER N WITH SMALL LETTER J
+01CD; C; 01CE; # LATIN CAPITAL LETTER A WITH CARON
+01CF; C; 01D0; # LATIN CAPITAL LETTER I WITH CARON
+01D1; C; 01D2; # LATIN CAPITAL LETTER O WITH CARON
+01D3; C; 01D4; # LATIN CAPITAL LETTER U WITH CARON
+01D5; C; 01D6; # LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON
+01D7; C; 01D8; # LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE
+01D9; C; 01DA; # LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON
+01DB; C; 01DC; # LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE
+01DE; C; 01DF; # LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON
+01E0; C; 01E1; # LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON
+01E2; C; 01E3; # LATIN CAPITAL LETTER AE WITH MACRON
+01E4; C; 01E5; # LATIN CAPITAL LETTER G WITH STROKE
+01E6; C; 01E7; # LATIN CAPITAL LETTER G WITH CARON
+01E8; C; 01E9; # LATIN CAPITAL LETTER K WITH CARON
+01EA; C; 01EB; # LATIN CAPITAL LETTER O WITH OGONEK
+01EC; C; 01ED; # LATIN CAPITAL LETTER O WITH OGONEK AND MACRON
+01EE; C; 01EF; # LATIN CAPITAL LETTER EZH WITH CARON
+01F0; F; 006A 030C; # LATIN SMALL LETTER J WITH CARON
+01F1; C; 01F3; # LATIN CAPITAL LETTER DZ
+01F2; C; 01F3; # LATIN CAPITAL LETTER D WITH SMALL LETTER Z
+01F4; C; 01F5; # LATIN CAPITAL LETTER G WITH ACUTE
+01F6; C; 0195; # LATIN CAPITAL LETTER HWAIR
+01F7; C; 01BF; # LATIN CAPITAL LETTER WYNN
+01F8; C; 01F9; # LATIN CAPITAL LETTER N WITH GRAVE
+01FA; C; 01FB; # LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
+01FC; C; 01FD; # LATIN CAPITAL LETTER AE WITH ACUTE
+01FE; C; 01FF; # LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
+0200; C; 0201; # LATIN CAPITAL LETTER A WITH DOUBLE GRAVE
+0202; C; 0203; # LATIN CAPITAL LETTER A WITH INVERTED BREVE
+0204; C; 0205; # LATIN CAPITAL LETTER E WITH DOUBLE GRAVE
+0206; C; 0207; # LATIN CAPITAL LETTER E WITH INVERTED BREVE
+0208; C; 0209; # LATIN CAPITAL LETTER I WITH DOUBLE GRAVE
+020A; C; 020B; # LATIN CAPITAL LETTER I WITH INVERTED BREVE
+020C; C; 020D; # LATIN CAPITAL LETTER O WITH DOUBLE GRAVE
+020E; C; 020F; # LATIN CAPITAL LETTER O WITH INVERTED BREVE
+0210; C; 0211; # LATIN CAPITAL LETTER R WITH DOUBLE GRAVE
+0212; C; 0213; # LATIN CAPITAL LETTER R WITH INVERTED BREVE
+0214; C; 0215; # LATIN CAPITAL LETTER U WITH DOUBLE GRAVE
+0216; C; 0217; # LATIN CAPITAL LETTER U WITH INVERTED BREVE
+0218; C; 0219; # LATIN CAPITAL LETTER S WITH COMMA BELOW
+021A; C; 021B; # LATIN CAPITAL LETTER T WITH COMMA BELOW
+021C; C; 021D; # LATIN CAPITAL LETTER YOGH
+021E; C; 021F; # LATIN CAPITAL LETTER H WITH CARON
+0220; C; 019E; # LATIN CAPITAL LETTER N WITH LONG RIGHT LEG
+0222; C; 0223; # LATIN CAPITAL LETTER OU
+0224; C; 0225; # LATIN CAPITAL LETTER Z WITH HOOK
+0226; C; 0227; # LATIN CAPITAL LETTER A WITH DOT ABOVE
+0228; C; 0229; # LATIN CAPITAL LETTER E WITH CEDILLA
+022A; C; 022B; # LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON
+022C; C; 022D; # LATIN CAPITAL LETTER O WITH TILDE AND MACRON
+022E; C; 022F; # LATIN CAPITAL LETTER O WITH DOT ABOVE
+0230; C; 0231; # LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON
+0232; C; 0233; # LATIN CAPITAL LETTER Y WITH MACRON
+023A; C; 2C65; # LATIN CAPITAL LETTER A WITH STROKE
+023B; C; 023C; # LATIN CAPITAL LETTER C WITH STROKE
+023D; C; 019A; # LATIN CAPITAL LETTER L WITH BAR
+023E; C; 2C66; # LATIN CAPITAL LETTER T WITH DIAGONAL STROKE
+0241; C; 0242; # LATIN CAPITAL LETTER GLOTTAL STOP
+0243; C; 0180; # LATIN CAPITAL LETTER B WITH STROKE
+0244; C; 0289; # LATIN CAPITAL LETTER U BAR
+0245; C; 028C; # LATIN CAPITAL LETTER TURNED V
+0246; C; 0247; # LATIN CAPITAL LETTER E WITH STROKE
+0248; C; 0249; # LATIN CAPITAL LETTER J WITH STROKE
+024A; C; 024B; # LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL
+024C; C; 024D; # LATIN CAPITAL LETTER R WITH STROKE
+024E; C; 024F; # LATIN CAPITAL LETTER Y WITH STROKE
+0345; C; 03B9; # COMBINING GREEK YPOGEGRAMMENI
+0370; C; 0371; # GREEK CAPITAL LETTER HETA
+0372; C; 0373; # GREEK CAPITAL LETTER ARCHAIC SAMPI
+0376; C; 0377; # GREEK CAPITAL LETTER PAMPHYLIAN DIGAMMA
+037F; C; 03F3; # GREEK CAPITAL LETTER YOT
+0386; C; 03AC; # GREEK CAPITAL LETTER ALPHA WITH TONOS
+0388; C; 03AD; # GREEK CAPITAL LETTER EPSILON WITH TONOS
+0389; C; 03AE; # GREEK CAPITAL LETTER ETA WITH TONOS
+038A; C; 03AF; # GREEK CAPITAL LETTER IOTA WITH TONOS
+038C; C; 03CC; # GREEK CAPITAL LETTER OMICRON WITH TONOS
+038E; C; 03CD; # GREEK CAPITAL LETTER UPSILON WITH TONOS
+038F; C; 03CE; # GREEK CAPITAL LETTER OMEGA WITH TONOS
+0390; F; 03B9 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+0391; C; 03B1; # GREEK CAPITAL LETTER ALPHA
+0392; C; 03B2; # GREEK CAPITAL LETTER BETA
+0393; C; 03B3; # GREEK CAPITAL LETTER GAMMA
+0394; C; 03B4; # GREEK CAPITAL LETTER DELTA
+0395; C; 03B5; # GREEK CAPITAL LETTER EPSILON
+0396; C; 03B6; # GREEK CAPITAL LETTER ZETA
+0397; C; 03B7; # GREEK CAPITAL LETTER ETA
+0398; C; 03B8; # GREEK CAPITAL LETTER THETA
+0399; C; 03B9; # GREEK CAPITAL LETTER IOTA
+039A; C; 03BA; # GREEK CAPITAL LETTER KAPPA
+039B; C; 03BB; # GREEK CAPITAL LETTER LAMDA
+039C; C; 03BC; # GREEK CAPITAL LETTER MU
+039D; C; 03BD; # GREEK CAPITAL LETTER NU
+039E; C; 03BE; # GREEK CAPITAL LETTER XI
+039F; C; 03BF; # GREEK CAPITAL LETTER OMICRON
+03A0; C; 03C0; # GREEK CAPITAL LETTER PI
+03A1; C; 03C1; # GREEK CAPITAL LETTER RHO
+03A3; C; 03C3; # GREEK CAPITAL LETTER SIGMA
+03A4; C; 03C4; # GREEK CAPITAL LETTER TAU
+03A5; C; 03C5; # GREEK CAPITAL LETTER UPSILON
+03A6; C; 03C6; # GREEK CAPITAL LETTER PHI
+03A7; C; 03C7; # GREEK CAPITAL LETTER CHI
+03A8; C; 03C8; # GREEK CAPITAL LETTER PSI
+03A9; C; 03C9; # GREEK CAPITAL LETTER OMEGA
+03AA; C; 03CA; # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+03AB; C; 03CB; # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+03B0; F; 03C5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+03C2; C; 03C3; # GREEK SMALL LETTER FINAL SIGMA
+03CF; C; 03D7; # GREEK CAPITAL KAI SYMBOL
+03D0; C; 03B2; # GREEK BETA SYMBOL
+03D1; C; 03B8; # GREEK THETA SYMBOL
+03D5; C; 03C6; # GREEK PHI SYMBOL
+03D6; C; 03C0; # GREEK PI SYMBOL
+03D8; C; 03D9; # GREEK LETTER ARCHAIC KOPPA
+03DA; C; 03DB; # GREEK LETTER STIGMA
+03DC; C; 03DD; # GREEK LETTER DIGAMMA
+03DE; C; 03DF; # GREEK LETTER KOPPA
+03E0; C; 03E1; # GREEK LETTER SAMPI
+03E2; C; 03E3; # COPTIC CAPITAL LETTER SHEI
+03E4; C; 03E5; # COPTIC CAPITAL LETTER FEI
+03E6; C; 03E7; # COPTIC CAPITAL LETTER KHEI
+03E8; C; 03E9; # COPTIC CAPITAL LETTER HORI
+03EA; C; 03EB; # COPTIC CAPITAL LETTER GANGIA
+03EC; C; 03ED; # COPTIC CAPITAL LETTER SHIMA
+03EE; C; 03EF; # COPTIC CAPITAL LETTER DEI
+03F0; C; 03BA; # GREEK KAPPA SYMBOL
+03F1; C; 03C1; # GREEK RHO SYMBOL
+03F4; C; 03B8; # GREEK CAPITAL THETA SYMBOL
+03F5; C; 03B5; # GREEK LUNATE EPSILON SYMBOL
+03F7; C; 03F8; # GREEK CAPITAL LETTER SHO
+03F9; C; 03F2; # GREEK CAPITAL LUNATE SIGMA SYMBOL
+03FA; C; 03FB; # GREEK CAPITAL LETTER SAN
+03FD; C; 037B; # GREEK CAPITAL REVERSED LUNATE SIGMA SYMBOL
+03FE; C; 037C; # GREEK CAPITAL DOTTED LUNATE SIGMA SYMBOL
+03FF; C; 037D; # GREEK CAPITAL REVERSED DOTTED LUNATE SIGMA SYMBOL
+0400; C; 0450; # CYRILLIC CAPITAL LETTER IE WITH GRAVE
+0401; C; 0451; # CYRILLIC CAPITAL LETTER IO
+0402; C; 0452; # CYRILLIC CAPITAL LETTER DJE
+0403; C; 0453; # CYRILLIC CAPITAL LETTER GJE
+0404; C; 0454; # CYRILLIC CAPITAL LETTER UKRAINIAN IE
+0405; C; 0455; # CYRILLIC CAPITAL LETTER DZE
+0406; C; 0456; # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
+0407; C; 0457; # CYRILLIC CAPITAL LETTER YI
+0408; C; 0458; # CYRILLIC CAPITAL LETTER JE
+0409; C; 0459; # CYRILLIC CAPITAL LETTER LJE
+040A; C; 045A; # CYRILLIC CAPITAL LETTER NJE
+040B; C; 045B; # CYRILLIC CAPITAL LETTER TSHE
+040C; C; 045C; # CYRILLIC CAPITAL LETTER KJE
+040D; C; 045D; # CYRILLIC CAPITAL LETTER I WITH GRAVE
+040E; C; 045E; # CYRILLIC CAPITAL LETTER SHORT U
+040F; C; 045F; # CYRILLIC CAPITAL LETTER DZHE
+0410; C; 0430; # CYRILLIC CAPITAL LETTER A
+0411; C; 0431; # CYRILLIC CAPITAL LETTER BE
+0412; C; 0432; # CYRILLIC CAPITAL LETTER VE
+0413; C; 0433; # CYRILLIC CAPITAL LETTER GHE
+0414; C; 0434; # CYRILLIC CAPITAL LETTER DE
+0415; C; 0435; # CYRILLIC CAPITAL LETTER IE
+0416; C; 0436; # CYRILLIC CAPITAL LETTER ZHE
+0417; C; 0437; # CYRILLIC CAPITAL LETTER ZE
+0418; C; 0438; # CYRILLIC CAPITAL LETTER I
+0419; C; 0439; # CYRILLIC CAPITAL LETTER SHORT I
+041A; C; 043A; # CYRILLIC CAPITAL LETTER KA
+041B; C; 043B; # CYRILLIC CAPITAL LETTER EL
+041C; C; 043C; # CYRILLIC CAPITAL LETTER EM
+041D; C; 043D; # CYRILLIC CAPITAL LETTER EN
+041E; C; 043E; # CYRILLIC CAPITAL LETTER O
+041F; C; 043F; # CYRILLIC CAPITAL LETTER PE
+0420; C; 0440; # CYRILLIC CAPITAL LETTER ER
+0421; C; 0441; # CYRILLIC CAPITAL LETTER ES
+0422; C; 0442; # CYRILLIC CAPITAL LETTER TE
+0423; C; 0443; # CYRILLIC CAPITAL LETTER U
+0424; C; 0444; # CYRILLIC CAPITAL LETTER EF
+0425; C; 0445; # CYRILLIC CAPITAL LETTER HA
+0426; C; 0446; # CYRILLIC CAPITAL LETTER TSE
+0427; C; 0447; # CYRILLIC CAPITAL LETTER CHE
+0428; C; 0448; # CYRILLIC CAPITAL LETTER SHA
+0429; C; 0449; # CYRILLIC CAPITAL LETTER SHCHA
+042A; C; 044A; # CYRILLIC CAPITAL LETTER HARD SIGN
+042B; C; 044B; # CYRILLIC CAPITAL LETTER YERU
+042C; C; 044C; # CYRILLIC CAPITAL LETTER SOFT SIGN
+042D; C; 044D; # CYRILLIC CAPITAL LETTER E
+042E; C; 044E; # CYRILLIC CAPITAL LETTER YU
+042F; C; 044F; # CYRILLIC CAPITAL LETTER YA
+0460; C; 0461; # CYRILLIC CAPITAL LETTER OMEGA
+0462; C; 0463; # CYRILLIC CAPITAL LETTER YAT
+0464; C; 0465; # CYRILLIC CAPITAL LETTER IOTIFIED E
+0466; C; 0467; # CYRILLIC CAPITAL LETTER LITTLE YUS
+0468; C; 0469; # CYRILLIC CAPITAL LETTER IOTIFIED LITTLE YUS
+046A; C; 046B; # CYRILLIC CAPITAL LETTER BIG YUS
+046C; C; 046D; # CYRILLIC CAPITAL LETTER IOTIFIED BIG YUS
+046E; C; 046F; # CYRILLIC CAPITAL LETTER KSI
+0470; C; 0471; # CYRILLIC CAPITAL LETTER PSI
+0472; C; 0473; # CYRILLIC CAPITAL LETTER FITA
+0474; C; 0475; # CYRILLIC CAPITAL LETTER IZHITSA
+0476; C; 0477; # CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT
+0478; C; 0479; # CYRILLIC CAPITAL LETTER UK
+047A; C; 047B; # CYRILLIC CAPITAL LETTER ROUND OMEGA
+047C; C; 047D; # CYRILLIC CAPITAL LETTER OMEGA WITH TITLO
+047E; C; 047F; # CYRILLIC CAPITAL LETTER OT
+0480; C; 0481; # CYRILLIC CAPITAL LETTER KOPPA
+048A; C; 048B; # CYRILLIC CAPITAL LETTER SHORT I WITH TAIL
+048C; C; 048D; # CYRILLIC CAPITAL LETTER SEMISOFT SIGN
+048E; C; 048F; # CYRILLIC CAPITAL LETTER ER WITH TICK
+0490; C; 0491; # CYRILLIC CAPITAL LETTER GHE WITH UPTURN
+0492; C; 0493; # CYRILLIC CAPITAL LETTER GHE WITH STROKE
+0494; C; 0495; # CYRILLIC CAPITAL LETTER GHE WITH MIDDLE HOOK
+0496; C; 0497; # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
+0498; C; 0499; # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
+049A; C; 049B; # CYRILLIC CAPITAL LETTER KA WITH DESCENDER
+049C; C; 049D; # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
+049E; C; 049F; # CYRILLIC CAPITAL LETTER KA WITH STROKE
+04A0; C; 04A1; # CYRILLIC CAPITAL LETTER BASHKIR KA
+04A2; C; 04A3; # CYRILLIC CAPITAL LETTER EN WITH DESCENDER
+04A4; C; 04A5; # CYRILLIC CAPITAL LIGATURE EN GHE
+04A6; C; 04A7; # CYRILLIC CAPITAL LETTER PE WITH MIDDLE HOOK
+04A8; C; 04A9; # CYRILLIC CAPITAL LETTER ABKHASIAN HA
+04AA; C; 04AB; # CYRILLIC CAPITAL LETTER ES WITH DESCENDER
+04AC; C; 04AD; # CYRILLIC CAPITAL LETTER TE WITH DESCENDER
+04AE; C; 04AF; # CYRILLIC CAPITAL LETTER STRAIGHT U
+04B0; C; 04B1; # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
+04B2; C; 04B3; # CYRILLIC CAPITAL LETTER HA WITH DESCENDER
+04B4; C; 04B5; # CYRILLIC CAPITAL LIGATURE TE TSE
+04B6; C; 04B7; # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
+04B8; C; 04B9; # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
+04BA; C; 04BB; # CYRILLIC CAPITAL LETTER SHHA
+04BC; C; 04BD; # CYRILLIC CAPITAL LETTER ABKHASIAN CHE
+04BE; C; 04BF; # CYRILLIC CAPITAL LETTER ABKHASIAN CHE WITH DESCENDER
+04C0; C; 04CF; # CYRILLIC LETTER PALOCHKA
+04C1; C; 04C2; # CYRILLIC CAPITAL LETTER ZHE WITH BREVE
+04C3; C; 04C4; # CYRILLIC CAPITAL LETTER KA WITH HOOK
+04C5; C; 04C6; # CYRILLIC CAPITAL LETTER EL WITH TAIL
+04C7; C; 04C8; # CYRILLIC CAPITAL LETTER EN WITH HOOK
+04C9; C; 04CA; # CYRILLIC CAPITAL LETTER EN WITH TAIL
+04CB; C; 04CC; # CYRILLIC CAPITAL LETTER KHAKASSIAN CHE
+04CD; C; 04CE; # CYRILLIC CAPITAL LETTER EM WITH TAIL
+04D0; C; 04D1; # CYRILLIC CAPITAL LETTER A WITH BREVE
+04D2; C; 04D3; # CYRILLIC CAPITAL LETTER A WITH DIAERESIS
+04D4; C; 04D5; # CYRILLIC CAPITAL LIGATURE A IE
+04D6; C; 04D7; # CYRILLIC CAPITAL LETTER IE WITH BREVE
+04D8; C; 04D9; # CYRILLIC CAPITAL LETTER SCHWA
+04DA; C; 04DB; # CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS
+04DC; C; 04DD; # CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS
+04DE; C; 04DF; # CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS
+04E0; C; 04E1; # CYRILLIC CAPITAL LETTER ABKHASIAN DZE
+04E2; C; 04E3; # CYRILLIC CAPITAL LETTER I WITH MACRON
+04E4; C; 04E5; # CYRILLIC CAPITAL LETTER I WITH DIAERESIS
+04E6; C; 04E7; # CYRILLIC CAPITAL LETTER O WITH DIAERESIS
+04E8; C; 04E9; # CYRILLIC CAPITAL LETTER BARRED O
+04EA; C; 04EB; # CYRILLIC CAPITAL LETTER BARRED O WITH DIAERESIS
+04EC; C; 04ED; # CYRILLIC CAPITAL LETTER E WITH DIAERESIS
+04EE; C; 04EF; # CYRILLIC CAPITAL LETTER U WITH MACRON
+04F0; C; 04F1; # CYRILLIC CAPITAL LETTER U WITH DIAERESIS
+04F2; C; 04F3; # CYRILLIC CAPITAL LETTER U WITH DOUBLE ACUTE
+04F4; C; 04F5; # CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS
+04F6; C; 04F7; # CYRILLIC CAPITAL LETTER GHE WITH DESCENDER
+04F8; C; 04F9; # CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS
+04FA; C; 04FB; # CYRILLIC CAPITAL LETTER GHE WITH STROKE AND HOOK
+04FC; C; 04FD; # CYRILLIC CAPITAL LETTER HA WITH HOOK
+04FE; C; 04FF; # CYRILLIC CAPITAL LETTER HA WITH STROKE
+0500; C; 0501; # CYRILLIC CAPITAL LETTER KOMI DE
+0502; C; 0503; # CYRILLIC CAPITAL LETTER KOMI DJE
+0504; C; 0505; # CYRILLIC CAPITAL LETTER KOMI ZJE
+0506; C; 0507; # CYRILLIC CAPITAL LETTER KOMI DZJE
+0508; C; 0509; # CYRILLIC CAPITAL LETTER KOMI LJE
+050A; C; 050B; # CYRILLIC CAPITAL LETTER KOMI NJE
+050C; C; 050D; # CYRILLIC CAPITAL LETTER KOMI SJE
+050E; C; 050F; # CYRILLIC CAPITAL LETTER KOMI TJE
+0510; C; 0511; # CYRILLIC CAPITAL LETTER REVERSED ZE
+0512; C; 0513; # CYRILLIC CAPITAL LETTER EL WITH HOOK
+0514; C; 0515; # CYRILLIC CAPITAL LETTER LHA
+0516; C; 0517; # CYRILLIC CAPITAL LETTER RHA
+0518; C; 0519; # CYRILLIC CAPITAL LETTER YAE
+051A; C; 051B; # CYRILLIC CAPITAL LETTER QA
+051C; C; 051D; # CYRILLIC CAPITAL LETTER WE
+051E; C; 051F; # CYRILLIC CAPITAL LETTER ALEUT KA
+0520; C; 0521; # CYRILLIC CAPITAL LETTER EL WITH MIDDLE HOOK
+0522; C; 0523; # CYRILLIC CAPITAL LETTER EN WITH MIDDLE HOOK
+0524; C; 0525; # CYRILLIC CAPITAL LETTER PE WITH DESCENDER
+0526; C; 0527; # CYRILLIC CAPITAL LETTER SHHA WITH DESCENDER
+0528; C; 0529; # CYRILLIC CAPITAL LETTER EN WITH LEFT HOOK
+052A; C; 052B; # CYRILLIC CAPITAL LETTER DZZHE
+052C; C; 052D; # CYRILLIC CAPITAL LETTER DCHE
+052E; C; 052F; # CYRILLIC CAPITAL LETTER EL WITH DESCENDER
+0531; C; 0561; # ARMENIAN CAPITAL LETTER AYB
+0532; C; 0562; # ARMENIAN CAPITAL LETTER BEN
+0533; C; 0563; # ARMENIAN CAPITAL LETTER GIM
+0534; C; 0564; # ARMENIAN CAPITAL LETTER DA
+0535; C; 0565; # ARMENIAN CAPITAL LETTER ECH
+0536; C; 0566; # ARMENIAN CAPITAL LETTER ZA
+0537; C; 0567; # ARMENIAN CAPITAL LETTER EH
+0538; C; 0568; # ARMENIAN CAPITAL LETTER ET
+0539; C; 0569; # ARMENIAN CAPITAL LETTER TO
+053A; C; 056A; # ARMENIAN CAPITAL LETTER ZHE
+053B; C; 056B; # ARMENIAN CAPITAL LETTER INI
+053C; C; 056C; # ARMENIAN CAPITAL LETTER LIWN
+053D; C; 056D; # ARMENIAN CAPITAL LETTER XEH
+053E; C; 056E; # ARMENIAN CAPITAL LETTER CA
+053F; C; 056F; # ARMENIAN CAPITAL LETTER KEN
+0540; C; 0570; # ARMENIAN CAPITAL LETTER HO
+0541; C; 0571; # ARMENIAN CAPITAL LETTER JA
+0542; C; 0572; # ARMENIAN CAPITAL LETTER GHAD
+0543; C; 0573; # ARMENIAN CAPITAL LETTER CHEH
+0544; C; 0574; # ARMENIAN CAPITAL LETTER MEN
+0545; C; 0575; # ARMENIAN CAPITAL LETTER YI
+0546; C; 0576; # ARMENIAN CAPITAL LETTER NOW
+0547; C; 0577; # ARMENIAN CAPITAL LETTER SHA
+0548; C; 0578; # ARMENIAN CAPITAL LETTER VO
+0549; C; 0579; # ARMENIAN CAPITAL LETTER CHA
+054A; C; 057A; # ARMENIAN CAPITAL LETTER PEH
+054B; C; 057B; # ARMENIAN CAPITAL LETTER JHEH
+054C; C; 057C; # ARMENIAN CAPITAL LETTER RA
+054D; C; 057D; # ARMENIAN CAPITAL LETTER SEH
+054E; C; 057E; # ARMENIAN CAPITAL LETTER VEW
+054F; C; 057F; # ARMENIAN CAPITAL LETTER TIWN
+0550; C; 0580; # ARMENIAN CAPITAL LETTER REH
+0551; C; 0581; # ARMENIAN CAPITAL LETTER CO
+0552; C; 0582; # ARMENIAN CAPITAL LETTER YIWN
+0553; C; 0583; # ARMENIAN CAPITAL LETTER PIWR
+0554; C; 0584; # ARMENIAN CAPITAL LETTER KEH
+0555; C; 0585; # ARMENIAN CAPITAL LETTER OH
+0556; C; 0586; # ARMENIAN CAPITAL LETTER FEH
+0587; F; 0565 0582; # ARMENIAN SMALL LIGATURE ECH YIWN
+10A0; C; 2D00; # GEORGIAN CAPITAL LETTER AN
+10A1; C; 2D01; # GEORGIAN CAPITAL LETTER BAN
+10A2; C; 2D02; # GEORGIAN CAPITAL LETTER GAN
+10A3; C; 2D03; # GEORGIAN CAPITAL LETTER DON
+10A4; C; 2D04; # GEORGIAN CAPITAL LETTER EN
+10A5; C; 2D05; # GEORGIAN CAPITAL LETTER VIN
+10A6; C; 2D06; # GEORGIAN CAPITAL LETTER ZEN
+10A7; C; 2D07; # GEORGIAN CAPITAL LETTER TAN
+10A8; C; 2D08; # GEORGIAN CAPITAL LETTER IN
+10A9; C; 2D09; # GEORGIAN CAPITAL LETTER KAN
+10AA; C; 2D0A; # GEORGIAN CAPITAL LETTER LAS
+10AB; C; 2D0B; # GEORGIAN CAPITAL LETTER MAN
+10AC; C; 2D0C; # GEORGIAN CAPITAL LETTER NAR
+10AD; C; 2D0D; # GEORGIAN CAPITAL LETTER ON
+10AE; C; 2D0E; # GEORGIAN CAPITAL LETTER PAR
+10AF; C; 2D0F; # GEORGIAN CAPITAL LETTER ZHAR
+10B0; C; 2D10; # GEORGIAN CAPITAL LETTER RAE
+10B1; C; 2D11; # GEORGIAN CAPITAL LETTER SAN
+10B2; C; 2D12; # GEORGIAN CAPITAL LETTER TAR
+10B3; C; 2D13; # GEORGIAN CAPITAL LETTER UN
+10B4; C; 2D14; # GEORGIAN CAPITAL LETTER PHAR
+10B5; C; 2D15; # GEORGIAN CAPITAL LETTER KHAR
+10B6; C; 2D16; # GEORGIAN CAPITAL LETTER GHAN
+10B7; C; 2D17; # GEORGIAN CAPITAL LETTER QAR
+10B8; C; 2D18; # GEORGIAN CAPITAL LETTER SHIN
+10B9; C; 2D19; # GEORGIAN CAPITAL LETTER CHIN
+10BA; C; 2D1A; # GEORGIAN CAPITAL LETTER CAN
+10BB; C; 2D1B; # GEORGIAN CAPITAL LETTER JIL
+10BC; C; 2D1C; # GEORGIAN CAPITAL LETTER CIL
+10BD; C; 2D1D; # GEORGIAN CAPITAL LETTER CHAR
+10BE; C; 2D1E; # GEORGIAN CAPITAL LETTER XAN
+10BF; C; 2D1F; # GEORGIAN CAPITAL LETTER JHAN
+10C0; C; 2D20; # GEORGIAN CAPITAL LETTER HAE
+10C1; C; 2D21; # GEORGIAN CAPITAL LETTER HE
+10C2; C; 2D22; # GEORGIAN CAPITAL LETTER HIE
+10C3; C; 2D23; # GEORGIAN CAPITAL LETTER WE
+10C4; C; 2D24; # GEORGIAN CAPITAL LETTER HAR
+10C5; C; 2D25; # GEORGIAN CAPITAL LETTER HOE
+10C7; C; 2D27; # GEORGIAN CAPITAL LETTER YN
+10CD; C; 2D2D; # GEORGIAN CAPITAL LETTER AEN
+13F8; C; 13F0; # CHEROKEE SMALL LETTER YE
+13F9; C; 13F1; # CHEROKEE SMALL LETTER YI
+13FA; C; 13F2; # CHEROKEE SMALL LETTER YO
+13FB; C; 13F3; # CHEROKEE SMALL LETTER YU
+13FC; C; 13F4; # CHEROKEE SMALL LETTER YV
+13FD; C; 13F5; # CHEROKEE SMALL LETTER MV
+1C80; C; 0432; # CYRILLIC SMALL LETTER ROUNDED VE
+1C81; C; 0434; # CYRILLIC SMALL LETTER LONG-LEGGED DE
+1C82; C; 043E; # CYRILLIC SMALL LETTER NARROW O
+1C83; C; 0441; # CYRILLIC SMALL LETTER WIDE ES
+1C84; C; 0442; # CYRILLIC SMALL LETTER TALL TE
+1C85; C; 0442; # CYRILLIC SMALL LETTER THREE-LEGGED TE
+1C86; C; 044A; # CYRILLIC SMALL LETTER TALL HARD SIGN
+1C87; C; 0463; # CYRILLIC SMALL LETTER TALL YAT
+1C88; C; A64B; # CYRILLIC SMALL LETTER UNBLENDED UK
+1C90; C; 10D0; # GEORGIAN MTAVRULI CAPITAL LETTER AN
+1C91; C; 10D1; # GEORGIAN MTAVRULI CAPITAL LETTER BAN
+1C92; C; 10D2; # GEORGIAN MTAVRULI CAPITAL LETTER GAN
+1C93; C; 10D3; # GEORGIAN MTAVRULI CAPITAL LETTER DON
+1C94; C; 10D4; # GEORGIAN MTAVRULI CAPITAL LETTER EN
+1C95; C; 10D5; # GEORGIAN MTAVRULI CAPITAL LETTER VIN
+1C96; C; 10D6; # GEORGIAN MTAVRULI CAPITAL LETTER ZEN
+1C97; C; 10D7; # GEORGIAN MTAVRULI CAPITAL LETTER TAN
+1C98; C; 10D8; # GEORGIAN MTAVRULI CAPITAL LETTER IN
+1C99; C; 10D9; # GEORGIAN MTAVRULI CAPITAL LETTER KAN
+1C9A; C; 10DA; # GEORGIAN MTAVRULI CAPITAL LETTER LAS
+1C9B; C; 10DB; # GEORGIAN MTAVRULI CAPITAL LETTER MAN
+1C9C; C; 10DC; # GEORGIAN MTAVRULI CAPITAL LETTER NAR
+1C9D; C; 10DD; # GEORGIAN MTAVRULI CAPITAL LETTER ON
+1C9E; C; 10DE; # GEORGIAN MTAVRULI CAPITAL LETTER PAR
+1C9F; C; 10DF; # GEORGIAN MTAVRULI CAPITAL LETTER ZHAR
+1CA0; C; 10E0; # GEORGIAN MTAVRULI CAPITAL LETTER RAE
+1CA1; C; 10E1; # GEORGIAN MTAVRULI CAPITAL LETTER SAN
+1CA2; C; 10E2; # GEORGIAN MTAVRULI CAPITAL LETTER TAR
+1CA3; C; 10E3; # GEORGIAN MTAVRULI CAPITAL LETTER UN
+1CA4; C; 10E4; # GEORGIAN MTAVRULI CAPITAL LETTER PHAR
+1CA5; C; 10E5; # GEORGIAN MTAVRULI CAPITAL LETTER KHAR
+1CA6; C; 10E6; # GEORGIAN MTAVRULI CAPITAL LETTER GHAN
+1CA7; C; 10E7; # GEORGIAN MTAVRULI CAPITAL LETTER QAR
+1CA8; C; 10E8; # GEORGIAN MTAVRULI CAPITAL LETTER SHIN
+1CA9; C; 10E9; # GEORGIAN MTAVRULI CAPITAL LETTER CHIN
+1CAA; C; 10EA; # GEORGIAN MTAVRULI CAPITAL LETTER CAN
+1CAB; C; 10EB; # GEORGIAN MTAVRULI CAPITAL LETTER JIL
+1CAC; C; 10EC; # GEORGIAN MTAVRULI CAPITAL LETTER CIL
+1CAD; C; 10ED; # GEORGIAN MTAVRULI CAPITAL LETTER CHAR
+1CAE; C; 10EE; # GEORGIAN MTAVRULI CAPITAL LETTER XAN
+1CAF; C; 10EF; # GEORGIAN MTAVRULI CAPITAL LETTER JHAN
+1CB0; C; 10F0; # GEORGIAN MTAVRULI CAPITAL LETTER HAE
+1CB1; C; 10F1; # GEORGIAN MTAVRULI CAPITAL LETTER HE
+1CB2; C; 10F2; # GEORGIAN MTAVRULI CAPITAL LETTER HIE
+1CB3; C; 10F3; # GEORGIAN MTAVRULI CAPITAL LETTER WE
+1CB4; C; 10F4; # GEORGIAN MTAVRULI CAPITAL LETTER HAR
+1CB5; C; 10F5; # GEORGIAN MTAVRULI CAPITAL LETTER HOE
+1CB6; C; 10F6; # GEORGIAN MTAVRULI CAPITAL LETTER FI
+1CB7; C; 10F7; # GEORGIAN MTAVRULI CAPITAL LETTER YN
+1CB8; C; 10F8; # GEORGIAN MTAVRULI CAPITAL LETTER ELIFI
+1CB9; C; 10F9; # GEORGIAN MTAVRULI CAPITAL LETTER TURNED GAN
+1CBA; C; 10FA; # GEORGIAN MTAVRULI CAPITAL LETTER AIN
+1CBD; C; 10FD; # GEORGIAN MTAVRULI CAPITAL LETTER AEN
+1CBE; C; 10FE; # GEORGIAN MTAVRULI CAPITAL LETTER HARD SIGN
+1CBF; C; 10FF; # GEORGIAN MTAVRULI CAPITAL LETTER LABIAL SIGN
+1E00; C; 1E01; # LATIN CAPITAL LETTER A WITH RING BELOW
+1E02; C; 1E03; # LATIN CAPITAL LETTER B WITH DOT ABOVE
+1E04; C; 1E05; # LATIN CAPITAL LETTER B WITH DOT BELOW
+1E06; C; 1E07; # LATIN CAPITAL LETTER B WITH LINE BELOW
+1E08; C; 1E09; # LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE
+1E0A; C; 1E0B; # LATIN CAPITAL LETTER D WITH DOT ABOVE
+1E0C; C; 1E0D; # LATIN CAPITAL LETTER D WITH DOT BELOW
+1E0E; C; 1E0F; # LATIN CAPITAL LETTER D WITH LINE BELOW
+1E10; C; 1E11; # LATIN CAPITAL LETTER D WITH CEDILLA
+1E12; C; 1E13; # LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW
+1E14; C; 1E15; # LATIN CAPITAL LETTER E WITH MACRON AND GRAVE
+1E16; C; 1E17; # LATIN CAPITAL LETTER E WITH MACRON AND ACUTE
+1E18; C; 1E19; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW
+1E1A; C; 1E1B; # LATIN CAPITAL LETTER E WITH TILDE BELOW
+1E1C; C; 1E1D; # LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE
+1E1E; C; 1E1F; # LATIN CAPITAL LETTER F WITH DOT ABOVE
+1E20; C; 1E21; # LATIN CAPITAL LETTER G WITH MACRON
+1E22; C; 1E23; # LATIN CAPITAL LETTER H WITH DOT ABOVE
+1E24; C; 1E25; # LATIN CAPITAL LETTER H WITH DOT BELOW
+1E26; C; 1E27; # LATIN CAPITAL LETTER H WITH DIAERESIS
+1E28; C; 1E29; # LATIN CAPITAL LETTER H WITH CEDILLA
+1E2A; C; 1E2B; # LATIN CAPITAL LETTER H WITH BREVE BELOW
+1E2C; C; 1E2D; # LATIN CAPITAL LETTER I WITH TILDE BELOW
+1E2E; C; 1E2F; # LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE
+1E30; C; 1E31; # LATIN CAPITAL LETTER K WITH ACUTE
+1E32; C; 1E33; # LATIN CAPITAL LETTER K WITH DOT BELOW
+1E34; C; 1E35; # LATIN CAPITAL LETTER K WITH LINE BELOW
+1E36; C; 1E37; # LATIN CAPITAL LETTER L WITH DOT BELOW
+1E38; C; 1E39; # LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON
+1E3A; C; 1E3B; # LATIN CAPITAL LETTER L WITH LINE BELOW
+1E3C; C; 1E3D; # LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW
+1E3E; C; 1E3F; # LATIN CAPITAL LETTER M WITH ACUTE
+1E40; C; 1E41; # LATIN CAPITAL LETTER M WITH DOT ABOVE
+1E42; C; 1E43; # LATIN CAPITAL LETTER M WITH DOT BELOW
+1E44; C; 1E45; # LATIN CAPITAL LETTER N WITH DOT ABOVE
+1E46; C; 1E47; # LATIN CAPITAL LETTER N WITH DOT BELOW
+1E48; C; 1E49; # LATIN CAPITAL LETTER N WITH LINE BELOW
+1E4A; C; 1E4B; # LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW
+1E4C; C; 1E4D; # LATIN CAPITAL LETTER O WITH TILDE AND ACUTE
+1E4E; C; 1E4F; # LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS
+1E50; C; 1E51; # LATIN CAPITAL LETTER O WITH MACRON AND GRAVE
+1E52; C; 1E53; # LATIN CAPITAL LETTER O WITH MACRON AND ACUTE
+1E54; C; 1E55; # LATIN CAPITAL LETTER P WITH ACUTE
+1E56; C; 1E57; # LATIN CAPITAL LETTER P WITH DOT ABOVE
+1E58; C; 1E59; # LATIN CAPITAL LETTER R WITH DOT ABOVE
+1E5A; C; 1E5B; # LATIN CAPITAL LETTER R WITH DOT BELOW
+1E5C; C; 1E5D; # LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON
+1E5E; C; 1E5F; # LATIN CAPITAL LETTER R WITH LINE BELOW
+1E60; C; 1E61; # LATIN CAPITAL LETTER S WITH DOT ABOVE
+1E62; C; 1E63; # LATIN CAPITAL LETTER S WITH DOT BELOW
+1E64; C; 1E65; # LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE
+1E66; C; 1E67; # LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE
+1E68; C; 1E69; # LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE
+1E6A; C; 1E6B; # LATIN CAPITAL LETTER T WITH DOT ABOVE
+1E6C; C; 1E6D; # LATIN CAPITAL LETTER T WITH DOT BELOW
+1E6E; C; 1E6F; # LATIN CAPITAL LETTER T WITH LINE BELOW
+1E70; C; 1E71; # LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW
+1E72; C; 1E73; # LATIN CAPITAL LETTER U WITH DIAERESIS BELOW
+1E74; C; 1E75; # LATIN CAPITAL LETTER U WITH TILDE BELOW
+1E76; C; 1E77; # LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW
+1E78; C; 1E79; # LATIN CAPITAL LETTER U WITH TILDE AND ACUTE
+1E7A; C; 1E7B; # LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS
+1E7C; C; 1E7D; # LATIN CAPITAL LETTER V WITH TILDE
+1E7E; C; 1E7F; # LATIN CAPITAL LETTER V WITH DOT BELOW
+1E80; C; 1E81; # LATIN CAPITAL LETTER W WITH GRAVE
+1E82; C; 1E83; # LATIN CAPITAL LETTER W WITH ACUTE
+1E84; C; 1E85; # LATIN CAPITAL LETTER W WITH DIAERESIS
+1E86; C; 1E87; # LATIN CAPITAL LETTER W WITH DOT ABOVE
+1E88; C; 1E89; # LATIN CAPITAL LETTER W WITH DOT BELOW
+1E8A; C; 1E8B; # LATIN CAPITAL LETTER X WITH DOT ABOVE
+1E8C; C; 1E8D; # LATIN CAPITAL LETTER X WITH DIAERESIS
+1E8E; C; 1E8F; # LATIN CAPITAL LETTER Y WITH DOT ABOVE
+1E90; C; 1E91; # LATIN CAPITAL LETTER Z WITH CIRCUMFLEX
+1E92; C; 1E93; # LATIN CAPITAL LETTER Z WITH DOT BELOW
+1E94; C; 1E95; # LATIN CAPITAL LETTER Z WITH LINE BELOW
+1E96; F; 0068 0331; # LATIN SMALL LETTER H WITH LINE BELOW
+1E97; F; 0074 0308; # LATIN SMALL LETTER T WITH DIAERESIS
+1E98; F; 0077 030A; # LATIN SMALL LETTER W WITH RING ABOVE
+1E99; F; 0079 030A; # LATIN SMALL LETTER Y WITH RING ABOVE
+1E9A; F; 0061 02BE; # LATIN SMALL LETTER A WITH RIGHT HALF RING
+1E9B; C; 1E61; # LATIN SMALL LETTER LONG S WITH DOT ABOVE
+1E9E; F; 0073 0073; # LATIN CAPITAL LETTER SHARP S
+1E9E; S; 00DF; # LATIN CAPITAL LETTER SHARP S
+1EA0; C; 1EA1; # LATIN CAPITAL LETTER A WITH DOT BELOW
+1EA2; C; 1EA3; # LATIN CAPITAL LETTER A WITH HOOK ABOVE
+1EA4; C; 1EA5; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE
+1EA6; C; 1EA7; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE
+1EA8; C; 1EA9; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE
+1EAA; C; 1EAB; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE
+1EAC; C; 1EAD; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW
+1EAE; C; 1EAF; # LATIN CAPITAL LETTER A WITH BREVE AND ACUTE
+1EB0; C; 1EB1; # LATIN CAPITAL LETTER A WITH BREVE AND GRAVE
+1EB2; C; 1EB3; # LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE
+1EB4; C; 1EB5; # LATIN CAPITAL LETTER A WITH BREVE AND TILDE
+1EB6; C; 1EB7; # LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW
+1EB8; C; 1EB9; # LATIN CAPITAL LETTER E WITH DOT BELOW
+1EBA; C; 1EBB; # LATIN CAPITAL LETTER E WITH HOOK ABOVE
+1EBC; C; 1EBD; # LATIN CAPITAL LETTER E WITH TILDE
+1EBE; C; 1EBF; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE
+1EC0; C; 1EC1; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE
+1EC2; C; 1EC3; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE
+1EC4; C; 1EC5; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE
+1EC6; C; 1EC7; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW
+1EC8; C; 1EC9; # LATIN CAPITAL LETTER I WITH HOOK ABOVE
+1ECA; C; 1ECB; # LATIN CAPITAL LETTER I WITH DOT BELOW
+1ECC; C; 1ECD; # LATIN CAPITAL LETTER O WITH DOT BELOW
+1ECE; C; 1ECF; # LATIN CAPITAL LETTER O WITH HOOK ABOVE
+1ED0; C; 1ED1; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE
+1ED2; C; 1ED3; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE
+1ED4; C; 1ED5; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE
+1ED6; C; 1ED7; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE
+1ED8; C; 1ED9; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW
+1EDA; C; 1EDB; # LATIN CAPITAL LETTER O WITH HORN AND ACUTE
+1EDC; C; 1EDD; # LATIN CAPITAL LETTER O WITH HORN AND GRAVE
+1EDE; C; 1EDF; # LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE
+1EE0; C; 1EE1; # LATIN CAPITAL LETTER O WITH HORN AND TILDE
+1EE2; C; 1EE3; # LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW
+1EE4; C; 1EE5; # LATIN CAPITAL LETTER U WITH DOT BELOW
+1EE6; C; 1EE7; # LATIN CAPITAL LETTER U WITH HOOK ABOVE
+1EE8; C; 1EE9; # LATIN CAPITAL LETTER U WITH HORN AND ACUTE
+1EEA; C; 1EEB; # LATIN CAPITAL LETTER U WITH HORN AND GRAVE
+1EEC; C; 1EED; # LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE
+1EEE; C; 1EEF; # LATIN CAPITAL LETTER U WITH HORN AND TILDE
+1EF0; C; 1EF1; # LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW
+1EF2; C; 1EF3; # LATIN CAPITAL LETTER Y WITH GRAVE
+1EF4; C; 1EF5; # LATIN CAPITAL LETTER Y WITH DOT BELOW
+1EF6; C; 1EF7; # LATIN CAPITAL LETTER Y WITH HOOK ABOVE
+1EF8; C; 1EF9; # LATIN CAPITAL LETTER Y WITH TILDE
+1EFA; C; 1EFB; # LATIN CAPITAL LETTER MIDDLE-WELSH LL
+1EFC; C; 1EFD; # LATIN CAPITAL LETTER MIDDLE-WELSH V
+1EFE; C; 1EFF; # LATIN CAPITAL LETTER Y WITH LOOP
+1F08; C; 1F00; # GREEK CAPITAL LETTER ALPHA WITH PSILI
+1F09; C; 1F01; # GREEK CAPITAL LETTER ALPHA WITH DASIA
+1F0A; C; 1F02; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA
+1F0B; C; 1F03; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA
+1F0C; C; 1F04; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA
+1F0D; C; 1F05; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA
+1F0E; C; 1F06; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI
+1F0F; C; 1F07; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI
+1F18; C; 1F10; # GREEK CAPITAL LETTER EPSILON WITH PSILI
+1F19; C; 1F11; # GREEK CAPITAL LETTER EPSILON WITH DASIA
+1F1A; C; 1F12; # GREEK CAPITAL LETTER EPSILON WITH PSILI AND VARIA
+1F1B; C; 1F13; # GREEK CAPITAL LETTER EPSILON WITH DASIA AND VARIA
+1F1C; C; 1F14; # GREEK CAPITAL LETTER EPSILON WITH PSILI AND OXIA
+1F1D; C; 1F15; # GREEK CAPITAL LETTER EPSILON WITH DASIA AND OXIA
+1F28; C; 1F20; # GREEK CAPITAL LETTER ETA WITH PSILI
+1F29; C; 1F21; # GREEK CAPITAL LETTER ETA WITH DASIA
+1F2A; C; 1F22; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA
+1F2B; C; 1F23; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA
+1F2C; C; 1F24; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA
+1F2D; C; 1F25; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA
+1F2E; C; 1F26; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI
+1F2F; C; 1F27; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI
+1F38; C; 1F30; # GREEK CAPITAL LETTER IOTA WITH PSILI
+1F39; C; 1F31; # GREEK CAPITAL LETTER IOTA WITH DASIA
+1F3A; C; 1F32; # GREEK CAPITAL LETTER IOTA WITH PSILI AND VARIA
+1F3B; C; 1F33; # GREEK CAPITAL LETTER IOTA WITH DASIA AND VARIA
+1F3C; C; 1F34; # GREEK CAPITAL LETTER IOTA WITH PSILI AND OXIA
+1F3D; C; 1F35; # GREEK CAPITAL LETTER IOTA WITH DASIA AND OXIA
+1F3E; C; 1F36; # GREEK CAPITAL LETTER IOTA WITH PSILI AND PERISPOMENI
+1F3F; C; 1F37; # GREEK CAPITAL LETTER IOTA WITH DASIA AND PERISPOMENI
+1F48; C; 1F40; # GREEK CAPITAL LETTER OMICRON WITH PSILI
+1F49; C; 1F41; # GREEK CAPITAL LETTER OMICRON WITH DASIA
+1F4A; C; 1F42; # GREEK CAPITAL LETTER OMICRON WITH PSILI AND VARIA
+1F4B; C; 1F43; # GREEK CAPITAL LETTER OMICRON WITH DASIA AND VARIA
+1F4C; C; 1F44; # GREEK CAPITAL LETTER OMICRON WITH PSILI AND OXIA
+1F4D; C; 1F45; # GREEK CAPITAL LETTER OMICRON WITH DASIA AND OXIA
+1F50; F; 03C5 0313; # GREEK SMALL LETTER UPSILON WITH PSILI
+1F52; F; 03C5 0313 0300; # GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA
+1F54; F; 03C5 0313 0301; # GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA
+1F56; F; 03C5 0313 0342; # GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI
+1F59; C; 1F51; # GREEK CAPITAL LETTER UPSILON WITH DASIA
+1F5B; C; 1F53; # GREEK CAPITAL LETTER UPSILON WITH DASIA AND VARIA
+1F5D; C; 1F55; # GREEK CAPITAL LETTER UPSILON WITH DASIA AND OXIA
+1F5F; C; 1F57; # GREEK CAPITAL LETTER UPSILON WITH DASIA AND PERISPOMENI
+1F68; C; 1F60; # GREEK CAPITAL LETTER OMEGA WITH PSILI
+1F69; C; 1F61; # GREEK CAPITAL LETTER OMEGA WITH DASIA
+1F6A; C; 1F62; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA
+1F6B; C; 1F63; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA
+1F6C; C; 1F64; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA
+1F6D; C; 1F65; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA
+1F6E; C; 1F66; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI
+1F6F; C; 1F67; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI
+1F80; F; 1F00 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI
+1F81; F; 1F01 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI
+1F82; F; 1F02 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI
+1F83; F; 1F03 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI
+1F84; F; 1F04 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI
+1F85; F; 1F05 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI
+1F86; F; 1F06 03B9; # GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
+1F87; F; 1F07 03B9; # GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
+1F88; F; 1F00 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI
+1F88; S; 1F80; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI
+1F89; F; 1F01 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI
+1F89; S; 1F81; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI
+1F8A; F; 1F02 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F8A; S; 1F82; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F8B; F; 1F03 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F8B; S; 1F83; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F8C; F; 1F04 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F8C; S; 1F84; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F8D; F; 1F05 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F8D; S; 1F85; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F8E; F; 1F06 03B9; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F8E; S; 1F86; # GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F8F; F; 1F07 03B9; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1F8F; S; 1F87; # GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1F90; F; 1F20 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI
+1F91; F; 1F21 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI
+1F92; F; 1F22 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI
+1F93; F; 1F23 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI
+1F94; F; 1F24 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI
+1F95; F; 1F25 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI
+1F96; F; 1F26 03B9; # GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
+1F97; F; 1F27 03B9; # GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
+1F98; F; 1F20 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI
+1F98; S; 1F90; # GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI
+1F99; F; 1F21 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI
+1F99; S; 1F91; # GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI
+1F9A; F; 1F22 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F9A; S; 1F92; # GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1F9B; F; 1F23 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F9B; S; 1F93; # GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1F9C; F; 1F24 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F9C; S; 1F94; # GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1F9D; F; 1F25 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F9D; S; 1F95; # GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1F9E; F; 1F26 03B9; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F9E; S; 1F96; # GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1F9F; F; 1F27 03B9; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1F9F; S; 1F97; # GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1FA0; F; 1F60 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI
+1FA1; F; 1F61 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI
+1FA2; F; 1F62 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI
+1FA3; F; 1F63 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI
+1FA4; F; 1F64 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI
+1FA5; F; 1F65 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI
+1FA6; F; 1F66 03B9; # GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
+1FA7; F; 1F67 03B9; # GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
+1FA8; F; 1F60 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI
+1FA8; S; 1FA0; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI
+1FA9; F; 1F61 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI
+1FA9; S; 1FA1; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI
+1FAA; F; 1F62 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1FAA; S; 1FA2; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI
+1FAB; F; 1F63 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1FAB; S; 1FA3; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI
+1FAC; F; 1F64 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1FAC; S; 1FA4; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI
+1FAD; F; 1F65 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1FAD; S; 1FA5; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI
+1FAE; F; 1F66 03B9; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1FAE; S; 1FA6; # GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
+1FAF; F; 1F67 03B9; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1FAF; S; 1FA7; # GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
+1FB2; F; 1F70 03B9; # GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI
+1FB3; F; 03B1 03B9; # GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI
+1FB4; F; 03AC 03B9; # GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI
+1FB6; F; 03B1 0342; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI
+1FB7; F; 03B1 0342 03B9; # GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI
+1FB8; C; 1FB0; # GREEK CAPITAL LETTER ALPHA WITH VRACHY
+1FB9; C; 1FB1; # GREEK CAPITAL LETTER ALPHA WITH MACRON
+1FBA; C; 1F70; # GREEK CAPITAL LETTER ALPHA WITH VARIA
+1FBB; C; 1F71; # GREEK CAPITAL LETTER ALPHA WITH OXIA
+1FBC; F; 03B1 03B9; # GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI
+1FBC; S; 1FB3; # GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI
+1FBE; C; 03B9; # GREEK PROSGEGRAMMENI
+1FC2; F; 1F74 03B9; # GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI
+1FC3; F; 03B7 03B9; # GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI
+1FC4; F; 03AE 03B9; # GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI
+1FC6; F; 03B7 0342; # GREEK SMALL LETTER ETA WITH PERISPOMENI
+1FC7; F; 03B7 0342 03B9; # GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI
+1FC8; C; 1F72; # GREEK CAPITAL LETTER EPSILON WITH VARIA
+1FC9; C; 1F73; # GREEK CAPITAL LETTER EPSILON WITH OXIA
+1FCA; C; 1F74; # GREEK CAPITAL LETTER ETA WITH VARIA
+1FCB; C; 1F75; # GREEK CAPITAL LETTER ETA WITH OXIA
+1FCC; F; 03B7 03B9; # GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI
+1FCC; S; 1FC3; # GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI
+1FD2; F; 03B9 0308 0300; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA
+1FD3; F; 03B9 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA
+1FD6; F; 03B9 0342; # GREEK SMALL LETTER IOTA WITH PERISPOMENI
+1FD7; F; 03B9 0308 0342; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI
+1FD8; C; 1FD0; # GREEK CAPITAL LETTER IOTA WITH VRACHY
+1FD9; C; 1FD1; # GREEK CAPITAL LETTER IOTA WITH MACRON
+1FDA; C; 1F76; # GREEK CAPITAL LETTER IOTA WITH VARIA
+1FDB; C; 1F77; # GREEK CAPITAL LETTER IOTA WITH OXIA
+1FE2; F; 03C5 0308 0300; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA
+1FE3; F; 03C5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA
+1FE4; F; 03C1 0313; # GREEK SMALL LETTER RHO WITH PSILI
+1FE6; F; 03C5 0342; # GREEK SMALL LETTER UPSILON WITH PERISPOMENI
+1FE7; F; 03C5 0308 0342; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI
+1FE8; C; 1FE0; # GREEK CAPITAL LETTER UPSILON WITH VRACHY
+1FE9; C; 1FE1; # GREEK CAPITAL LETTER UPSILON WITH MACRON
+1FEA; C; 1F7A; # GREEK CAPITAL LETTER UPSILON WITH VARIA
+1FEB; C; 1F7B; # GREEK CAPITAL LETTER UPSILON WITH OXIA
+1FEC; C; 1FE5; # GREEK CAPITAL LETTER RHO WITH DASIA
+1FF2; F; 1F7C 03B9; # GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI
+1FF3; F; 03C9 03B9; # GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI
+1FF4; F; 03CE 03B9; # GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI
+1FF6; F; 03C9 0342; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI
+1FF7; F; 03C9 0342 03B9; # GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI
+1FF8; C; 1F78; # GREEK CAPITAL LETTER OMICRON WITH VARIA
+1FF9; C; 1F79; # GREEK CAPITAL LETTER OMICRON WITH OXIA
+1FFA; C; 1F7C; # GREEK CAPITAL LETTER OMEGA WITH VARIA
+1FFB; C; 1F7D; # GREEK CAPITAL LETTER OMEGA WITH OXIA
+1FFC; F; 03C9 03B9; # GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI
+1FFC; S; 1FF3; # GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI
+2126; C; 03C9; # OHM SIGN
+212A; C; 006B; # KELVIN SIGN
+212B; C; 00E5; # ANGSTROM SIGN
+2132; C; 214E; # TURNED CAPITAL F
+2160; C; 2170; # ROMAN NUMERAL ONE
+2161; C; 2171; # ROMAN NUMERAL TWO
+2162; C; 2172; # ROMAN NUMERAL THREE
+2163; C; 2173; # ROMAN NUMERAL FOUR
+2164; C; 2174; # ROMAN NUMERAL FIVE
+2165; C; 2175; # ROMAN NUMERAL SIX
+2166; C; 2176; # ROMAN NUMERAL SEVEN
+2167; C; 2177; # ROMAN NUMERAL EIGHT
+2168; C; 2178; # ROMAN NUMERAL NINE
+2169; C; 2179; # ROMAN NUMERAL TEN
+216A; C; 217A; # ROMAN NUMERAL ELEVEN
+216B; C; 217B; # ROMAN NUMERAL TWELVE
+216C; C; 217C; # ROMAN NUMERAL FIFTY
+216D; C; 217D; # ROMAN NUMERAL ONE HUNDRED
+216E; C; 217E; # ROMAN NUMERAL FIVE HUNDRED
+216F; C; 217F; # ROMAN NUMERAL ONE THOUSAND
+2183; C; 2184; # ROMAN NUMERAL REVERSED ONE HUNDRED
+24B6; C; 24D0; # CIRCLED LATIN CAPITAL LETTER A
+24B7; C; 24D1; # CIRCLED LATIN CAPITAL LETTER B
+24B8; C; 24D2; # CIRCLED LATIN CAPITAL LETTER C
+24B9; C; 24D3; # CIRCLED LATIN CAPITAL LETTER D
+24BA; C; 24D4; # CIRCLED LATIN CAPITAL LETTER E
+24BB; C; 24D5; # CIRCLED LATIN CAPITAL LETTER F
+24BC; C; 24D6; # CIRCLED LATIN CAPITAL LETTER G
+24BD; C; 24D7; # CIRCLED LATIN CAPITAL LETTER H
+24BE; C; 24D8; # CIRCLED LATIN CAPITAL LETTER I
+24BF; C; 24D9; # CIRCLED LATIN CAPITAL LETTER J
+24C0; C; 24DA; # CIRCLED LATIN CAPITAL LETTER K
+24C1; C; 24DB; # CIRCLED LATIN CAPITAL LETTER L
+24C2; C; 24DC; # CIRCLED LATIN CAPITAL LETTER M
+24C3; C; 24DD; # CIRCLED LATIN CAPITAL LETTER N
+24C4; C; 24DE; # CIRCLED LATIN CAPITAL LETTER O
+24C5; C; 24DF; # CIRCLED LATIN CAPITAL LETTER P
+24C6; C; 24E0; # CIRCLED LATIN CAPITAL LETTER Q
+24C7; C; 24E1; # CIRCLED LATIN CAPITAL LETTER R
+24C8; C; 24E2; # CIRCLED LATIN CAPITAL LETTER S
+24C9; C; 24E3; # CIRCLED LATIN CAPITAL LETTER T
+24CA; C; 24E4; # CIRCLED LATIN CAPITAL LETTER U
+24CB; C; 24E5; # CIRCLED LATIN CAPITAL LETTER V
+24CC; C; 24E6; # CIRCLED LATIN CAPITAL LETTER W
+24CD; C; 24E7; # CIRCLED LATIN CAPITAL LETTER X
+24CE; C; 24E8; # CIRCLED LATIN CAPITAL LETTER Y
+24CF; C; 24E9; # CIRCLED LATIN CAPITAL LETTER Z
+2C00; C; 2C30; # GLAGOLITIC CAPITAL LETTER AZU
+2C01; C; 2C31; # GLAGOLITIC CAPITAL LETTER BUKY
+2C02; C; 2C32; # GLAGOLITIC CAPITAL LETTER VEDE
+2C03; C; 2C33; # GLAGOLITIC CAPITAL LETTER GLAGOLI
+2C04; C; 2C34; # GLAGOLITIC CAPITAL LETTER DOBRO
+2C05; C; 2C35; # GLAGOLITIC CAPITAL LETTER YESTU
+2C06; C; 2C36; # GLAGOLITIC CAPITAL LETTER ZHIVETE
+2C07; C; 2C37; # GLAGOLITIC CAPITAL LETTER DZELO
+2C08; C; 2C38; # GLAGOLITIC CAPITAL LETTER ZEMLJA
+2C09; C; 2C39; # GLAGOLITIC CAPITAL LETTER IZHE
+2C0A; C; 2C3A; # GLAGOLITIC CAPITAL LETTER INITIAL IZHE
+2C0B; C; 2C3B; # GLAGOLITIC CAPITAL LETTER I
+2C0C; C; 2C3C; # GLAGOLITIC CAPITAL LETTER DJERVI
+2C0D; C; 2C3D; # GLAGOLITIC CAPITAL LETTER KAKO
+2C0E; C; 2C3E; # GLAGOLITIC CAPITAL LETTER LJUDIJE
+2C0F; C; 2C3F; # GLAGOLITIC CAPITAL LETTER MYSLITE
+2C10; C; 2C40; # GLAGOLITIC CAPITAL LETTER NASHI
+2C11; C; 2C41; # GLAGOLITIC CAPITAL LETTER ONU
+2C12; C; 2C42; # GLAGOLITIC CAPITAL LETTER POKOJI
+2C13; C; 2C43; # GLAGOLITIC CAPITAL LETTER RITSI
+2C14; C; 2C44; # GLAGOLITIC CAPITAL LETTER SLOVO
+2C15; C; 2C45; # GLAGOLITIC CAPITAL LETTER TVRIDO
+2C16; C; 2C46; # GLAGOLITIC CAPITAL LETTER UKU
+2C17; C; 2C47; # GLAGOLITIC CAPITAL LETTER FRITU
+2C18; C; 2C48; # GLAGOLITIC CAPITAL LETTER HERU
+2C19; C; 2C49; # GLAGOLITIC CAPITAL LETTER OTU
+2C1A; C; 2C4A; # GLAGOLITIC CAPITAL LETTER PE
+2C1B; C; 2C4B; # GLAGOLITIC CAPITAL LETTER SHTA
+2C1C; C; 2C4C; # GLAGOLITIC CAPITAL LETTER TSI
+2C1D; C; 2C4D; # GLAGOLITIC CAPITAL LETTER CHRIVI
+2C1E; C; 2C4E; # GLAGOLITIC CAPITAL LETTER SHA
+2C1F; C; 2C4F; # GLAGOLITIC CAPITAL LETTER YERU
+2C20; C; 2C50; # GLAGOLITIC CAPITAL LETTER YERI
+2C21; C; 2C51; # GLAGOLITIC CAPITAL LETTER YATI
+2C22; C; 2C52; # GLAGOLITIC CAPITAL LETTER SPIDERY HA
+2C23; C; 2C53; # GLAGOLITIC CAPITAL LETTER YU
+2C24; C; 2C54; # GLAGOLITIC CAPITAL LETTER SMALL YUS
+2C25; C; 2C55; # GLAGOLITIC CAPITAL LETTER SMALL YUS WITH TAIL
+2C26; C; 2C56; # GLAGOLITIC CAPITAL LETTER YO
+2C27; C; 2C57; # GLAGOLITIC CAPITAL LETTER IOTATED SMALL YUS
+2C28; C; 2C58; # GLAGOLITIC CAPITAL LETTER BIG YUS
+2C29; C; 2C59; # GLAGOLITIC CAPITAL LETTER IOTATED BIG YUS
+2C2A; C; 2C5A; # GLAGOLITIC CAPITAL LETTER FITA
+2C2B; C; 2C5B; # GLAGOLITIC CAPITAL LETTER IZHITSA
+2C2C; C; 2C5C; # GLAGOLITIC CAPITAL LETTER SHTAPIC
+2C2D; C; 2C5D; # GLAGOLITIC CAPITAL LETTER TROKUTASTI A
+2C2E; C; 2C5E; # GLAGOLITIC CAPITAL LETTER LATINATE MYSLITE
+2C2F; C; 2C5F; # GLAGOLITIC CAPITAL LETTER CAUDATE CHRIVI
+2C60; C; 2C61; # LATIN CAPITAL LETTER L WITH DOUBLE BAR
+2C62; C; 026B; # LATIN CAPITAL LETTER L WITH MIDDLE TILDE
+2C63; C; 1D7D; # LATIN CAPITAL LETTER P WITH STROKE
+2C64; C; 027D; # LATIN CAPITAL LETTER R WITH TAIL
+2C67; C; 2C68; # LATIN CAPITAL LETTER H WITH DESCENDER
+2C69; C; 2C6A; # LATIN CAPITAL LETTER K WITH DESCENDER
+2C6B; C; 2C6C; # LATIN CAPITAL LETTER Z WITH DESCENDER
+2C6D; C; 0251; # LATIN CAPITAL LETTER ALPHA
+2C6E; C; 0271; # LATIN CAPITAL LETTER M WITH HOOK
+2C6F; C; 0250; # LATIN CAPITAL LETTER TURNED A
+2C70; C; 0252; # LATIN CAPITAL LETTER TURNED ALPHA
+2C72; C; 2C73; # LATIN CAPITAL LETTER W WITH HOOK
+2C75; C; 2C76; # LATIN CAPITAL LETTER HALF H
+2C7E; C; 023F; # LATIN CAPITAL LETTER S WITH SWASH TAIL
+2C7F; C; 0240; # LATIN CAPITAL LETTER Z WITH SWASH TAIL
+2C80; C; 2C81; # COPTIC CAPITAL LETTER ALFA
+2C82; C; 2C83; # COPTIC CAPITAL LETTER VIDA
+2C84; C; 2C85; # COPTIC CAPITAL LETTER GAMMA
+2C86; C; 2C87; # COPTIC CAPITAL LETTER DALDA
+2C88; C; 2C89; # COPTIC CAPITAL LETTER EIE
+2C8A; C; 2C8B; # COPTIC CAPITAL LETTER SOU
+2C8C; C; 2C8D; # COPTIC CAPITAL LETTER ZATA
+2C8E; C; 2C8F; # COPTIC CAPITAL LETTER HATE
+2C90; C; 2C91; # COPTIC CAPITAL LETTER THETHE
+2C92; C; 2C93; # COPTIC CAPITAL LETTER IAUDA
+2C94; C; 2C95; # COPTIC CAPITAL LETTER KAPA
+2C96; C; 2C97; # COPTIC CAPITAL LETTER LAULA
+2C98; C; 2C99; # COPTIC CAPITAL LETTER MI
+2C9A; C; 2C9B; # COPTIC CAPITAL LETTER NI
+2C9C; C; 2C9D; # COPTIC CAPITAL LETTER KSI
+2C9E; C; 2C9F; # COPTIC CAPITAL LETTER O
+2CA0; C; 2CA1; # COPTIC CAPITAL LETTER PI
+2CA2; C; 2CA3; # COPTIC CAPITAL LETTER RO
+2CA4; C; 2CA5; # COPTIC CAPITAL LETTER SIMA
+2CA6; C; 2CA7; # COPTIC CAPITAL LETTER TAU
+2CA8; C; 2CA9; # COPTIC CAPITAL LETTER UA
+2CAA; C; 2CAB; # COPTIC CAPITAL LETTER FI
+2CAC; C; 2CAD; # COPTIC CAPITAL LETTER KHI
+2CAE; C; 2CAF; # COPTIC CAPITAL LETTER PSI
+2CB0; C; 2CB1; # COPTIC CAPITAL LETTER OOU
+2CB2; C; 2CB3; # COPTIC CAPITAL LETTER DIALECT-P ALEF
+2CB4; C; 2CB5; # COPTIC CAPITAL LETTER OLD COPTIC AIN
+2CB6; C; 2CB7; # COPTIC CAPITAL LETTER CRYPTOGRAMMIC EIE
+2CB8; C; 2CB9; # COPTIC CAPITAL LETTER DIALECT-P KAPA
+2CBA; C; 2CBB; # COPTIC CAPITAL LETTER DIALECT-P NI
+2CBC; C; 2CBD; # COPTIC CAPITAL LETTER CRYPTOGRAMMIC NI
+2CBE; C; 2CBF; # COPTIC CAPITAL LETTER OLD COPTIC OOU
+2CC0; C; 2CC1; # COPTIC CAPITAL LETTER SAMPI
+2CC2; C; 2CC3; # COPTIC CAPITAL LETTER CROSSED SHEI
+2CC4; C; 2CC5; # COPTIC CAPITAL LETTER OLD COPTIC SHEI
+2CC6; C; 2CC7; # COPTIC CAPITAL LETTER OLD COPTIC ESH
+2CC8; C; 2CC9; # COPTIC CAPITAL LETTER AKHMIMIC KHEI
+2CCA; C; 2CCB; # COPTIC CAPITAL LETTER DIALECT-P HORI
+2CCC; C; 2CCD; # COPTIC CAPITAL LETTER OLD COPTIC HORI
+2CCE; C; 2CCF; # COPTIC CAPITAL LETTER OLD COPTIC HA
+2CD0; C; 2CD1; # COPTIC CAPITAL LETTER L-SHAPED HA
+2CD2; C; 2CD3; # COPTIC CAPITAL LETTER OLD COPTIC HEI
+2CD4; C; 2CD5; # COPTIC CAPITAL LETTER OLD COPTIC HAT
+2CD6; C; 2CD7; # COPTIC CAPITAL LETTER OLD COPTIC GANGIA
+2CD8; C; 2CD9; # COPTIC CAPITAL LETTER OLD COPTIC DJA
+2CDA; C; 2CDB; # COPTIC CAPITAL LETTER OLD COPTIC SHIMA
+2CDC; C; 2CDD; # COPTIC CAPITAL LETTER OLD NUBIAN SHIMA
+2CDE; C; 2CDF; # COPTIC CAPITAL LETTER OLD NUBIAN NGI
+2CE0; C; 2CE1; # COPTIC CAPITAL LETTER OLD NUBIAN NYI
+2CE2; C; 2CE3; # COPTIC CAPITAL LETTER OLD NUBIAN WAU
+2CEB; C; 2CEC; # COPTIC CAPITAL LETTER CRYPTOGRAMMIC SHEI
+2CED; C; 2CEE; # COPTIC CAPITAL LETTER CRYPTOGRAMMIC GANGIA
+2CF2; C; 2CF3; # COPTIC CAPITAL LETTER BOHAIRIC KHEI
+A640; C; A641; # CYRILLIC CAPITAL LETTER ZEMLYA
+A642; C; A643; # CYRILLIC CAPITAL LETTER DZELO
+A644; C; A645; # CYRILLIC CAPITAL LETTER REVERSED DZE
+A646; C; A647; # CYRILLIC CAPITAL LETTER IOTA
+A648; C; A649; # CYRILLIC CAPITAL LETTER DJERV
+A64A; C; A64B; # CYRILLIC CAPITAL LETTER MONOGRAPH UK
+A64C; C; A64D; # CYRILLIC CAPITAL LETTER BROAD OMEGA
+A64E; C; A64F; # CYRILLIC CAPITAL LETTER NEUTRAL YER
+A650; C; A651; # CYRILLIC CAPITAL LETTER YERU WITH BACK YER
+A652; C; A653; # CYRILLIC CAPITAL LETTER IOTIFIED YAT
+A654; C; A655; # CYRILLIC CAPITAL LETTER REVERSED YU
+A656; C; A657; # CYRILLIC CAPITAL LETTER IOTIFIED A
+A658; C; A659; # CYRILLIC CAPITAL LETTER CLOSED LITTLE YUS
+A65A; C; A65B; # CYRILLIC CAPITAL LETTER BLENDED YUS
+A65C; C; A65D; # CYRILLIC CAPITAL LETTER IOTIFIED CLOSED LITTLE YUS
+A65E; C; A65F; # CYRILLIC CAPITAL LETTER YN
+A660; C; A661; # CYRILLIC CAPITAL LETTER REVERSED TSE
+A662; C; A663; # CYRILLIC CAPITAL LETTER SOFT DE
+A664; C; A665; # CYRILLIC CAPITAL LETTER SOFT EL
+A666; C; A667; # CYRILLIC CAPITAL LETTER SOFT EM
+A668; C; A669; # CYRILLIC CAPITAL LETTER MONOCULAR O
+A66A; C; A66B; # CYRILLIC CAPITAL LETTER BINOCULAR O
+A66C; C; A66D; # CYRILLIC CAPITAL LETTER DOUBLE MONOCULAR O
+A680; C; A681; # CYRILLIC CAPITAL LETTER DWE
+A682; C; A683; # CYRILLIC CAPITAL LETTER DZWE
+A684; C; A685; # CYRILLIC CAPITAL LETTER ZHWE
+A686; C; A687; # CYRILLIC CAPITAL LETTER CCHE
+A688; C; A689; # CYRILLIC CAPITAL LETTER DZZE
+A68A; C; A68B; # CYRILLIC CAPITAL LETTER TE WITH MIDDLE HOOK
+A68C; C; A68D; # CYRILLIC CAPITAL LETTER TWE
+A68E; C; A68F; # CYRILLIC CAPITAL LETTER TSWE
+A690; C; A691; # CYRILLIC CAPITAL LETTER TSSE
+A692; C; A693; # CYRILLIC CAPITAL LETTER TCHE
+A694; C; A695; # CYRILLIC CAPITAL LETTER HWE
+A696; C; A697; # CYRILLIC CAPITAL LETTER SHWE
+A698; C; A699; # CYRILLIC CAPITAL LETTER DOUBLE O
+A69A; C; A69B; # CYRILLIC CAPITAL LETTER CROSSED O
+A722; C; A723; # LATIN CAPITAL LETTER EGYPTOLOGICAL ALEF
+A724; C; A725; # LATIN CAPITAL LETTER EGYPTOLOGICAL AIN
+A726; C; A727; # LATIN CAPITAL LETTER HENG
+A728; C; A729; # LATIN CAPITAL LETTER TZ
+A72A; C; A72B; # LATIN CAPITAL LETTER TRESILLO
+A72C; C; A72D; # LATIN CAPITAL LETTER CUATRILLO
+A72E; C; A72F; # LATIN CAPITAL LETTER CUATRILLO WITH COMMA
+A732; C; A733; # LATIN CAPITAL LETTER AA
+A734; C; A735; # LATIN CAPITAL LETTER AO
+A736; C; A737; # LATIN CAPITAL LETTER AU
+A738; C; A739; # LATIN CAPITAL LETTER AV
+A73A; C; A73B; # LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR
+A73C; C; A73D; # LATIN CAPITAL LETTER AY
+A73E; C; A73F; # LATIN CAPITAL LETTER REVERSED C WITH DOT
+A740; C; A741; # LATIN CAPITAL LETTER K WITH STROKE
+A742; C; A743; # LATIN CAPITAL LETTER K WITH DIAGONAL STROKE
+A744; C; A745; # LATIN CAPITAL LETTER K WITH STROKE AND DIAGONAL STROKE
+A746; C; A747; # LATIN CAPITAL LETTER BROKEN L
+A748; C; A749; # LATIN CAPITAL LETTER L WITH HIGH STROKE
+A74A; C; A74B; # LATIN CAPITAL LETTER O WITH LONG STROKE OVERLAY
+A74C; C; A74D; # LATIN CAPITAL LETTER O WITH LOOP
+A74E; C; A74F; # LATIN CAPITAL LETTER OO
+A750; C; A751; # LATIN CAPITAL LETTER P WITH STROKE THROUGH DESCENDER
+A752; C; A753; # LATIN CAPITAL LETTER P WITH FLOURISH
+A754; C; A755; # LATIN CAPITAL LETTER P WITH SQUIRREL TAIL
+A756; C; A757; # LATIN CAPITAL LETTER Q WITH STROKE THROUGH DESCENDER
+A758; C; A759; # LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE
+A75A; C; A75B; # LATIN CAPITAL LETTER R ROTUNDA
+A75C; C; A75D; # LATIN CAPITAL LETTER RUM ROTUNDA
+A75E; C; A75F; # LATIN CAPITAL LETTER V WITH DIAGONAL STROKE
+A760; C; A761; # LATIN CAPITAL LETTER VY
+A762; C; A763; # LATIN CAPITAL LETTER VISIGOTHIC Z
+A764; C; A765; # LATIN CAPITAL LETTER THORN WITH STROKE
+A766; C; A767; # LATIN CAPITAL LETTER THORN WITH STROKE THROUGH DESCENDER
+A768; C; A769; # LATIN CAPITAL LETTER VEND
+A76A; C; A76B; # LATIN CAPITAL LETTER ET
+A76C; C; A76D; # LATIN CAPITAL LETTER IS
+A76E; C; A76F; # LATIN CAPITAL LETTER CON
+A779; C; A77A; # LATIN CAPITAL LETTER INSULAR D
+A77B; C; A77C; # LATIN CAPITAL LETTER INSULAR F
+A77D; C; 1D79; # LATIN CAPITAL LETTER INSULAR G
+A77E; C; A77F; # LATIN CAPITAL LETTER TURNED INSULAR G
+A780; C; A781; # LATIN CAPITAL LETTER TURNED L
+A782; C; A783; # LATIN CAPITAL LETTER INSULAR R
+A784; C; A785; # LATIN CAPITAL LETTER INSULAR S
+A786; C; A787; # LATIN CAPITAL LETTER INSULAR T
+A78B; C; A78C; # LATIN CAPITAL LETTER SALTILLO
+A78D; C; 0265; # LATIN CAPITAL LETTER TURNED H
+A790; C; A791; # LATIN CAPITAL LETTER N WITH DESCENDER
+A792; C; A793; # LATIN CAPITAL LETTER C WITH BAR
+A796; C; A797; # LATIN CAPITAL LETTER B WITH FLOURISH
+A798; C; A799; # LATIN CAPITAL LETTER F WITH STROKE
+A79A; C; A79B; # LATIN CAPITAL LETTER VOLAPUK AE
+A79C; C; A79D; # LATIN CAPITAL LETTER VOLAPUK OE
+A79E; C; A79F; # LATIN CAPITAL LETTER VOLAPUK UE
+A7A0; C; A7A1; # LATIN CAPITAL LETTER G WITH OBLIQUE STROKE
+A7A2; C; A7A3; # LATIN CAPITAL LETTER K WITH OBLIQUE STROKE
+A7A4; C; A7A5; # LATIN CAPITAL LETTER N WITH OBLIQUE STROKE
+A7A6; C; A7A7; # LATIN CAPITAL LETTER R WITH OBLIQUE STROKE
+A7A8; C; A7A9; # LATIN CAPITAL LETTER S WITH OBLIQUE STROKE
+A7AA; C; 0266; # LATIN CAPITAL LETTER H WITH HOOK
+A7AB; C; 025C; # LATIN CAPITAL LETTER REVERSED OPEN E
+A7AC; C; 0261; # LATIN CAPITAL LETTER SCRIPT G
+A7AD; C; 026C; # LATIN CAPITAL LETTER L WITH BELT
+A7AE; C; 026A; # LATIN CAPITAL LETTER SMALL CAPITAL I
+A7B0; C; 029E; # LATIN CAPITAL LETTER TURNED K
+A7B1; C; 0287; # LATIN CAPITAL LETTER TURNED T
+A7B2; C; 029D; # LATIN CAPITAL LETTER J WITH CROSSED-TAIL
+A7B3; C; AB53; # LATIN CAPITAL LETTER CHI
+A7B4; C; A7B5; # LATIN CAPITAL LETTER BETA
+A7B6; C; A7B7; # LATIN CAPITAL LETTER OMEGA
+A7B8; C; A7B9; # LATIN CAPITAL LETTER U WITH STROKE
+A7BA; C; A7BB; # LATIN CAPITAL LETTER GLOTTAL A
+A7BC; C; A7BD; # LATIN CAPITAL LETTER GLOTTAL I
+A7BE; C; A7BF; # LATIN CAPITAL LETTER GLOTTAL U
+A7C0; C; A7C1; # LATIN CAPITAL LETTER OLD POLISH O
+A7C2; C; A7C3; # LATIN CAPITAL LETTER ANGLICANA W
+A7C4; C; A794; # LATIN CAPITAL LETTER C WITH PALATAL HOOK
+A7C5; C; 0282; # LATIN CAPITAL LETTER S WITH HOOK
+A7C6; C; 1D8E; # LATIN CAPITAL LETTER Z WITH PALATAL HOOK
+A7C7; C; A7C8; # LATIN CAPITAL LETTER D WITH SHORT STROKE OVERLAY
+A7C9; C; A7CA; # LATIN CAPITAL LETTER S WITH SHORT STROKE OVERLAY
+A7D0; C; A7D1; # LATIN CAPITAL LETTER CLOSED INSULAR G
+A7D6; C; A7D7; # LATIN CAPITAL LETTER MIDDLE SCOTS S
+A7D8; C; A7D9; # LATIN CAPITAL LETTER SIGMOID S
+A7F5; C; A7F6; # LATIN CAPITAL LETTER REVERSED HALF H
+AB70; C; 13A0; # CHEROKEE SMALL LETTER A
+AB71; C; 13A1; # CHEROKEE SMALL LETTER E
+AB72; C; 13A2; # CHEROKEE SMALL LETTER I
+AB73; C; 13A3; # CHEROKEE SMALL LETTER O
+AB74; C; 13A4; # CHEROKEE SMALL LETTER U
+AB75; C; 13A5; # CHEROKEE SMALL LETTER V
+AB76; C; 13A6; # CHEROKEE SMALL LETTER GA
+AB77; C; 13A7; # CHEROKEE SMALL LETTER KA
+AB78; C; 13A8; # CHEROKEE SMALL LETTER GE
+AB79; C; 13A9; # CHEROKEE SMALL LETTER GI
+AB7A; C; 13AA; # CHEROKEE SMALL LETTER GO
+AB7B; C; 13AB; # CHEROKEE SMALL LETTER GU
+AB7C; C; 13AC; # CHEROKEE SMALL LETTER GV
+AB7D; C; 13AD; # CHEROKEE SMALL LETTER HA
+AB7E; C; 13AE; # CHEROKEE SMALL LETTER HE
+AB7F; C; 13AF; # CHEROKEE SMALL LETTER HI
+AB80; C; 13B0; # CHEROKEE SMALL LETTER HO
+AB81; C; 13B1; # CHEROKEE SMALL LETTER HU
+AB82; C; 13B2; # CHEROKEE SMALL LETTER HV
+AB83; C; 13B3; # CHEROKEE SMALL LETTER LA
+AB84; C; 13B4; # CHEROKEE SMALL LETTER LE
+AB85; C; 13B5; # CHEROKEE SMALL LETTER LI
+AB86; C; 13B6; # CHEROKEE SMALL LETTER LO
+AB87; C; 13B7; # CHEROKEE SMALL LETTER LU
+AB88; C; 13B8; # CHEROKEE SMALL LETTER LV
+AB89; C; 13B9; # CHEROKEE SMALL LETTER MA
+AB8A; C; 13BA; # CHEROKEE SMALL LETTER ME
+AB8B; C; 13BB; # CHEROKEE SMALL LETTER MI
+AB8C; C; 13BC; # CHEROKEE SMALL LETTER MO
+AB8D; C; 13BD; # CHEROKEE SMALL LETTER MU
+AB8E; C; 13BE; # CHEROKEE SMALL LETTER NA
+AB8F; C; 13BF; # CHEROKEE SMALL LETTER HNA
+AB90; C; 13C0; # CHEROKEE SMALL LETTER NAH
+AB91; C; 13C1; # CHEROKEE SMALL LETTER NE
+AB92; C; 13C2; # CHEROKEE SMALL LETTER NI
+AB93; C; 13C3; # CHEROKEE SMALL LETTER NO
+AB94; C; 13C4; # CHEROKEE SMALL LETTER NU
+AB95; C; 13C5; # CHEROKEE SMALL LETTER NV
+AB96; C; 13C6; # CHEROKEE SMALL LETTER QUA
+AB97; C; 13C7; # CHEROKEE SMALL LETTER QUE
+AB98; C; 13C8; # CHEROKEE SMALL LETTER QUI
+AB99; C; 13C9; # CHEROKEE SMALL LETTER QUO
+AB9A; C; 13CA; # CHEROKEE SMALL LETTER QUU
+AB9B; C; 13CB; # CHEROKEE SMALL LETTER QUV
+AB9C; C; 13CC; # CHEROKEE SMALL LETTER SA
+AB9D; C; 13CD; # CHEROKEE SMALL LETTER S
+AB9E; C; 13CE; # CHEROKEE SMALL LETTER SE
+AB9F; C; 13CF; # CHEROKEE SMALL LETTER SI
+ABA0; C; 13D0; # CHEROKEE SMALL LETTER SO
+ABA1; C; 13D1; # CHEROKEE SMALL LETTER SU
+ABA2; C; 13D2; # CHEROKEE SMALL LETTER SV
+ABA3; C; 13D3; # CHEROKEE SMALL LETTER DA
+ABA4; C; 13D4; # CHEROKEE SMALL LETTER TA
+ABA5; C; 13D5; # CHEROKEE SMALL LETTER DE
+ABA6; C; 13D6; # CHEROKEE SMALL LETTER TE
+ABA7; C; 13D7; # CHEROKEE SMALL LETTER DI
+ABA8; C; 13D8; # CHEROKEE SMALL LETTER TI
+ABA9; C; 13D9; # CHEROKEE SMALL LETTER DO
+ABAA; C; 13DA; # CHEROKEE SMALL LETTER DU
+ABAB; C; 13DB; # CHEROKEE SMALL LETTER DV
+ABAC; C; 13DC; # CHEROKEE SMALL LETTER DLA
+ABAD; C; 13DD; # CHEROKEE SMALL LETTER TLA
+ABAE; C; 13DE; # CHEROKEE SMALL LETTER TLE
+ABAF; C; 13DF; # CHEROKEE SMALL LETTER TLI
+ABB0; C; 13E0; # CHEROKEE SMALL LETTER TLO
+ABB1; C; 13E1; # CHEROKEE SMALL LETTER TLU
+ABB2; C; 13E2; # CHEROKEE SMALL LETTER TLV
+ABB3; C; 13E3; # CHEROKEE SMALL LETTER TSA
+ABB4; C; 13E4; # CHEROKEE SMALL LETTER TSE
+ABB5; C; 13E5; # CHEROKEE SMALL LETTER TSI
+ABB6; C; 13E6; # CHEROKEE SMALL LETTER TSO
+ABB7; C; 13E7; # CHEROKEE SMALL LETTER TSU
+ABB8; C; 13E8; # CHEROKEE SMALL LETTER TSV
+ABB9; C; 13E9; # CHEROKEE SMALL LETTER WA
+ABBA; C; 13EA; # CHEROKEE SMALL LETTER WE
+ABBB; C; 13EB; # CHEROKEE SMALL LETTER WI
+ABBC; C; 13EC; # CHEROKEE SMALL LETTER WO
+ABBD; C; 13ED; # CHEROKEE SMALL LETTER WU
+ABBE; C; 13EE; # CHEROKEE SMALL LETTER WV
+ABBF; C; 13EF; # CHEROKEE SMALL LETTER YA
+FB00; F; 0066 0066; # LATIN SMALL LIGATURE FF
+FB01; F; 0066 0069; # LATIN SMALL LIGATURE FI
+FB02; F; 0066 006C; # LATIN SMALL LIGATURE FL
+FB03; F; 0066 0066 0069; # LATIN SMALL LIGATURE FFI
+FB04; F; 0066 0066 006C; # LATIN SMALL LIGATURE FFL
+FB05; F; 0073 0074; # LATIN SMALL LIGATURE LONG S T
+FB06; F; 0073 0074; # LATIN SMALL LIGATURE ST
+FB13; F; 0574 0576; # ARMENIAN SMALL LIGATURE MEN NOW
+FB14; F; 0574 0565; # ARMENIAN SMALL LIGATURE MEN ECH
+FB15; F; 0574 056B; # ARMENIAN SMALL LIGATURE MEN INI
+FB16; F; 057E 0576; # ARMENIAN SMALL LIGATURE VEW NOW
+FB17; F; 0574 056D; # ARMENIAN SMALL LIGATURE MEN XEH
+FF21; C; FF41; # FULLWIDTH LATIN CAPITAL LETTER A
+FF22; C; FF42; # FULLWIDTH LATIN CAPITAL LETTER B
+FF23; C; FF43; # FULLWIDTH LATIN CAPITAL LETTER C
+FF24; C; FF44; # FULLWIDTH LATIN CAPITAL LETTER D
+FF25; C; FF45; # FULLWIDTH LATIN CAPITAL LETTER E
+FF26; C; FF46; # FULLWIDTH LATIN CAPITAL LETTER F
+FF27; C; FF47; # FULLWIDTH LATIN CAPITAL LETTER G
+FF28; C; FF48; # FULLWIDTH LATIN CAPITAL LETTER H
+FF29; C; FF49; # FULLWIDTH LATIN CAPITAL LETTER I
+FF2A; C; FF4A; # FULLWIDTH LATIN CAPITAL LETTER J
+FF2B; C; FF4B; # FULLWIDTH LATIN CAPITAL LETTER K
+FF2C; C; FF4C; # FULLWIDTH LATIN CAPITAL LETTER L
+FF2D; C; FF4D; # FULLWIDTH LATIN CAPITAL LETTER M
+FF2E; C; FF4E; # FULLWIDTH LATIN CAPITAL LETTER N
+FF2F; C; FF4F; # FULLWIDTH LATIN CAPITAL LETTER O
+FF30; C; FF50; # FULLWIDTH LATIN CAPITAL LETTER P
+FF31; C; FF51; # FULLWIDTH LATIN CAPITAL LETTER Q
+FF32; C; FF52; # FULLWIDTH LATIN CAPITAL LETTER R
+FF33; C; FF53; # FULLWIDTH LATIN CAPITAL LETTER S
+FF34; C; FF54; # FULLWIDTH LATIN CAPITAL LETTER T
+FF35; C; FF55; # FULLWIDTH LATIN CAPITAL LETTER U
+FF36; C; FF56; # FULLWIDTH LATIN CAPITAL LETTER V
+FF37; C; FF57; # FULLWIDTH LATIN CAPITAL LETTER W
+FF38; C; FF58; # FULLWIDTH LATIN CAPITAL LETTER X
+FF39; C; FF59; # FULLWIDTH LATIN CAPITAL LETTER Y
+FF3A; C; FF5A; # FULLWIDTH LATIN CAPITAL LETTER Z
+10400; C; 10428; # DESERET CAPITAL LETTER LONG I
+10401; C; 10429; # DESERET CAPITAL LETTER LONG E
+10402; C; 1042A; # DESERET CAPITAL LETTER LONG A
+10403; C; 1042B; # DESERET CAPITAL LETTER LONG AH
+10404; C; 1042C; # DESERET CAPITAL LETTER LONG O
+10405; C; 1042D; # DESERET CAPITAL LETTER LONG OO
+10406; C; 1042E; # DESERET CAPITAL LETTER SHORT I
+10407; C; 1042F; # DESERET CAPITAL LETTER SHORT E
+10408; C; 10430; # DESERET CAPITAL LETTER SHORT A
+10409; C; 10431; # DESERET CAPITAL LETTER SHORT AH
+1040A; C; 10432; # DESERET CAPITAL LETTER SHORT O
+1040B; C; 10433; # DESERET CAPITAL LETTER SHORT OO
+1040C; C; 10434; # DESERET CAPITAL LETTER AY
+1040D; C; 10435; # DESERET CAPITAL LETTER OW
+1040E; C; 10436; # DESERET CAPITAL LETTER WU
+1040F; C; 10437; # DESERET CAPITAL LETTER YEE
+10410; C; 10438; # DESERET CAPITAL LETTER H
+10411; C; 10439; # DESERET CAPITAL LETTER PEE
+10412; C; 1043A; # DESERET CAPITAL LETTER BEE
+10413; C; 1043B; # DESERET CAPITAL LETTER TEE
+10414; C; 1043C; # DESERET CAPITAL LETTER DEE
+10415; C; 1043D; # DESERET CAPITAL LETTER CHEE
+10416; C; 1043E; # DESERET CAPITAL LETTER JEE
+10417; C; 1043F; # DESERET CAPITAL LETTER KAY
+10418; C; 10440; # DESERET CAPITAL LETTER GAY
+10419; C; 10441; # DESERET CAPITAL LETTER EF
+1041A; C; 10442; # DESERET CAPITAL LETTER VEE
+1041B; C; 10443; # DESERET CAPITAL LETTER ETH
+1041C; C; 10444; # DESERET CAPITAL LETTER THEE
+1041D; C; 10445; # DESERET CAPITAL LETTER ES
+1041E; C; 10446; # DESERET CAPITAL LETTER ZEE
+1041F; C; 10447; # DESERET CAPITAL LETTER ESH
+10420; C; 10448; # DESERET CAPITAL LETTER ZHEE
+10421; C; 10449; # DESERET CAPITAL LETTER ER
+10422; C; 1044A; # DESERET CAPITAL LETTER EL
+10423; C; 1044B; # DESERET CAPITAL LETTER EM
+10424; C; 1044C; # DESERET CAPITAL LETTER EN
+10425; C; 1044D; # DESERET CAPITAL LETTER ENG
+10426; C; 1044E; # DESERET CAPITAL LETTER OI
+10427; C; 1044F; # DESERET CAPITAL LETTER EW
+104B0; C; 104D8; # OSAGE CAPITAL LETTER A
+104B1; C; 104D9; # OSAGE CAPITAL LETTER AI
+104B2; C; 104DA; # OSAGE CAPITAL LETTER AIN
+104B3; C; 104DB; # OSAGE CAPITAL LETTER AH
+104B4; C; 104DC; # OSAGE CAPITAL LETTER BRA
+104B5; C; 104DD; # OSAGE CAPITAL LETTER CHA
+104B6; C; 104DE; # OSAGE CAPITAL LETTER EHCHA
+104B7; C; 104DF; # OSAGE CAPITAL LETTER E
+104B8; C; 104E0; # OSAGE CAPITAL LETTER EIN
+104B9; C; 104E1; # OSAGE CAPITAL LETTER HA
+104BA; C; 104E2; # OSAGE CAPITAL LETTER HYA
+104BB; C; 104E3; # OSAGE CAPITAL LETTER I
+104BC; C; 104E4; # OSAGE CAPITAL LETTER KA
+104BD; C; 104E5; # OSAGE CAPITAL LETTER EHKA
+104BE; C; 104E6; # OSAGE CAPITAL LETTER KYA
+104BF; C; 104E7; # OSAGE CAPITAL LETTER LA
+104C0; C; 104E8; # OSAGE CAPITAL LETTER MA
+104C1; C; 104E9; # OSAGE CAPITAL LETTER NA
+104C2; C; 104EA; # OSAGE CAPITAL LETTER O
+104C3; C; 104EB; # OSAGE CAPITAL LETTER OIN
+104C4; C; 104EC; # OSAGE CAPITAL LETTER PA
+104C5; C; 104ED; # OSAGE CAPITAL LETTER EHPA
+104C6; C; 104EE; # OSAGE CAPITAL LETTER SA
+104C7; C; 104EF; # OSAGE CAPITAL LETTER SHA
+104C8; C; 104F0; # OSAGE CAPITAL LETTER TA
+104C9; C; 104F1; # OSAGE CAPITAL LETTER EHTA
+104CA; C; 104F2; # OSAGE CAPITAL LETTER TSA
+104CB; C; 104F3; # OSAGE CAPITAL LETTER EHTSA
+104CC; C; 104F4; # OSAGE CAPITAL LETTER TSHA
+104CD; C; 104F5; # OSAGE CAPITAL LETTER DHA
+104CE; C; 104F6; # OSAGE CAPITAL LETTER U
+104CF; C; 104F7; # OSAGE CAPITAL LETTER WA
+104D0; C; 104F8; # OSAGE CAPITAL LETTER KHA
+104D1; C; 104F9; # OSAGE CAPITAL LETTER GHA
+104D2; C; 104FA; # OSAGE CAPITAL LETTER ZA
+104D3; C; 104FB; # OSAGE CAPITAL LETTER ZHA
+10570; C; 10597; # VITHKUQI CAPITAL LETTER A
+10571; C; 10598; # VITHKUQI CAPITAL LETTER BBE
+10572; C; 10599; # VITHKUQI CAPITAL LETTER BE
+10573; C; 1059A; # VITHKUQI CAPITAL LETTER CE
+10574; C; 1059B; # VITHKUQI CAPITAL LETTER CHE
+10575; C; 1059C; # VITHKUQI CAPITAL LETTER DE
+10576; C; 1059D; # VITHKUQI CAPITAL LETTER DHE
+10577; C; 1059E; # VITHKUQI CAPITAL LETTER EI
+10578; C; 1059F; # VITHKUQI CAPITAL LETTER E
+10579; C; 105A0; # VITHKUQI CAPITAL LETTER FE
+1057A; C; 105A1; # VITHKUQI CAPITAL LETTER GA
+1057C; C; 105A3; # VITHKUQI CAPITAL LETTER HA
+1057D; C; 105A4; # VITHKUQI CAPITAL LETTER HHA
+1057E; C; 105A5; # VITHKUQI CAPITAL LETTER I
+1057F; C; 105A6; # VITHKUQI CAPITAL LETTER IJE
+10580; C; 105A7; # VITHKUQI CAPITAL LETTER JE
+10581; C; 105A8; # VITHKUQI CAPITAL LETTER KA
+10582; C; 105A9; # VITHKUQI CAPITAL LETTER LA
+10583; C; 105AA; # VITHKUQI CAPITAL LETTER LLA
+10584; C; 105AB; # VITHKUQI CAPITAL LETTER ME
+10585; C; 105AC; # VITHKUQI CAPITAL LETTER NE
+10586; C; 105AD; # VITHKUQI CAPITAL LETTER NJE
+10587; C; 105AE; # VITHKUQI CAPITAL LETTER O
+10588; C; 105AF; # VITHKUQI CAPITAL LETTER PE
+10589; C; 105B0; # VITHKUQI CAPITAL LETTER QA
+1058A; C; 105B1; # VITHKUQI CAPITAL LETTER RE
+1058C; C; 105B3; # VITHKUQI CAPITAL LETTER SE
+1058D; C; 105B4; # VITHKUQI CAPITAL LETTER SHE
+1058E; C; 105B5; # VITHKUQI CAPITAL LETTER TE
+1058F; C; 105B6; # VITHKUQI CAPITAL LETTER THE
+10590; C; 105B7; # VITHKUQI CAPITAL LETTER U
+10591; C; 105B8; # VITHKUQI CAPITAL LETTER VE
+10592; C; 105B9; # VITHKUQI CAPITAL LETTER XE
+10594; C; 105BB; # VITHKUQI CAPITAL LETTER Y
+10595; C; 105BC; # VITHKUQI CAPITAL LETTER ZE
+10C80; C; 10CC0; # OLD HUNGARIAN CAPITAL LETTER A
+10C81; C; 10CC1; # OLD HUNGARIAN CAPITAL LETTER AA
+10C82; C; 10CC2; # OLD HUNGARIAN CAPITAL LETTER EB
+10C83; C; 10CC3; # OLD HUNGARIAN CAPITAL LETTER AMB
+10C84; C; 10CC4; # OLD HUNGARIAN CAPITAL LETTER EC
+10C85; C; 10CC5; # OLD HUNGARIAN CAPITAL LETTER ENC
+10C86; C; 10CC6; # OLD HUNGARIAN CAPITAL LETTER ECS
+10C87; C; 10CC7; # OLD HUNGARIAN CAPITAL LETTER ED
+10C88; C; 10CC8; # OLD HUNGARIAN CAPITAL LETTER AND
+10C89; C; 10CC9; # OLD HUNGARIAN CAPITAL LETTER E
+10C8A; C; 10CCA; # OLD HUNGARIAN CAPITAL LETTER CLOSE E
+10C8B; C; 10CCB; # OLD HUNGARIAN CAPITAL LETTER EE
+10C8C; C; 10CCC; # OLD HUNGARIAN CAPITAL LETTER EF
+10C8D; C; 10CCD; # OLD HUNGARIAN CAPITAL LETTER EG
+10C8E; C; 10CCE; # OLD HUNGARIAN CAPITAL LETTER EGY
+10C8F; C; 10CCF; # OLD HUNGARIAN CAPITAL LETTER EH
+10C90; C; 10CD0; # OLD HUNGARIAN CAPITAL LETTER I
+10C91; C; 10CD1; # OLD HUNGARIAN CAPITAL LETTER II
+10C92; C; 10CD2; # OLD HUNGARIAN CAPITAL LETTER EJ
+10C93; C; 10CD3; # OLD HUNGARIAN CAPITAL LETTER EK
+10C94; C; 10CD4; # OLD HUNGARIAN CAPITAL LETTER AK
+10C95; C; 10CD5; # OLD HUNGARIAN CAPITAL LETTER UNK
+10C96; C; 10CD6; # OLD HUNGARIAN CAPITAL LETTER EL
+10C97; C; 10CD7; # OLD HUNGARIAN CAPITAL LETTER ELY
+10C98; C; 10CD8; # OLD HUNGARIAN CAPITAL LETTER EM
+10C99; C; 10CD9; # OLD HUNGARIAN CAPITAL LETTER EN
+10C9A; C; 10CDA; # OLD HUNGARIAN CAPITAL LETTER ENY
+10C9B; C; 10CDB; # OLD HUNGARIAN CAPITAL LETTER O
+10C9C; C; 10CDC; # OLD HUNGARIAN CAPITAL LETTER OO
+10C9D; C; 10CDD; # OLD HUNGARIAN CAPITAL LETTER NIKOLSBURG OE
+10C9E; C; 10CDE; # OLD HUNGARIAN CAPITAL LETTER RUDIMENTA OE
+10C9F; C; 10CDF; # OLD HUNGARIAN CAPITAL LETTER OEE
+10CA0; C; 10CE0; # OLD HUNGARIAN CAPITAL LETTER EP
+10CA1; C; 10CE1; # OLD HUNGARIAN CAPITAL LETTER EMP
+10CA2; C; 10CE2; # OLD HUNGARIAN CAPITAL LETTER ER
+10CA3; C; 10CE3; # OLD HUNGARIAN CAPITAL LETTER SHORT ER
+10CA4; C; 10CE4; # OLD HUNGARIAN CAPITAL LETTER ES
+10CA5; C; 10CE5; # OLD HUNGARIAN CAPITAL LETTER ESZ
+10CA6; C; 10CE6; # OLD HUNGARIAN CAPITAL LETTER ET
+10CA7; C; 10CE7; # OLD HUNGARIAN CAPITAL LETTER ENT
+10CA8; C; 10CE8; # OLD HUNGARIAN CAPITAL LETTER ETY
+10CA9; C; 10CE9; # OLD HUNGARIAN CAPITAL LETTER ECH
+10CAA; C; 10CEA; # OLD HUNGARIAN CAPITAL LETTER U
+10CAB; C; 10CEB; # OLD HUNGARIAN CAPITAL LETTER UU
+10CAC; C; 10CEC; # OLD HUNGARIAN CAPITAL LETTER NIKOLSBURG UE
+10CAD; C; 10CED; # OLD HUNGARIAN CAPITAL LETTER RUDIMENTA UE
+10CAE; C; 10CEE; # OLD HUNGARIAN CAPITAL LETTER EV
+10CAF; C; 10CEF; # OLD HUNGARIAN CAPITAL LETTER EZ
+10CB0; C; 10CF0; # OLD HUNGARIAN CAPITAL LETTER EZS
+10CB1; C; 10CF1; # OLD HUNGARIAN CAPITAL LETTER ENT-SHAPED SIGN
+10CB2; C; 10CF2; # OLD HUNGARIAN CAPITAL LETTER US
+118A0; C; 118C0; # WARANG CITI CAPITAL LETTER NGAA
+118A1; C; 118C1; # WARANG CITI CAPITAL LETTER A
+118A2; C; 118C2; # WARANG CITI CAPITAL LETTER WI
+118A3; C; 118C3; # WARANG CITI CAPITAL LETTER YU
+118A4; C; 118C4; # WARANG CITI CAPITAL LETTER YA
+118A5; C; 118C5; # WARANG CITI CAPITAL LETTER YO
+118A6; C; 118C6; # WARANG CITI CAPITAL LETTER II
+118A7; C; 118C7; # WARANG CITI CAPITAL LETTER UU
+118A8; C; 118C8; # WARANG CITI CAPITAL LETTER E
+118A9; C; 118C9; # WARANG CITI CAPITAL LETTER O
+118AA; C; 118CA; # WARANG CITI CAPITAL LETTER ANG
+118AB; C; 118CB; # WARANG CITI CAPITAL LETTER GA
+118AC; C; 118CC; # WARANG CITI CAPITAL LETTER KO
+118AD; C; 118CD; # WARANG CITI CAPITAL LETTER ENY
+118AE; C; 118CE; # WARANG CITI CAPITAL LETTER YUJ
+118AF; C; 118CF; # WARANG CITI CAPITAL LETTER UC
+118B0; C; 118D0; # WARANG CITI CAPITAL LETTER ENN
+118B1; C; 118D1; # WARANG CITI CAPITAL LETTER ODD
+118B2; C; 118D2; # WARANG CITI CAPITAL LETTER TTE
+118B3; C; 118D3; # WARANG CITI CAPITAL LETTER NUNG
+118B4; C; 118D4; # WARANG CITI CAPITAL LETTER DA
+118B5; C; 118D5; # WARANG CITI CAPITAL LETTER AT
+118B6; C; 118D6; # WARANG CITI CAPITAL LETTER AM
+118B7; C; 118D7; # WARANG CITI CAPITAL LETTER BU
+118B8; C; 118D8; # WARANG CITI CAPITAL LETTER PU
+118B9; C; 118D9; # WARANG CITI CAPITAL LETTER HIYO
+118BA; C; 118DA; # WARANG CITI CAPITAL LETTER HOLO
+118BB; C; 118DB; # WARANG CITI CAPITAL LETTER HORR
+118BC; C; 118DC; # WARANG CITI CAPITAL LETTER HAR
+118BD; C; 118DD; # WARANG CITI CAPITAL LETTER SSUU
+118BE; C; 118DE; # WARANG CITI CAPITAL LETTER SII
+118BF; C; 118DF; # WARANG CITI CAPITAL LETTER VIYO
+16E40; C; 16E60; # MEDEFAIDRIN CAPITAL LETTER M
+16E41; C; 16E61; # MEDEFAIDRIN CAPITAL LETTER S
+16E42; C; 16E62; # MEDEFAIDRIN CAPITAL LETTER V
+16E43; C; 16E63; # MEDEFAIDRIN CAPITAL LETTER W
+16E44; C; 16E64; # MEDEFAIDRIN CAPITAL LETTER ATIU
+16E45; C; 16E65; # MEDEFAIDRIN CAPITAL LETTER Z
+16E46; C; 16E66; # MEDEFAIDRIN CAPITAL LETTER KP
+16E47; C; 16E67; # MEDEFAIDRIN CAPITAL LETTER P
+16E48; C; 16E68; # MEDEFAIDRIN CAPITAL LETTER T
+16E49; C; 16E69; # MEDEFAIDRIN CAPITAL LETTER G
+16E4A; C; 16E6A; # MEDEFAIDRIN CAPITAL LETTER F
+16E4B; C; 16E6B; # MEDEFAIDRIN CAPITAL LETTER I
+16E4C; C; 16E6C; # MEDEFAIDRIN CAPITAL LETTER K
+16E4D; C; 16E6D; # MEDEFAIDRIN CAPITAL LETTER A
+16E4E; C; 16E6E; # MEDEFAIDRIN CAPITAL LETTER J
+16E4F; C; 16E6F; # MEDEFAIDRIN CAPITAL LETTER E
+16E50; C; 16E70; # MEDEFAIDRIN CAPITAL LETTER B
+16E51; C; 16E71; # MEDEFAIDRIN CAPITAL LETTER C
+16E52; C; 16E72; # MEDEFAIDRIN CAPITAL LETTER U
+16E53; C; 16E73; # MEDEFAIDRIN CAPITAL LETTER YU
+16E54; C; 16E74; # MEDEFAIDRIN CAPITAL LETTER L
+16E55; C; 16E75; # MEDEFAIDRIN CAPITAL LETTER Q
+16E56; C; 16E76; # MEDEFAIDRIN CAPITAL LETTER HP
+16E57; C; 16E77; # MEDEFAIDRIN CAPITAL LETTER NY
+16E58; C; 16E78; # MEDEFAIDRIN CAPITAL LETTER X
+16E59; C; 16E79; # MEDEFAIDRIN CAPITAL LETTER D
+16E5A; C; 16E7A; # MEDEFAIDRIN CAPITAL LETTER OE
+16E5B; C; 16E7B; # MEDEFAIDRIN CAPITAL LETTER N
+16E5C; C; 16E7C; # MEDEFAIDRIN CAPITAL LETTER R
+16E5D; C; 16E7D; # MEDEFAIDRIN CAPITAL LETTER O
+16E5E; C; 16E7E; # MEDEFAIDRIN CAPITAL LETTER AI
+16E5F; C; 16E7F; # MEDEFAIDRIN CAPITAL LETTER Y
+1E900; C; 1E922; # ADLAM CAPITAL LETTER ALIF
+1E901; C; 1E923; # ADLAM CAPITAL LETTER DAALI
+1E902; C; 1E924; # ADLAM CAPITAL LETTER LAAM
+1E903; C; 1E925; # ADLAM CAPITAL LETTER MIIM
+1E904; C; 1E926; # ADLAM CAPITAL LETTER BA
+1E905; C; 1E927; # ADLAM CAPITAL LETTER SINNYIIYHE
+1E906; C; 1E928; # ADLAM CAPITAL LETTER PE
+1E907; C; 1E929; # ADLAM CAPITAL LETTER BHE
+1E908; C; 1E92A; # ADLAM CAPITAL LETTER RA
+1E909; C; 1E92B; # ADLAM CAPITAL LETTER E
+1E90A; C; 1E92C; # ADLAM CAPITAL LETTER FA
+1E90B; C; 1E92D; # ADLAM CAPITAL LETTER I
+1E90C; C; 1E92E; # ADLAM CAPITAL LETTER O
+1E90D; C; 1E92F; # ADLAM CAPITAL LETTER DHA
+1E90E; C; 1E930; # ADLAM CAPITAL LETTER YHE
+1E90F; C; 1E931; # ADLAM CAPITAL LETTER WAW
+1E910; C; 1E932; # ADLAM CAPITAL LETTER NUN
+1E911; C; 1E933; # ADLAM CAPITAL LETTER KAF
+1E912; C; 1E934; # ADLAM CAPITAL LETTER YA
+1E913; C; 1E935; # ADLAM CAPITAL LETTER U
+1E914; C; 1E936; # ADLAM CAPITAL LETTER JIIM
+1E915; C; 1E937; # ADLAM CAPITAL LETTER CHI
+1E916; C; 1E938; # ADLAM CAPITAL LETTER HA
+1E917; C; 1E939; # ADLAM CAPITAL LETTER QAAF
+1E918; C; 1E93A; # ADLAM CAPITAL LETTER GA
+1E919; C; 1E93B; # ADLAM CAPITAL LETTER NYA
+1E91A; C; 1E93C; # ADLAM CAPITAL LETTER TU
+1E91B; C; 1E93D; # ADLAM CAPITAL LETTER NHA
+1E91C; C; 1E93E; # ADLAM CAPITAL LETTER VA
+1E91D; C; 1E93F; # ADLAM CAPITAL LETTER KHA
+1E91E; C; 1E940; # ADLAM CAPITAL LETTER GBE
+1E91F; C; 1E941; # ADLAM CAPITAL LETTER ZAL
+1E920; C; 1E942; # ADLAM CAPITAL LETTER KPO
+1E921; C; 1E943; # ADLAM CAPITAL LETTER SHA
+'''
+
+
+def _parse_unichr(s):
+    s = int(s, 16)
+    try:
+        return compat_chr(s)
+    except ValueError:
+        # work around "unichr() arg not in range(0x10000) (narrow Python build)"
+        return ('\\U%08x' % s).decode('unicode-escape')
+
+
+_map = dict(
+    (_parse_unichr(from_), ''.join(map(_parse_unichr, to_.split(' '))))
+    for from_, type_, to_, _ in (
+        l.split('; ', 3) for l in _map_str.splitlines() if l and not l[0] == '#')
+    if type_ in ('C', 'F'))
+del _map_str
+
+
+def casefold(s):
+    assert isinstance(s, compat_str)
+    return ''.join((_map.get(c, c) for c in s))
+
+
+__all__ = [
+    'casefold',
+]
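
The mapping above is compiled into `_map` from the C (common) and F (full) rows and applied per character by `casefold()`. A minimal usage sketch, grounded in entries visible in the table (illustrative, not part of the patch):

    # Behaviour implied by the C/F rows above:
    from youtube_dl.casefold import casefold

    assert casefold(u'\u1e9e') == u'ss'      # 1E9E; F; 0073 0073 (CAPITAL SHARP S)
    assert casefold(u'\u2126') == u'\u03c9'  # 2126; C; 03C9 (OHM SIGN -> small omega)
    assert casefold(u'\ufb01') == u'fi'      # FB01; F; 0066 0069 (LIGATURE FI)
    assert casefold(u'abc') == u'abc'        # unmapped characters pass through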

+ 602 - 114
youtube_dl/compat.py

@@ -1,10 +1,12 @@
 # coding: utf-8
 from __future__ import unicode_literals
+from __future__ import division

 import base64
 import binascii
 import collections
 import ctypes
+import datetime
 import email
 import getpass
 import io
@@ -19,8 +21,31 @@ import socket
 import struct
 import subprocess
 import sys
+import types
 import xml.etree.ElementTree

+# naming convention
+# 'compat_' + Python3_name.replace('.', '_')
+# other aliases exist for convenience and/or legacy
+
+# deal with critical unicode/str things first
+try:
+    # Python 2
+    compat_str, compat_basestring, compat_chr = (
+        unicode, basestring, unichr
+    )
+except NameError:
+    compat_str, compat_basestring, compat_chr = (
+        str, (str, bytes), chr
+    )
+
+# casefold
+try:
+    compat_str.casefold
+    compat_casefold = lambda s: s.casefold()
+except AttributeError:
+    from .casefold import casefold as compat_casefold
+
 try:
     import collections.abc as compat_collections_abc
 except ImportError:
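
The naming convention declared in this hunk ('compat_' + the Python 3 dotted name with dots replaced by underscores) generates the alias assignments added throughout the rest of this file; for example (both pairs appear in later hunks):

    # Illustrative instances of the convention:
    #   http.cookiejar          -> compat_http_cookiejar          (legacy: compat_cookiejar)
    #   html.parser.HTMLParser  -> compat_html_parser_HTMLParser  (legacy: compat_HTMLParser)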
@@ -31,6 +56,29 @@ try:
 except ImportError:  # Python 2
     import urllib2 as compat_urllib_request

+# Also fix up lack of method arg in old Pythons
+try:
+    type(compat_urllib_request.Request('http://127.0.0.1', method='GET'))
+except TypeError:
+    def _add_init_method_arg(cls):
+
+        init = cls.__init__
+
+        def wrapped_init(self, *args, **kwargs):
+            method = kwargs.pop('method', 'GET')
+            init(self, *args, **kwargs)
+            if any(callable(x.__dict__.get('get_method')) for x in (self.__class__, self) if x != cls):
+                # allow instance or its subclass to override get_method()
+                return
+            if self.has_data() and method == 'GET':
+                method = 'POST'
+            self.get_method = types.MethodType(lambda _: method, self)
+
+        cls.__init__ = wrapped_init
+
+    _add_init_method_arg(compat_urllib_request.Request)
+    del _add_init_method_arg
+
 try:
     import urllib.error as compat_urllib_error
 except ImportError:  # Python 2
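
A hedged usage sketch of the method-argument backport above (the URL is illustrative; on Pythons whose Request already accepts method, the probe succeeds and no wrapper is installed):

    # Illustrative only.
    req = compat_urllib_request.Request('http://127.0.0.1/api', method='PUT')
    assert req.get_method() == 'PUT'
    # With data and no explicit method, POST is still inferred as before:
    req = compat_urllib_request.Request('http://127.0.0.1/api', data=b'a=1')
    assert req.get_method() == 'POST'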
@@ -40,26 +88,32 @@ try:
     import urllib.parse as compat_urllib_parse
 except ImportError:  # Python 2
     import urllib as compat_urllib_parse
+    import urlparse as _urlparse
+    for a in dir(_urlparse):
+        if not hasattr(compat_urllib_parse, a):
+            setattr(compat_urllib_parse, a, getattr(_urlparse, a))
+    del _urlparse

-try:
-    from urllib.parse import urlparse as compat_urllib_parse_urlparse
-except ImportError:  # Python 2
-    from urlparse import urlparse as compat_urllib_parse_urlparse
-
-try:
-    import urllib.parse as compat_urlparse
-except ImportError:  # Python 2
-    import urlparse as compat_urlparse
+# unfavoured aliases
+compat_urlparse = compat_urllib_parse
+compat_urllib_parse_urlparse = compat_urllib_parse.urlparse

 try:
     import urllib.response as compat_urllib_response
 except ImportError:  # Python 2
     import urllib as compat_urllib_response

+try:
+    compat_urllib_response.addinfourl.status
+except AttributeError:
+    # .getcode() is deprecated in Py 3.
+    compat_urllib_response.addinfourl.status = property(lambda self: self.getcode())
+
 try:
     import http.cookiejar as compat_cookiejar
 except ImportError:  # Python 2
     import cookielib as compat_cookiejar
+compat_http_cookiejar = compat_cookiejar

 if sys.version_info[0] == 2:
     class compat_cookiejar_Cookie(compat_cookiejar.Cookie):
@@ -71,20 +125,35 @@ if sys.version_info[0] == 2:
             compat_cookiejar.Cookie.__init__(self, version, name, value, *args, **kwargs)
 else:
     compat_cookiejar_Cookie = compat_cookiejar.Cookie
+compat_http_cookiejar_Cookie = compat_cookiejar_Cookie

 try:
     import http.cookies as compat_cookies
 except ImportError:  # Python 2
     import Cookie as compat_cookies
+compat_http_cookies = compat_cookies

-if sys.version_info[0] == 2:
+if sys.version_info[0] == 2 or sys.version_info < (3, 3):
     class compat_cookies_SimpleCookie(compat_cookies.SimpleCookie):
         def load(self, rawdata):
-            if isinstance(rawdata, compat_str):
-                rawdata = str(rawdata)
-            return super(compat_cookies_SimpleCookie, self).load(rawdata)
+            must_have_value = 0
+            if not isinstance(rawdata, dict):
+                if sys.version_info[:2] != (2, 7) or sys.platform.startswith('java'):
+                    # attribute must have value for parsing
+                    rawdata, must_have_value = re.subn(
+                        r'(?i)(;\s*)(secure|httponly)(\s*(?:;|$))', r'\1\2=\2\3', rawdata)
+                if sys.version_info[0] == 2:
+                    if isinstance(rawdata, compat_str):
+                        rawdata = str(rawdata)
+            super(compat_cookies_SimpleCookie, self).load(rawdata)
+            if must_have_value > 0:
+                for morsel in self.values():
+                    for attr in ('secure', 'httponly'):
+                        if morsel.get(attr):
+                            morsel[attr] = True
 else:
     compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
+compat_http_cookies_SimpleCookie = compat_cookies_SimpleCookie

 try:
     import html.entities as compat_html_entities
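
A minimal sketch of what the widened SimpleCookie backport fixes: value-less attributes such as HttpOnly are rewritten to attr=attr for old parsers, then normalised back to boolean flags (the cookie string is illustrative):

    # Illustrative only.
    cookie = compat_cookies_SimpleCookie()
    cookie.load('sid=abc123; Path=/; HttpOnly')
    morsel = cookie['sid']
    assert morsel.value == 'abc123'
    assert bool(morsel['httponly'])  # flag survives parsing on all supported Pythons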
@@ -2333,39 +2402,45 @@ try:
     import http.client as compat_http_client
 except ImportError:  # Python 2
     import httplib as compat_http_client
+try:
+    compat_http_client.HTTPResponse.getcode
+except AttributeError:
+    # Py < 3.1
+    compat_http_client.HTTPResponse.getcode = lambda self: self.status

 try:
     from urllib.error import HTTPError as compat_HTTPError
 except ImportError:  # Python 2
     from urllib2 import HTTPError as compat_HTTPError
+compat_urllib_HTTPError = compat_HTTPError

 try:
     from urllib.request import urlretrieve as compat_urlretrieve
 except ImportError:  # Python 2
     from urllib import urlretrieve as compat_urlretrieve
+compat_urllib_request_urlretrieve = compat_urlretrieve

 try:
+    from HTMLParser import (
+        HTMLParser as compat_HTMLParser,
+        HTMLParseError as compat_HTMLParseError)
+except ImportError:  # Python 3
     from html.parser import HTMLParser as compat_HTMLParser
-except ImportError:  # Python 2
-    from HTMLParser import HTMLParser as compat_HTMLParser
-
-try:  # Python 2
-    from HTMLParser import HTMLParseError as compat_HTMLParseError
-except ImportError:  # Python <3.4
     try:
         from html.parser import HTMLParseError as compat_HTMLParseError
     except ImportError:  # Python >3.4
-
-        # HTMLParseError has been deprecated in Python 3.3 and removed in
+        # HTMLParseError was deprecated in Python 3.3 and removed in
         # Python 3.5. Introducing dummy exception for Python >3.5 for compatible
         # and uniform cross-version exception handling
         class compat_HTMLParseError(Exception):
             pass
+compat_html_parser_HTMLParser = compat_HTMLParser
+compat_html_parser_HTMLParseError = compat_HTMLParseError

 try:
-    from subprocess import DEVNULL
-    compat_subprocess_get_DEVNULL = lambda: DEVNULL
-except ImportError:
+    _DEVNULL = subprocess.DEVNULL
+    compat_subprocess_get_DEVNULL = lambda: _DEVNULL
+except AttributeError:
     compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')

 try:
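
Together with the addinfourl.status property added in an earlier hunk, the HTTPResponse.getcode shim above means both accessors exist on every supported Python; a hedged sketch (assumes a reachable URL):

    # Illustrative only.
    resp = compat_urllib_request.urlopen('http://example.com/')
    assert resp.getcode() == resp.status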
@@ -2373,15 +2448,12 @@ try:
 except ImportError:
     import BaseHTTPServer as compat_http_server

-try:
-    compat_str = unicode  # Python 2
-except NameError:
-    compat_str = str
-
 try:
     from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
     from urllib.parse import unquote as compat_urllib_parse_unquote
     from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
+    from urllib.parse import urlencode as compat_urllib_parse_urlencode
+    from urllib.parse import parse_qs as compat_parse_qs
 except ImportError:  # Python 2
     _asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
                 else re.compile(r'([\x00-\x7f]+)'))
@@ -2448,9 +2520,6 @@ except ImportError:  # Python 2
         string = string.replace('+', ' ')
         return compat_urllib_parse_unquote(string, encoding, errors)

-try:
-    from urllib.parse import urlencode as compat_urllib_parse_urlencode
-except ImportError:  # Python 2
     # Python 2 will choke in urlencode on mixture of byte and unicode strings.
     # Possible solutions are to either port it from python 3 with all
     # the friends or manually ensure input query contains only byte strings.
@@ -2472,7 +2541,62 @@ except ImportError:  # Python 2
         def encode_list(l):
             return [encode_elem(e) for e in l]

-        return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
+        return compat_urllib_parse._urlencode(encode_elem(query), doseq=doseq)
+
+    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
+    # Python 2's version is apparently totally broken
+    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
+                   encoding='utf-8', errors='replace'):
+        qs, _coerce_result = qs, compat_str
+        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+        r = []
+        for name_value in pairs:
+            if not name_value and not strict_parsing:
+                continue
+            nv = name_value.split('=', 1)
+            if len(nv) != 2:
+                if strict_parsing:
+                    raise ValueError('bad query field: %r' % (name_value,))
+                # Handle case of a control-name with no equal sign
+                if keep_blank_values:
+                    nv.append('')
+                else:
+                    continue
+            if len(nv[1]) or keep_blank_values:
+                name = nv[0].replace('+', ' ')
+                name = compat_urllib_parse_unquote(
+                    name, encoding=encoding, errors=errors)
+                name = _coerce_result(name)
+                value = nv[1].replace('+', ' ')
+                value = compat_urllib_parse_unquote(
+                    value, encoding=encoding, errors=errors)
+                value = _coerce_result(value)
+                r.append((name, value))
+        return r
+
+    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
+                        encoding='utf-8', errors='replace'):
+        parsed_result = {}
+        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
+                           encoding=encoding, errors=errors)
+        for name, value in pairs:
+            if name in parsed_result:
+                parsed_result[name].append(value)
+            else:
+                parsed_result[name] = [value]
+        return parsed_result
+
+    setattr(compat_urllib_parse, '_urlencode',
+            getattr(compat_urllib_parse, 'urlencode'))
+    for name, fix in (
+            ('unquote_to_bytes', compat_urllib_parse_unquote_to_bytes),
+            ('parse_unquote', compat_urllib_parse_unquote),
+            ('unquote_plus', compat_urllib_parse_unquote_plus),
+            ('urlencode', compat_urllib_parse_urlencode),
+            ('parse_qs', compat_parse_qs)):
+        setattr(compat_urllib_parse, name, fix)
+
+compat_urllib_parse_parse_qs = compat_parse_qs
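
(Editor's sketch: how the shimmed query helpers behave, using only names exported above; runnable as-is on Python 3, where both are stdlib re-exports.)

    from youtube_dl.compat import compat_parse_qs, compat_urllib_parse_urlencode

    # repeated keys collect into lists, matching Python 3's urllib.parse.parse_qs
    assert compat_parse_qs('a=1&a=2&b=%20') == {'a': ['1', '2'], 'b': [' ']}
    # urlencode also accepts the mixed byte/text input that Python 2 chokes on
    assert compat_urllib_parse_urlencode({'q': 'x y'}) == 'q=x+y'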
 try:
     from urllib.request import DataHandler as compat_urllib_request_DataHandler
@@ -2508,21 +2632,11 @@ except ImportError:  # Python < 3.4

             return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)

-try:
-    compat_basestring = basestring  # Python 2
-except NameError:
-    compat_basestring = str
-
-try:
-    compat_chr = unichr  # Python 2
-except NameError:
-    compat_chr = chr
-
 try:
     from xml.etree.ElementTree import ParseError as compat_xml_parse_error
 except ImportError:  # Python 2.6
     from xml.parsers.expat import ExpatError as compat_xml_parse_error
-
+compat_xml_etree_ElementTree_ParseError = compat_xml_parse_error

 etree = xml.etree.ElementTree

@@ -2536,10 +2650,11 @@ try:
     # xml.etree.ElementTree.Element is a method in Python <=2.6 and
     # the following will crash with:
     #  TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types
-    isinstance(None, xml.etree.ElementTree.Element)
+    isinstance(None, etree.Element)
     from xml.etree.ElementTree import Element as compat_etree_Element
 except TypeError:  # Python <=2.6
     from xml.etree.ElementTree import _ElementInterface as compat_etree_Element
+compat_xml_etree_ElementTree_Element = compat_etree_Element

 if sys.version_info[0] >= 3:
     def compat_etree_fromstring(text):
@@ -2595,6 +2710,7 @@ else:
             if k == uri or v == prefix:
                 del etree._namespace_map[k]
         etree._namespace_map[uri] = prefix
+compat_xml_etree_register_namespace = compat_etree_register_namespace

 if sys.version_info < (2, 7):
     # Here comes the crazy part: In 2.6, if the xpath is a unicode,
@@ -2603,55 +2719,222 @@ if sys.version_info < (2, 7):
         if isinstance(xpath, compat_str):
             xpath = xpath.encode('ascii')
         return xpath
-else:
-    compat_xpath = lambda xpath: xpath
-try:
-    from urllib.parse import parse_qs as compat_parse_qs
-except ImportError:  # Python 2
-    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
-    # Python 2's version is apparently totally broken
-
-    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
-                   encoding='utf-8', errors='replace'):
-        qs, _coerce_result = qs, compat_str
-        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
-        r = []
-        for name_value in pairs:
-            if not name_value and not strict_parsing:
-                continue
-            nv = name_value.split('=', 1)
-            if len(nv) != 2:
-                if strict_parsing:
-                    raise ValueError('bad query field: %r' % (name_value,))
-                # Handle case of a control-name with no equal sign
-                if keep_blank_values:
-                    nv.append('')
+    # further code below based on CPython 2.7 source
+    import functools
+
+    _xpath_tokenizer_re = re.compile(r'''(?x)
+        (                                   # (1)
+            '[^']*'|"[^"]*"|                # quoted strings, or
+            ::|//?|\.\.|\(\)|[/.*:[\]()@=]  # navigation specials
+        )|                                  # or (2)
+        ((?:\{[^}]+\})?[^/[\]()@=\s]+)|     # token: optional {ns}, no specials
+        \s+                                 # or white space
+    ''')
+
+    def _xpath_tokenizer(pattern, namespaces=None):
+        for token in _xpath_tokenizer_re.findall(pattern):
+            tag = token[1]
+            if tag and tag[0] != "{" and ":" in tag:
+                try:
+                    if not namespaces:
+                        raise KeyError
+                    prefix, uri = tag.split(":", 1)
+                    yield token[0], "{%s}%s" % (namespaces[prefix], uri)
+                except KeyError:
+                    raise SyntaxError("prefix %r not found in prefix map" % prefix)
+            else:
+                yield token
+
+    def _get_parent_map(context):
+        parent_map = context.parent_map
+        if parent_map is None:
+            context.parent_map = parent_map = {}
+            for p in context.root.getiterator():
+                for e in p:
+                    parent_map[e] = p
+        return parent_map
+
+    def _select(context, result, filter_fn=lambda *_: True):
+        for elem in result:
+            for e in elem:
+                if filter_fn(e, elem):
+                    yield e
+
+    def _prepare_child(next_, token):
+        tag = token[1]
+        return functools.partial(_select, filter_fn=lambda e, _: e.tag == tag)
+
+    def _prepare_star(next_, token):
+        return _select
+
+    def _prepare_self(next_, token):
+        return lambda _, result: (e for e in result)
+
+    def _prepare_descendant(next_, token):
+        token = next(next_)
+        if token[0] == "*":
+            tag = "*"
+        elif not token[0]:
+            tag = token[1]
+        else:
+            raise SyntaxError("invalid descendant")
+
+        def select(context, result):
+            for elem in result:
+                for e in elem.getiterator(tag):
+                    if e is not elem:
+                        yield e
+        return select
+
+    def _prepare_parent(next_, token):
+        def select(context, result):
+            # FIXME: raise error if .. is applied at toplevel?
+            parent_map = _get_parent_map(context)
+            result_map = {}
+            for elem in result:
+                if elem in parent_map:
+                    parent = parent_map[elem]
+                    if parent not in result_map:
+                        result_map[parent] = None
+                        yield parent
+        return select
+
+    def _prepare_predicate(next_, token):
+        signature = []
+        predicate = []
+        for token in next_:
+            if token[0] == "]":
+                break
+            if token[0] and token[0][:1] in "'\"":
+                token = "'", token[0][1:-1]
+            signature.append(token[0] or "-")
+            predicate.append(token[1])
+
+        def select(context, result, filter_fn=lambda _: True):
+            for elem in result:
+                if filter_fn(elem):
+                    yield elem
+
+        signature = "".join(signature)
+        # use signature to determine predicate type
+        if signature == "@-":
+            # [@attribute] predicate
+            key = predicate[1]
+            return functools.partial(
+                select, filter_fn=lambda el: el.get(key) is not None)
+        if signature == "@-='":
+            # [@attribute='value']
+            key = predicate[1]
+            value = predicate[-1]
+            return functools.partial(
+                select, filter_fn=lambda el: el.get(key) == value)
+        if signature == "-" and not re.match(r"\d+$", predicate[0]):
+            # [tag]
+            tag = predicate[0]
+            return functools.partial(
+                select, filter_fn=lambda el: el.find(tag) is not None)
+        if signature == "-='" and not re.match(r"\d+$", predicate[0]):
+            # [tag='value']
+            tag = predicate[0]
+            value = predicate[-1]
+
+            def itertext(el):
+                for e in el.getiterator():
+                    e = e.text
+                    if e:
+                        yield e
+
+            def select(context, result):
+                for elem in result:
+                    for e in elem.findall(tag):
+                        if "".join(itertext(e)) == value:
+                            yield elem
+                            break
+            return select
+        if signature == "-" or signature == "-()" or signature == "-()-":
+            # [index] or [last()] or [last()-index]
+            if signature == "-":
+                index = int(predicate[0]) - 1
+            else:
+                if predicate[0] != "last":
+                    raise SyntaxError("unsupported function")
+                if signature == "-()-":
+                    try:
+                        index = int(predicate[2]) - 1
+                    except ValueError:
+                        raise SyntaxError("unsupported expression")
                 else:
+                    index = -1
+
+            def select(context, result):
+                parent_map = _get_parent_map(context)
+                for elem in result:
+                    try:
+                        parent = parent_map[elem]
+                        # FIXME: what if the selector is "*" ?
+                        elems = list(parent.findall(elem.tag))
+                        if elems[index] is elem:
+                            yield elem
+                    except (IndexError, KeyError):
+                        pass
+            return select
+        raise SyntaxError("invalid predicate")
+
+    ops = {
+        "": _prepare_child,
+        "*": _prepare_star,
+        ".": _prepare_self,
+        "..": _prepare_parent,
+        "//": _prepare_descendant,
+        "[": _prepare_predicate,
+    }
+
+    _cache = {}
+
+    class _SelectorContext:
+        parent_map = None
+
+        def __init__(self, root):
+            self.root = root
+
+    ##
+    # Generate all matching objects.
+
+    def compat_etree_iterfind(elem, path, namespaces=None):
+        # compile selector pattern
+        if path[-1:] == "/":
+            path = path + "*"  # implicit all (FIXME: keep this?)
+        try:
+            selector = _cache[path]
+        except KeyError:
+            if len(_cache) > 100:
+                _cache.clear()
+            if path[:1] == "/":
+                raise SyntaxError("cannot use absolute path on element")
+            tokens = _xpath_tokenizer(path, namespaces)
+            selector = []
+            for token in tokens:
+                if token[0] == "/":
                     continue
-            if len(nv[1]) or keep_blank_values:
-                name = nv[0].replace('+', ' ')
-                name = compat_urllib_parse_unquote(
-                    name, encoding=encoding, errors=errors)
-                name = _coerce_result(name)
-                value = nv[1].replace('+', ' ')
-                value = compat_urllib_parse_unquote(
-                    value, encoding=encoding, errors=errors)
-                value = _coerce_result(value)
-                r.append((name, value))
-        return r
+                try:
+                    selector.append(ops[token[0]](tokens, token))
+                except StopIteration:
+                    raise SyntaxError("invalid path")
+            _cache[path] = selector
+        # execute selector pattern
+        result = [elem]
+        context = _SelectorContext(elem)
+        for select in selector:
+            result = select(context, result)
+        return result
+
+    # end of code based on CPython 2.7 source
-    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
-                        encoding='utf-8', errors='replace'):
-        parsed_result = {}
-        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
-                           encoding=encoding, errors=errors)
-        for name, value in pairs:
-            if name in parsed_result:
-                parsed_result[name].append(value)
-            else:
-                parsed_result[name] = [value]
-        return parsed_result
+
+else:
+    compat_xpath = lambda xpath: xpath
+    compat_etree_iterfind = lambda element, match: element.iterfind(match)
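
(Editor's sketch of the iterfind polyfill in use; both the Py2.6 backport above and the stdlib path support the simple child and [@attr='value'] selectors shown.)

    from youtube_dl.compat import compat_etree_fromstring, compat_etree_iterfind

    doc = compat_etree_fromstring(b'<root><a n="1"/><a n="2"/><b/></root>')
    assert [e.get('n') for e in compat_etree_iterfind(doc, 'a')] == ['1', '2']
    assert [e.get('n') for e in compat_etree_iterfind(doc, "a[@n='2']")] == ['2']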


 compat_os_name = os._name if os.name == 'java' else os.name
@@ -2687,7 +2970,7 @@ except (AssertionError, UnicodeEncodeError):


 def compat_ord(c):
-    if type(c) is int:
+    if isinstance(c, int):
         return c
     else:
         return ord(c)
@@ -2777,6 +3060,8 @@ else:
     else:
         compat_expanduser = os.path.expanduser

+compat_os_path_expanduser = compat_expanduser
+
 if compat_os_name == 'nt' and sys.version_info < (3, 8):
     # os.path.realpath on Windows does not follow symbolic links
@@ -2788,6 +3073,8 @@ if compat_os_name == 'nt' and sys.version_info < (3, 8):
 else:
     compat_realpath = os.path.realpath

+compat_os_path_realpath = compat_realpath
+
 if sys.version_info < (3, 0):
     def compat_print(s):
@@ -2808,11 +3095,15 @@ if sys.version_info < (3, 0) and sys.platform == 'win32':
 else:
     compat_getpass = getpass.getpass

+compat_getpass_getpass = compat_getpass
+
+
 try:
     compat_input = raw_input
 except NameError:  # Python 3
     compat_input = input

+
 # Python < 2.6.5 requires kwargs to be bytes
 try:
     def _testfunc(x):
@@ -2863,6 +3154,51 @@ else:
     compat_socket_create_connection = socket.create_connection


+try:
+    from contextlib import suppress as compat_contextlib_suppress
+except ImportError:
+    class compat_contextlib_suppress(object):
+        _exceptions = None
+
+        def __init__(self, *exceptions):
+            super(compat_contextlib_suppress, self).__init__()
+            # TODO: [Base]ExceptionGroup (3.12+)
+            self._exceptions = exceptions
+
+        def __enter__(self):
+            return self
+
+        def __exit__(self, exc_type, exc_val, exc_tb):
+            return exc_type is not None and issubclass(exc_type, self._exceptions or tuple())
+
+
+# subprocess.Popen context manager
+# avoids leaking handles if .communicate() is not called
+try:
+    _Popen = subprocess.Popen
+    # check for required context manager attributes
+    _Popen.__enter__ and _Popen.__exit__
+    compat_subprocess_Popen = _Popen
+except AttributeError:
+    # not a context manager - make one
+    from contextlib import contextmanager
+
+    @contextmanager
+    def compat_subprocess_Popen(*args, **kwargs):
+        popen = None
+        try:
+            popen = _Popen(*args, **kwargs)
+            yield popen
+        finally:
+            if popen:
+                for f in (popen.stdin, popen.stdout, popen.stderr):
+                    if f:
+                        # repeated .close() is OK, but just in case
+                        with compat_contextlib_suppress(EnvironmentError):
+                            f.close()
+                popen.wait()
+
+
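(Editor's sketch of the two helpers above; assumes a POSIX `true` executable on PATH.)

    import subprocess
    from youtube_dl.compat import compat_contextlib_suppress, compat_subprocess_Popen

    # pipes are closed and the process reaped even without .communicate()
    with compat_subprocess_Popen(['true'], stdout=subprocess.PIPE) as proc:
        proc.wait()

    with compat_contextlib_suppress(OSError):
        raise OSError('silently swallowed')
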
 # Fix https://github.com/ytdl-org/youtube-dl/issues/4223
 # See http://bugs.python.org/issue9161 for what is broken
 def workaround_optparse_bug9161():
@@ -2890,6 +3226,7 @@ else:
     _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])

     def compat_get_terminal_size(fallback=(80, 24)):
+        from .utils import process_communicate_or_kill
         columns = compat_getenv('COLUMNS')
         if columns:
             columns = int(columns)
@@ -2906,7 +3243,7 @@ else:
                 sp = subprocess.Popen(
                     ['stty', 'size'],
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-                out, err = sp.communicate()
+                out, err = process_communicate_or_kill(sp)
                 _lines, _columns = map(int, out.split())
             except Exception:
                 _columns, _lines = _terminal_size(*fallback)
@@ -2917,15 +3254,16 @@ else:
                 lines = _lines
         return _terminal_size(columns, lines)

+
 try:
     itertools.count(start=0, step=1)
     compat_itertools_count = itertools.count
 except TypeError:  # Python 2.6
     def compat_itertools_count(start=0, step=1):
-        n = start
         while True:
-            yield n
-            n += step
+            yield start
+            start += step
+

 if sys.version_info >= (3, 0):
     from tokenize import tokenize as compat_tokenize_tokenize
@@ -2984,7 +3322,6 @@ except ImportError:
     except ImportError:
         compat_filter = filter

-
 try:
     from future_builtins import zip as compat_zip
 except ImportError:  # not 2.6+ or is 3.x
@@ -2994,6 +3331,82 @@ except ImportError:  # not 2.6+ or is 3.x
         compat_zip = zip


+# method renamed between Py2/3
+try:
+    from itertools import zip_longest as compat_itertools_zip_longest
+except ImportError:
+    from itertools import izip_longest as compat_itertools_zip_longest
+
+
+# new class in collections
+try:
+    from collections import ChainMap as compat_collections_chain_map
+    # Py3.3's ChainMap is deficient
+    if sys.version_info < (3, 4):
+        raise ImportError
+except ImportError:
+    # Py <= 3.3
+    class compat_collections_chain_map(compat_collections_abc.MutableMapping):
+
+        maps = [{}]
+
+        def __init__(self, *maps):
+            self.maps = list(maps) or [{}]
+
+        def __getitem__(self, k):
+            for m in self.maps:
+                if k in m:
+                    return m[k]
+            raise KeyError(k)
+
+        def __setitem__(self, k, v):
+            self.maps[0].__setitem__(k, v)
+            return
+
+        def __contains__(self, k):
+            return any((k in m) for m in self.maps)
+
+        def __delitem(self, k):
+            if k in self.maps[0]:
+                del self.maps[0][k]
+                return
+            raise KeyError(k)
+
+        def __delitem__(self, k):
+            self.__delitem(k)
+
+        def __iter__(self):
+            return itertools.chain(*reversed(self.maps))
+
+        def __len__(self):
+            return len(set(iter(self)))  # count distinct keys; len() of a bare iterator raises TypeError
+
+        # to match Py3, don't del directly
+        def pop(self, k, *args):
+            if self.__contains__(k):
+                off = self.__getitem__(k)
+                self.__delitem(k)
+                return off
+            elif len(args) > 0:
+                return args[0]
+            raise KeyError(k)
+
+        def new_child(self, m=None, **kwargs):
+            m = m or {}
+            m.update(kwargs)
+            return compat_collections_chain_map(m, *self.maps)
+
+        @property
+        def parents(self):
+            return compat_collections_chain_map(*(self.maps[1:]))
+
+
+# Pythons disagree on the type of a pattern (RegexObject, _sre.SRE_Pattern, Pattern, ...?)
+compat_re_Pattern = type(re.compile(''))
+# and on the type of a match
+compat_re_Match = type(re.match('a', 'a'))
+
+
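(Editor's sketch of the ChainMap shim and the regex type aliases above; behaviour is the same on the stdlib and fallback paths.)

    import re
    from youtube_dl.compat import compat_collections_chain_map, compat_re_Match

    cm = compat_collections_chain_map({'a': 1}, {'a': 0, 'b': 2})
    assert (cm['a'], cm['b']) == (1, 2)          # earlier maps win
    assert cm.new_child({'c': 3})['a'] == 1      # a child still sees its parents
    assert isinstance(re.match('a', 'abc'), compat_re_Match)
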
 if sys.version_info < (3, 3):
     def compat_b64decode(s, *args, **kwargs):
         if isinstance(s, compat_str):
@@ -3002,6 +3415,8 @@ if sys.version_info < (3, 3):
 else:
     compat_b64decode = base64.b64decode

+compat_base64_b64decode = compat_b64decode
+
 if platform.python_implementation() == 'PyPy' and sys.pypy_version_info < (5, 4, 0):
     # PyPy2 prior to version 5.4.0 expects byte strings as Windows function
@@ -3021,28 +3436,97 @@ else:
         return ctypes.WINFUNCTYPE(*args, **kwargs)


-__all__ = [
+if sys.version_info < (3, 0):
+    # open(file, mode='r', buffering=- 1, encoding=None, errors=None, newline=None, closefd=True) not: opener=None
+    def compat_open(file_, *args, **kwargs):
+        if len(args) > 6 or 'opener' in kwargs:
+            raise ValueError('open: unsupported argument "opener"')
+        return io.open(file_, *args, **kwargs)
+else:
+    compat_open = open
+
+
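(Editor's sketch: compat_open routes to io.open on Python 2, so keyword arguments like encoding= work the same across versions.)

    from youtube_dl.compat import compat_open

    with compat_open('greeting.txt', 'w', encoding='utf-8') as f:
        f.write(u'd\u00e9j\u00e0 vu')
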
+# compat_register_utf8
+def compat_register_utf8():
+    if sys.platform == 'win32':
+        # https://github.com/ytdl-org/youtube-dl/issues/820
+        from codecs import register, lookup
+        register(
+            lambda name: lookup('utf-8') if name == 'cp65001' else None)
+
+
+# compat_datetime_timedelta_total_seconds
+try:
+    compat_datetime_timedelta_total_seconds = datetime.timedelta.total_seconds
+except AttributeError:
+    # Py 2.6
+    def compat_datetime_timedelta_total_seconds(td):
+        return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
+
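(Editor's worked example of the Py2.6 fallback arithmetic above; on Python 3 both paths agree.)

    import datetime
    from youtube_dl.compat import compat_datetime_timedelta_total_seconds

    td = datetime.timedelta(days=1, seconds=30, microseconds=500000)
    # (500000 + (30 + 86400) * 10**6) / 10**6 == 86430.5
    assert compat_datetime_timedelta_total_seconds(td) == 86430.5
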
+# optional decompression packages
+# PyPi brotli package implements 'br' Content-Encoding
+try:
+    import brotli as compat_brotli
+except ImportError:
+    compat_brotli = None
+# PyPi ncompress package implements 'compress' Content-Encoding
+try:
+    import ncompress as compat_ncompress
+except ImportError:
+    compat_ncompress = None
+
+
+legacy = [
     'compat_HTMLParseError',
     'compat_HTMLParser',
     'compat_HTTPError',
-    'compat_Struct',
     'compat_b64decode',
-    'compat_basestring',
-    'compat_chr',
-    'compat_collections_abc',
     'compat_cookiejar',
     'compat_cookiejar_Cookie',
     'compat_cookies',
     'compat_cookies_SimpleCookie',
-    'compat_ctypes_WINFUNCTYPE',
     'compat_etree_Element',
-    'compat_etree_fromstring',
     'compat_etree_register_namespace',
     'compat_expanduser',
+    'compat_getpass',
+    'compat_parse_qs',
+    'compat_realpath',
+    'compat_urllib_parse_parse_qs',
+    'compat_urllib_parse_unquote',
+    'compat_urllib_parse_unquote_plus',
+    'compat_urllib_parse_unquote_to_bytes',
+    'compat_urllib_parse_urlencode',
+    'compat_urllib_parse_urlparse',
+    'compat_urlparse',
+    'compat_urlretrieve',
+    'compat_xml_parse_error',
+]
+
+
+__all__ = [
+    'compat_html_parser_HTMLParseError',
+    'compat_html_parser_HTMLParser',
+    'compat_Struct',
+    'compat_base64_b64decode',
+    'compat_basestring',
+    'compat_brotli',
+    'compat_casefold',
+    'compat_chr',
+    'compat_collections_abc',
+    'compat_collections_chain_map',
+    'compat_datetime_timedelta_total_seconds',
+    'compat_http_cookiejar',
+    'compat_http_cookiejar_Cookie',
+    'compat_http_cookies',
+    'compat_http_cookies_SimpleCookie',
+    'compat_contextlib_suppress',
+    'compat_ctypes_WINFUNCTYPE',
+    'compat_etree_fromstring',
+    'compat_etree_iterfind',
     'compat_filter',
     'compat_get_terminal_size',
     'compat_getenv',
-    'compat_getpass',
+    'compat_getpass_getpass',
     'compat_html_entities',
     'compat_html_entities_html5',
     'compat_http_client',
@@ -3050,14 +3534,20 @@ __all__ = [
     'compat_input',
     'compat_integer_types',
     'compat_itertools_count',
+    'compat_itertools_zip_longest',
     'compat_kwargs',
     'compat_map',
+    'compat_ncompress',
     'compat_numeric_types',
+    'compat_open',
     'compat_ord',
     'compat_os_name',
-    'compat_parse_qs',
+    'compat_os_path_expanduser',
+    'compat_os_path_realpath',
     'compat_print',
-    'compat_realpath',
+    'compat_re_Match',
+    'compat_re_Pattern',
+    'compat_register_utf8',
     'compat_setenv',
     'compat_shlex_quote',
     'compat_shlex_split',
@@ -3066,20 +3556,18 @@ __all__ = [
     'compat_struct_pack',
     'compat_struct_unpack',
     'compat_subprocess_get_DEVNULL',
+    'compat_subprocess_Popen',
     'compat_tokenize_tokenize',
     'compat_urllib_error',
     'compat_urllib_parse',
-    'compat_urllib_parse_unquote',
-    'compat_urllib_parse_unquote_plus',
-    'compat_urllib_parse_unquote_to_bytes',
-    'compat_urllib_parse_urlencode',
-    'compat_urllib_parse_urlparse',
     'compat_urllib_request',
     'compat_urllib_request_DataHandler',
     'compat_urllib_response',
-    'compat_urlparse',
-    'compat_urlretrieve',
-    'compat_xml_parse_error',
+    'compat_urllib_request_urlretrieve',
+    'compat_urllib_HTTPError',
+    'compat_xml_etree_ElementTree_Element',
+    'compat_xml_etree_ElementTree_ParseError',
+    'compat_xml_etree_register_namespace',
     'compat_xpath',
     'compat_zip',
     'workaround_optparse_bug9161',

+ 3 - 0
youtube_dl/downloader/__init__.py

@@ -50,6 +50,9 @@ def _get_suitable_downloader(info_dict, params={}):
         ed = get_external_downloader(external_downloader)
         if ed.can_download(info_dict):
             return ed
+        # Avoid using unwanted args since external_downloader was rejected
+        if params.get('external_downloader_args'):
+            params['external_downloader_args'] = None

     protocol = info_dict['protocol']
     if protocol.startswith('m3u8') and info_dict.get('is_live'):

+ 21 - 7
youtube_dl/downloader/common.py

@@ -88,17 +88,21 @@ class FileDownloader(object):
             return '---.-%'
         return '%6s' % ('%3.1f%%' % percent)

-    @staticmethod
-    def calc_eta(start, now, total, current):
+    @classmethod
+    def calc_eta(cls, start_or_rate, now_or_remaining, *args):
+        if len(args) < 2:
+            rate, remaining = (start_or_rate, now_or_remaining)
+            if None in (rate, remaining):
+                return None
+            return int(float(remaining) / rate)
+        start, now = (start_or_rate, now_or_remaining)
+        total, current = args[:2]
         if total is None:
             return None
         if now is None:
             now = time.time()
-        dif = now - start
-        if current == 0 or dif < 0.001:  # One millisecond
-            return None
-        rate = float(current) / dif
-        return int((float(total) - float(current)) / rate)
+        rate = cls.calc_speed(start, now, current)
+        return rate and int((float(total) - float(current)) / rate)

     @staticmethod
     def format_eta(eta):
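
(Editor's sketch of the two calling conventions the reworked calc_eta accepts; the numbers are illustrative.)

    from youtube_dl.downloader.common import FileDownloader

    # legacy form: (start_time, now, total_bytes, downloaded_bytes)
    assert FileDownloader.calc_eta(0.0, 10.0, 1000, 250) == 30
    # new form: (rate_in_bytes_per_sec, remaining_bytes)
    assert FileDownloader.calc_eta(25.0, 750) == 30
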
@@ -123,6 +127,12 @@ class FileDownloader(object):
     def format_retries(retries):
         return 'inf' if retries == float('inf') else '%.0f' % retries

+    @staticmethod
+    def filesize_or_none(unencoded_filename):
+        fn = encodeFilename(unencoded_filename)
+        if os.path.isfile(fn):
+            return os.path.getsize(fn)
+
     @staticmethod
     def best_block_size(elapsed_time, bytes):
         new_min = max(bytes / 2.0, 1.0)
@@ -329,6 +339,10 @@ class FileDownloader(object):
     def download(self, filename, info_dict):
         """Download to a filename using the info from info_dict
         Return True on success and False otherwise
+
+        This method filters the `Cookie` header from the info_dict to prevent leaks.
+        Downloaders have their own way of handling cookies.
+        See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
         """
         """
 
 
         nooverwrites_and_exists = (
         nooverwrites_and_exists = (

+ 25 - 22
youtube_dl/downloader/dash.py

@@ -1,5 +1,7 @@
 from __future__ import unicode_literals

+import itertools
+
 from .fragment import FragmentFD
 from ..compat import compat_urllib_error
 from ..utils import (
@@ -30,26 +32,28 @@ class DashSegmentsFD(FragmentFD):
         fragment_retries = self.params.get('fragment_retries', 0)
         skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)

-        frag_index = 0
-        for i, fragment in enumerate(fragments):
-            frag_index += 1
+        for frag_index, fragment in enumerate(fragments, 1):
             if frag_index <= ctx['fragment_index']:
                 continue
+            success = False
             # In DASH, the first segment contains necessary headers to
             # generate a valid MP4 file, so always abort for the first segment
-            fatal = i == 0 or not skip_unavailable_fragments
-            count = 0
-            while count <= fragment_retries:
+            fatal = frag_index == 1 or not skip_unavailable_fragments
+            fragment_url = fragment.get('url')
+            if not fragment_url:
+                assert fragment_base_url
+                fragment_url = urljoin(fragment_base_url, fragment['path'])
+            headers = info_dict.get('http_headers')
+            fragment_range = fragment.get('range')
+            if fragment_range:
+                headers = headers.copy() if headers else {}
+                headers['Range'] = 'bytes=%s' % (fragment_range,)
+            for count in itertools.count():
                 try:
-                    fragment_url = fragment.get('url')
-                    if not fragment_url:
-                        assert fragment_base_url
-                        fragment_url = urljoin(fragment_base_url, fragment['path'])
-                    success, frag_content = self._download_fragment(ctx, fragment_url, info_dict)
+                    success, frag_content = self._download_fragment(ctx, fragment_url, info_dict, headers)
                     if not success:
                         return False
                     self._append_fragment(ctx, frag_content)
-                    break
                 except compat_urllib_error.HTTPError as err:
                     # YouTube may often return 404 HTTP error for a fragment causing the
                     # whole download to fail. However if the same fragment is immediately
@@ -57,22 +61,21 @@ class DashSegmentsFD(FragmentFD):
                     # is usually enough) thus allowing to download the whole file successfully.
                     # To be future-proof we will retry all fragments that fail with any
                     # HTTP error.
-                    count += 1
-                    if count <= fragment_retries:
-                        self.report_retry_fragment(err, frag_index, count, fragment_retries)
+                    if count < fragment_retries:
+                        self.report_retry_fragment(err, frag_index, count + 1, fragment_retries)
+                        continue
                 except DownloadError:
                     # Don't retry fragment if error occurred during HTTP downloading
-                    # itself since it has own retry settings
-                    if not fatal:
-                        self.report_skip_fragment(frag_index)
-                        break
-                    raise
+                    # itself since it has its own retry settings
+                    if fatal:
+                        raise
+                break
-            if count > fragment_retries:
+            if not success:
                 if not fatal:
                     self.report_skip_fragment(frag_index)
                     continue
-                self.report_error('giving up after %s fragment retries' % fragment_retries)
+                self.report_error('giving up after %s fragment retries' % count)
                 return False

         self._finish_frag_download(ctx)
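
(Editor's illustration of the per-fragment Range header constructed above; the fragment dict here is hypothetical.)

    fragment = {'url': 'https://example.invalid/seg-1.m4s', 'range': '0-999'}
    headers = {'User-Agent': 'youtube-dl'}
    if fragment.get('range'):
        headers = headers.copy()
        headers['Range'] = 'bytes=%s' % (fragment['range'],)
    assert headers['Range'] == 'bytes=0-999'  # request only the first 1000 bytes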

+ 203 - 38
youtube_dl/downloader/external.py

@@ -1,17 +1,24 @@
 from __future__ import unicode_literals

-import os.path
+import os
 import re
 import subprocess
 import sys
+import tempfile
 import time

 from .common import FileDownloader
 from ..compat import (
     compat_setenv,
     compat_str,
+    compat_subprocess_Popen,
 )
-from ..postprocessor.ffmpeg import FFmpegPostProcessor, EXT_TO_OUT_FORMATS
+
+try:
+    from ..postprocessor.ffmpeg import FFmpegPostProcessor, EXT_TO_OUT_FORMATS
+except ImportError:
+    FFmpegPostProcessor = None
+
 from ..utils import (
     cli_option,
     cli_valueless_option,
@@ -22,6 +29,9 @@ from ..utils import (
     handle_youtubedl_headers,
     check_executable,
     is_outdated_version,
+    process_communicate_or_kill,
+    T,
+    traverse_obj,
 )


@@ -29,6 +39,7 @@ class ExternalFD(FileDownloader):
     def real_download(self, filename, info_dict):
         self.report_destination(filename)
         tmpfilename = self.temp_name(filename)
+        self._cookies_tempfile = None

         try:
             started = time.time()
@@ -41,6 +52,13 @@ class ExternalFD(FileDownloader):
             # should take place
             retval = 0
             self.to_screen('[%s] Interrupted by user' % self.get_basename())
+        finally:
+            if self._cookies_tempfile and os.path.isfile(self._cookies_tempfile):
+                try:
+                    os.remove(self._cookies_tempfile)
+                except OSError:
+                    self.report_warning(
+                        'Unable to delete temporary cookies file "{0}"'.format(self._cookies_tempfile))

         if retval == 0:
             status = {
@@ -96,6 +114,16 @@ class ExternalFD(FileDownloader):
     def _configuration_args(self, default=[]):
         return cli_configuration_args(self.params, 'external_downloader_args', default)

+    def _write_cookies(self):
+        if not self.ydl.cookiejar.filename:
+            tmp_cookies = tempfile.NamedTemporaryFile(suffix='.cookies', delete=False)
+            tmp_cookies.close()
+            self._cookies_tempfile = tmp_cookies.name
+            self.to_screen('[download] Writing temporary cookies file to "{0}"'.format(self._cookies_tempfile))
+        # real_download resets _cookies_tempfile; if it's None, save() will write to cookiejar.filename
+        self.ydl.cookiejar.save(self._cookies_tempfile, ignore_discard=True, ignore_expires=True)
+        return self.ydl.cookiejar.filename or self._cookies_tempfile
+
     def _call_downloader(self, tmpfilename, info_dict):
         """ Either overwrite this or implement _make_cmd """
         cmd = [encodeArgument(a) for a in self._make_cmd(tmpfilename, info_dict)]
@@ -104,18 +132,26 @@ class ExternalFD(FileDownloader):

         p = subprocess.Popen(
             cmd, stderr=subprocess.PIPE)
-        _, stderr = p.communicate()
+        _, stderr = process_communicate_or_kill(p)
         if p.returncode != 0:
             self.to_stderr(stderr.decode('utf-8', 'replace'))
         return p.returncode

+    @staticmethod
+    def _header_items(info_dict):
+        return traverse_obj(
+            info_dict, ('http_headers', T(dict.items), Ellipsis))
+

 class CurlFD(ExternalFD):
     AVAILABLE_OPT = '-V'

     def _make_cmd(self, tmpfilename, info_dict):
-        cmd = [self.exe, '--location', '-o', tmpfilename]
-        for key, val in info_dict['http_headers'].items():
+        cmd = [self.exe, '--location', '-o', tmpfilename, '--compressed']
+        cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
+        if cookie_header:
+            cmd += ['--cookie', cookie_header]
+        for key, val in self._header_items(info_dict):
             cmd += ['--header', '%s: %s' % (key, val)]
         cmd += self._bool_option('--continue-at', 'continuedl', '-', '0')
         cmd += self._valueless_option('--silent', 'noprogress')
@@ -141,7 +177,7 @@ class CurlFD(ExternalFD):

         # curl writes the progress to stderr so don't capture it.
         p = subprocess.Popen(cmd)
-        p.communicate()
+        process_communicate_or_kill(p)
         return p.returncode


@@ -150,8 +186,11 @@ class AxelFD(ExternalFD):

     def _make_cmd(self, tmpfilename, info_dict):
         cmd = [self.exe, '-o', tmpfilename]
-        for key, val in info_dict['http_headers'].items():
+        for key, val in self._header_items(info_dict):
             cmd += ['-H', '%s: %s' % (key, val)]
+        cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
+        if cookie_header:
+            cmd += ['-H', 'Cookie: {0}'.format(cookie_header), '--max-redirect=0']
         cmd += self._configuration_args()
         cmd += ['--', info_dict['url']]
         return cmd
@@ -161,8 +200,10 @@ class WgetFD(ExternalFD):
     AVAILABLE_OPT = '--version'

     def _make_cmd(self, tmpfilename, info_dict):
-        cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
-        for key, val in info_dict['http_headers'].items():
+        cmd = [self.exe, '-O', tmpfilename, '-nv', '--compression=auto']
+        if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
+            cmd += ['--load-cookies', self._write_cookies()]
+        for key, val in self._header_items(info_dict):
             cmd += ['--header', '%s: %s' % (key, val)]
         cmd += self._option('--limit-rate', 'ratelimit')
         retry = self._option('--tries', 'retries')
@@ -171,7 +212,10 @@ class WgetFD(ExternalFD):
                 retry[1] = '0'
             cmd += retry
         cmd += self._option('--bind-address', 'source_address')
-        cmd += self._option('--proxy', 'proxy')
+        proxy = self.params.get('proxy')
+        if proxy:
+            for var in ('http_proxy', 'https_proxy'):
+                cmd += ['--execute', '%s=%s' % (var, proxy)]
         cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate')
         cmd += self._configuration_args()
         cmd += ['--', info_dict['url']]
@@ -181,24 +225,121 @@ class WgetFD(ExternalFD):
 class Aria2cFD(ExternalFD):
     AVAILABLE_OPT = '-v'

+    @staticmethod
+    def _aria2c_filename(fn):
+        return fn if os.path.isabs(fn) else os.path.join('.', fn)
+
     def _make_cmd(self, tmpfilename, info_dict):
-        cmd = [self.exe, '-c']
-        cmd += self._configuration_args([
-            '--min-split-size', '1M', '--max-connection-per-server', '4'])
-        dn = os.path.dirname(tmpfilename)
-        if dn:
-            cmd += ['--dir', dn]
-        cmd += ['--out', os.path.basename(tmpfilename)]
-        for key, val in info_dict['http_headers'].items():
+        cmd = [self.exe, '-c',
+               '--console-log-level=warn', '--summary-interval=0', '--download-result=hide',
+               '--http-accept-gzip=true', '--file-allocation=none', '-x16', '-j16', '-s16']
+        if 'fragments' in info_dict:
+            cmd += ['--allow-overwrite=true', '--allow-piece-length-change=true']
+        else:
+            cmd += ['--min-split-size', '1M']
+
+        if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
+            cmd += ['--load-cookies={0}'.format(self._write_cookies())]
+        for key, val in self._header_items(info_dict):
             cmd += ['--header', '%s: %s' % (key, val)]
+        cmd += self._configuration_args(['--max-connection-per-server', '4'])
+        cmd += ['--out', os.path.basename(tmpfilename)]
+        cmd += self._option('--max-overall-download-limit', 'ratelimit')
         cmd += self._option('--interface', 'source_address')
         cmd += self._option('--all-proxy', 'proxy')
         cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
         cmd += self._bool_option('--remote-time', 'updatetime', 'true', 'false', '=')
-        cmd += ['--', info_dict['url']]
+        cmd += self._bool_option('--show-console-readout', 'noprogress', 'false', 'true', '=')
+        cmd += self._configuration_args()
+
+        # aria2c strips out spaces from the beginning/end of filenames and paths.
+        # We work around this issue by adding a "./" to the beginning of the
+        # filename and relative path, and adding a "/" at the end of the path.
+        # See: https://github.com/yt-dlp/yt-dlp/issues/276
+        # https://github.com/ytdl-org/youtube-dl/issues/20312
+        # https://github.com/aria2/aria2/issues/1373
+        dn = os.path.dirname(tmpfilename)
+        if dn:
+            cmd += ['--dir', self._aria2c_filename(dn) + os.path.sep]
+        if 'fragments' not in info_dict:
+            cmd += ['--out', self._aria2c_filename(os.path.basename(tmpfilename))]
+        cmd += ['--auto-file-renaming=false']
+        if 'fragments' in info_dict:
+            cmd += ['--file-allocation=none', '--uri-selector=inorder']
+            url_list_file = '%s.frag.urls' % (tmpfilename, )
+            url_list = []
+            for frag_index, fragment in enumerate(info_dict['fragments']):
+                fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
+                url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename)))
+            stream, _ = self.sanitize_open(url_list_file, 'wb')
+            stream.write('\n'.join(url_list).encode())
+            stream.close()
+            cmd += ['-i', self._aria2c_filename(url_list_file)]
+        else:
+            cmd += ['--', info_dict['url']]
         return cmd
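
(Editor's note: the "./" workaround in _aria2c_filename, shown on POSIX-style paths.)

    >>> Aria2cFD._aria2c_filename(' name with spaces.mp4')
    './ name with spaces.mp4'
    >>> Aria2cFD._aria2c_filename('/absolute/path.mp4')
    '/absolute/path.mp4'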


+class Aria2pFD(ExternalFD):
+    ''' Aria2pFD class
+    This class supports using aria2p as a downloader.
+    (aria2p is a command-line tool and Python library for interacting with an
+    aria2c daemon process through JSON-RPC.)
+    It makes detailed download progress reporting easier.
+    To use aria2p as the downloader, install both aria2c and aria2p (the
+    latter is available via pip); then run aria2c in the background with RPC
+    enabled (the --enable-rpc option).
+    '''
+    try:
+        import aria2p
+        __avail = True
+    except ImportError:
+        __avail = False
+
+    @classmethod
+    def available(cls):
+        return cls.__avail
+
+    def _call_downloader(self, tmpfilename, info_dict):
+        aria2 = self.aria2p.API(
+            self.aria2p.Client(
+                host='http://localhost',
+                port=6800,
+                secret=''
+            )
+        )
+
+        options = {
+            'min-split-size': '1M',
+            'max-connection-per-server': 4,
+            'auto-file-renaming': 'false',
+        }
+        options['dir'] = os.path.dirname(tmpfilename) or os.path.abspath('.')
+        options['out'] = os.path.basename(tmpfilename)
+        if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
+            options['load-cookies'] = self._write_cookies()
+        options['header'] = []
+        for key, val in self._header_items(info_dict):
+            options['header'].append('{0}: {1}'.format(key, val))
+        download = aria2.add_uris([info_dict['url']], options)
+        status = {
+            'status': 'downloading',
+            'tmpfilename': tmpfilename,
+        }
+        started = time.time()
+        while download.status in ['active', 'waiting']:
+            download = aria2.get_download(download.gid)
+            status.update({
+                'downloaded_bytes': download.completed_length,
+                'total_bytes': download.total_length,
+                'elapsed': time.time() - started,
+                'eta': download.eta.total_seconds(),
+                'speed': download.download_speed,
+            })
+            self._hook_progress(status)
+            time.sleep(.5)
+        return download.status != 'complete'
+
+
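(Editor's note, assuming the downloader key is derived from the class name as for the other FD classes: start the daemon, then select it.)

    $ aria2c --enable-rpc &
    $ youtube-dl --external-downloader aria2p 'https://example.com/video'
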
 class HttpieFD(ExternalFD):
     @classmethod
     def available(cls):
@@ -206,25 +347,34 @@ class HttpieFD(ExternalFD):

     def _make_cmd(self, tmpfilename, info_dict):
         cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
-        for key, val in info_dict['http_headers'].items():
+        for key, val in self._header_items(info_dict):
             cmd += ['%s:%s' % (key, val)]
+
+        # httpie 3.1.0+ removes the Cookie header on redirect, so this should be safe for now. [1]
+        # If we ever need cookie handling for redirects, we can export the cookiejar into a session. [2]
+        # 1: https://github.com/httpie/httpie/security/advisories/GHSA-9w4w-cpc8-h2fq
+        # 2: https://httpie.io/docs/cli/sessions
+        cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
+        if cookie_header:
+            cmd += ['Cookie:%s' % cookie_header]
         return cmd


 class FFmpegFD(ExternalFD):
 class FFmpegFD(ExternalFD):
     @classmethod
     @classmethod
     def supports(cls, info_dict):
     def supports(cls, info_dict):
-        return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps', 'm3u8', 'rtsp', 'rtmp', 'mms')
+        return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps', 'm3u8', 'rtsp', 'rtmp', 'mms', 'http_dash_segments')
 
     @classmethod
     def available(cls):
-        return FFmpegPostProcessor().available
+        # actual availability can only be confirmed for an instance
+        return bool(FFmpegPostProcessor)
 
     def _call_downloader(self, tmpfilename, info_dict):
-        url = info_dict['url']
-        ffpp = FFmpegPostProcessor(downloader=self)
+        # `downloader` means the parent `YoutubeDL`
+        ffpp = FFmpegPostProcessor(downloader=self.ydl)
         if not ffpp.available:
-            self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
+            self.report_error('ffmpeg required for download but no ffmpeg (nor avconv) executable could be found. Please install one.')
             return False
         ffpp.check_version()
 
@@ -253,7 +403,15 @@ class FFmpegFD(ExternalFD):
         # if end_time:
         #     args += ['-t', compat_str(end_time - start_time)]
 
-        if info_dict['http_headers'] and re.match(r'^https?://', url):
+        url = info_dict['url']
+        cookies = self.ydl.cookiejar.get_cookies_for_url(url)
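+        # ffmpeg's http protocol takes Set-Cookie-style records: one
+        # 'name=value; path=...; domain=...;' entry per cookie, CRLF-terminated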
+        if cookies:
+            args.extend(['-cookies', ''.join(
+                '{0}={1}; path={2}; domain={3};\r\n'.format(
+                    cookie.name, cookie.value, cookie.path, cookie.domain)
+                for cookie in cookies)])
+
+        if info_dict.get('http_headers') and re.match(r'^https?://', url):
             # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
             # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
             headers = handle_youtubedl_headers(info_dict['http_headers'])
@@ -333,18 +491,25 @@ class FFmpegFD(ExternalFD):
 
         self._debug_cmd(args)
 
-        proc = subprocess.Popen(args, stdin=subprocess.PIPE, env=env)
-        try:
-            retval = proc.wait()
-        except KeyboardInterrupt:
-            # subprocces.run would send the SIGKILL signal to ffmpeg and the
-            # mp4 file couldn't be played, but if we ask ffmpeg to quit it
-            # produces a file that is playable (this is mostly useful for live
-            # streams). Note that Windows is not affected and produces playable
-            # files (see https://github.com/ytdl-org/youtube-dl/issues/8300).
-            if sys.platform != 'win32':
-                proc.communicate(b'q')
-            raise
+        # From [1], a PIPE opened in Popen() should be closed, unless
+        # .communicate() is called. Avoid leaking any PIPEs by using Popen
+        # as a context manager (newer Python 3.x and compat)
+        # Fixes "Resource Warning" in test/test_downloader_external.py
+        # [1] https://devpress.csdn.net/python/62fde12d7e66823466192e48.html
+        with compat_subprocess_Popen(args, stdin=subprocess.PIPE, env=env) as proc:
+            try:
+                retval = proc.wait()
+            except BaseException as e:
+                # subprocess.run would send the SIGKILL signal to ffmpeg and the
+                # mp4 file couldn't be played, but if we ask ffmpeg to quit it
+                # produces a file that is playable (this is mostly useful for live
+                # streams). Note that Windows is not affected and produces playable
+                # files (see https://github.com/ytdl-org/youtube-dl/issues/8300).
+                if isinstance(e, KeyboardInterrupt) and sys.platform != 'win32':
+                    process_communicate_or_kill(proc, b'q')
+                else:
+                    proc.kill()
+                raise
         return retval
 
 

+ 31 - 15
youtube_dl/downloader/fragment.py

@@ -71,7 +71,7 @@ class FragmentFD(FileDownloader):
 
     @staticmethod
     def __do_ytdl_file(ctx):
-        return not ctx['live'] and not ctx['tmpfilename'] == '-'
+        return ctx['live'] is not True and ctx['tmpfilename'] != '-'
 
     def _read_ytdl_file(self, ctx):
         assert 'ytdl_corrupt' not in ctx
@@ -101,6 +101,13 @@ class FragmentFD(FileDownloader):
             'url': frag_url,
             'http_headers': headers or info_dict.get('http_headers'),
         }
+        frag_resume_len = 0
+        if ctx['dl'].params.get('continuedl', True):
+            frag_resume_len = self.filesize_or_none(
+                self.temp_name(fragment_filename))
+        fragment_info_dict['frag_resume_len'] = frag_resume_len
+        ctx['frag_resume_len'] = frag_resume_len or 0
+
         success = ctx['dl'].download(fragment_filename, fragment_info_dict)
         if not success:
             return False, None
@@ -124,9 +131,7 @@ class FragmentFD(FileDownloader):
             del ctx['fragment_filename_sanitized']
 
     def _prepare_frag_download(self, ctx):
-        if 'live' not in ctx:
-            ctx['live'] = False
-        if not ctx['live']:
+        if not ctx.setdefault('live', False):
             total_frags_str = '%d' % ctx['total_frags']
             ad_frags = ctx.get('ad_frags', 0)
             if ad_frags:
@@ -136,10 +141,11 @@ class FragmentFD(FileDownloader):
         self.to_screen(
             '[%s] Total fragments: %s' % (self.FD_NAME, total_frags_str))
         self.report_destination(ctx['filename'])
+        continuedl = self.params.get('continuedl', True)
         dl = HttpQuietDownloader(
             self.ydl,
             {
-                'continuedl': True,
+                'continuedl': continuedl,
                 'quiet': True,
                 'noprogress': True,
                 'ratelimit': self.params.get('ratelimit'),
@@ -150,12 +156,11 @@ class FragmentFD(FileDownloader):
         )
         tmpfilename = self.temp_name(ctx['filename'])
         open_mode = 'wb'
-        resume_len = 0
 
         # Establish possible resume length
-        if os.path.isfile(encodeFilename(tmpfilename)):
+        resume_len = self.filesize_or_none(tmpfilename) or 0
+        if resume_len > 0:
             open_mode = 'ab'
-            resume_len = os.path.getsize(encodeFilename(tmpfilename))
 
         # Should be initialized before ytdl file check
         ctx.update({
@@ -164,7 +169,8 @@ class FragmentFD(FileDownloader):
         })
 
         if self.__do_ytdl_file(ctx):
-            if os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename']))):
+            ytdl_file_exists = os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename'])))
+            if continuedl and ytdl_file_exists:
                 self._read_ytdl_file(ctx)
                 is_corrupt = ctx.get('ytdl_corrupt') is True
                 is_inconsistent = ctx['fragment_index'] > 0 and resume_len == 0
@@ -178,7 +184,12 @@ class FragmentFD(FileDownloader):
                     if 'ytdl_corrupt' in ctx:
                         del ctx['ytdl_corrupt']
                     self._write_ytdl_file(ctx)
+
             else:
+                if not continuedl:
+                    if ytdl_file_exists:
+                        self._read_ytdl_file(ctx)
+                    ctx['fragment_index'] = resume_len = 0
                 self._write_ytdl_file(ctx)
                 assert ctx['fragment_index'] == 0
 
@@ -209,6 +220,7 @@ class FragmentFD(FileDownloader):
         start = time.time()
         ctx.update({
             'started': start,
+            'fragment_started': start,
             # Amount of fragment's bytes downloaded by the time of the previous
             # frag progress hook invocation
             'prev_frag_downloaded_bytes': 0,
@@ -218,6 +230,9 @@ class FragmentFD(FileDownloader):
             if s['status'] not in ('downloading', 'finished'):
                 return
 
+            if not total_frags and ctx.get('fragment_count'):
+                state['fragment_count'] = ctx['fragment_count']
+
             time_now = time.time()
             state['elapsed'] = time_now - start
             frag_total_bytes = s.get('total_bytes') or 0
@@ -232,16 +247,17 @@ class FragmentFD(FileDownloader):
                 ctx['fragment_index'] = state['fragment_index']
                 state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes']
                 ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes']
+                ctx['speed'] = state['speed'] = self.calc_speed(
+                    ctx['fragment_started'], time_now, frag_total_bytes)
+                ctx['fragment_started'] = time.time()
                 ctx['prev_frag_downloaded_bytes'] = 0
             else:
                 frag_downloaded_bytes = s['downloaded_bytes']
                 state['downloaded_bytes'] += frag_downloaded_bytes - ctx['prev_frag_downloaded_bytes']
+                ctx['speed'] = state['speed'] = self.calc_speed(
+                    ctx['fragment_started'], time_now, frag_downloaded_bytes - ctx['frag_resume_len'])
                 if not ctx['live']:
-                    state['eta'] = self.calc_eta(
-                        start, time_now, estimated_size - resume_len,
-                        state['downloaded_bytes'] - resume_len)
-                state['speed'] = s.get('speed') or ctx.get('speed')
-                ctx['speed'] = state['speed']
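+                    # calc_eta() now takes (speed, remaining_bytes) rather than
+                    # the (start, now, total, current) timestamps used before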
+                    state['eta'] = self.calc_eta(state['speed'], estimated_size - state['downloaded_bytes'])
                 ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
             self._hook_progress(state)
 
@@ -268,7 +284,7 @@ class FragmentFD(FileDownloader):
                         os.utime(ctx['filename'], (time.time(), filetime))
                     except Exception:
                         pass
-            downloaded_bytes = os.path.getsize(encodeFilename(ctx['filename']))
+            downloaded_bytes = self.filesize_or_none(ctx['filename']) or 0
 
         self._hook_progress({
             'downloaded_bytes': downloaded_bytes,

+ 8 - 10
youtube_dl/downloader/http.py

@@ -58,9 +58,9 @@ class HttpFD(FileDownloader):
 
         if self.params.get('continuedl', True):
             # Establish possible resume length
-            if os.path.isfile(encodeFilename(ctx.tmpfilename)):
-                ctx.resume_len = os.path.getsize(
-                    encodeFilename(ctx.tmpfilename))
+            ctx.resume_len = info_dict.get('frag_resume_len')
+            if ctx.resume_len is None:
+                ctx.resume_len = self.filesize_or_none(ctx.tmpfilename) or 0
 
         ctx.is_resume = ctx.resume_len > 0
 
@@ -115,9 +115,9 @@ class HttpFD(FileDownloader):
                         raise RetryDownload(err)
                     raise err
                 # When trying to resume, Content-Range HTTP header of response has to be checked
-                # to match the value of requested Range HTTP header. This is due to a webservers
+                # to match the value of requested Range HTTP header. This is due to webservers
                 # that don't support resuming and serve a whole file with no Content-Range
-                # set in response despite of requested Range (see
+                # set in response despite requested Range (see
                 # https://github.com/ytdl-org/youtube-dl/issues/6057#issuecomment-126129799)
                 if has_range:
                     content_range = ctx.data.headers.get('Content-Range')
@@ -141,7 +141,8 @@ class HttpFD(FileDownloader):
                     # Content-Range is either not present or invalid. Assuming remote webserver is
                     # trying to send the whole file, resume is not possible, so wiping the local file
                     # and performing entire redownload
-                    self.report_unable_to_resume()
+                    if range_start > 0:
+                        self.report_unable_to_resume()
                     ctx.resume_len = 0
                     ctx.open_mode = 'wb'
                 ctx.data_len = int_or_none(ctx.data.info().get('Content-length', None))
@@ -293,10 +294,7 @@ class HttpFD(FileDownloader):
 
                 # Progress message
                 speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
-                if ctx.data_len is None:
-                    eta = None
-                else:
-                    eta = self.calc_eta(start, time.time(), ctx.data_len - ctx.resume_len, byte_counter - ctx.resume_len)
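+                # when ctx.data_len is None (unknown total size) the 'and'
+                # short-circuits and eta is left as None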
+                eta = self.calc_eta(speed, ctx.data_len and (ctx.data_len - byte_counter))
 
                 self._hook_progress({
                     'status': 'downloading',

+ 6 - 4
youtube_dl/downloader/rtmp.py

@@ -89,11 +89,13 @@ class RtmpFD(FileDownloader):
                                 self.to_screen('')
                             cursor_in_new_line = True
                             self.to_screen('[rtmpdump] ' + line)
-            finally:
+                if not cursor_in_new_line:
+                    self.to_screen('')
+                return proc.wait()
+            except BaseException:  # Including KeyboardInterrupt
+                proc.kill()
                 proc.wait()
-            if not cursor_in_new_line:
-                self.to_screen('')
-            return proc.returncode
+                raise
 
         url = info_dict['url']
         player_url = info_dict.get('player_url')

+ 32 - 25
youtube_dl/extractor/adn.py

@@ -31,30 +31,34 @@ from ..utils import (
 
 
 class ADNIE(InfoExtractor):
-    IE_DESC = 'Anime Digital Network'
-    _VALID_URL = r'https?://(?:www\.)?animedigitalnetwork\.fr/video/[^/]+/(?P<id>\d+)'
-    _TEST = {
-        'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites',
-        'md5': '0319c99885ff5547565cacb4f3f9348d',
+    IE_DESC = 'Animation Digital Network'
+    _VALID_URL = r'https?://(?:www\.)?(?:animation|anime)digitalnetwork\.fr/video/[^/]+/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://animationdigitalnetwork.fr/video/fruits-basket/9841-episode-1-a-ce-soir',
+        'md5': '1c9ef066ceb302c86f80c2b371615261',
         'info_dict': {
-            'id': '7778',
+            'id': '9841',
             'ext': 'mp4',
-            'title': 'Blue Exorcist - Kyôto Saga - Episode 1',
-            'description': 'md5:2f7b5aa76edbc1a7a92cedcda8a528d5',
-            'series': 'Blue Exorcist - Kyôto Saga',
-            'duration': 1467,
-            'release_date': '20170106',
+            'title': 'Fruits Basket - Episode 1',
+            'description': 'md5:14be2f72c3c96809b0ca424b0097d336',
+            'series': 'Fruits Basket',
+            'duration': 1437,
+            'release_date': '20190405',
             'comment_count': int,
             'average_rating': float,
-            'season_number': 2,
-            'episode': 'Début des hostilités',
+            'season_number': 1,
+            'episode': 'À ce soir !',
             'episode_number': 1,
-        }
-    }
+        },
+        'skip': 'Only available in region (FR, ...)',
+    }, {
+        'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites',
+        'only_matching': True,
+    }]
 
-    _NETRC_MACHINE = 'animedigitalnetwork'
-    _BASE_URL = 'http://animedigitalnetwork.fr'
-    _API_BASE_URL = 'https://gw.api.animedigitalnetwork.fr/'
+    _NETRC_MACHINE = 'animationdigitalnetwork'
+    _BASE = 'animationdigitalnetwork.fr'
+    _API_BASE_URL = 'https://gw.api.' + _BASE + '/'
     _PLAYER_BASE_URL = _API_BASE_URL + 'player/'
     _HEADERS = {}
     _LOGIN_ERR_MESSAGE = 'Unable to log in'
@@ -82,14 +86,14 @@ class ADNIE(InfoExtractor):
         if subtitle_location:
             enc_subtitles = self._download_webpage(
                 subtitle_location, video_id, 'Downloading subtitles data',
-                fatal=False, headers={'Origin': 'https://animedigitalnetwork.fr'})
+                fatal=False, headers={'Origin': 'https://' + self._BASE})
         if not enc_subtitles:
             return None
 
-        # http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
+        # http://animationdigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
         dec_subtitles = intlist_to_bytes(aes_cbc_decrypt(
             bytes_to_intlist(compat_b64decode(enc_subtitles[24:])),
-            bytes_to_intlist(binascii.unhexlify(self._K + 'ab9f52f5baae7c72')),
+            bytes_to_intlist(binascii.unhexlify(self._K + '7fac1178830cfe0c')),
             bytes_to_intlist(compat_b64decode(enc_subtitles[:24]))
         ))
         subtitles_json = self._parse_json(
@@ -138,9 +142,9 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
         if not username:
             return
         try:
+            url = self._API_BASE_URL + 'authentication/login'
             access_token = (self._download_json(
-                self._API_BASE_URL + 'authentication/login', None,
-                'Logging in', self._LOGIN_ERR_MESSAGE, fatal=False,
+                url, None, 'Logging in', self._LOGIN_ERR_MESSAGE, fatal=False,
                 data=urlencode_postdata({
                     'password': password,
                     'rememberMe': False,
@@ -153,7 +157,8 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
             message = None
             if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                 resp = self._parse_json(
-                    e.cause.read().decode(), None, fatal=False) or {}
+                    self._webpage_read_content(e.cause, url, username),
+                    username, fatal=False) or {}
                 message = resp.get('message') or resp.get('code')
             self.report_warning(message or self._LOGIN_ERR_MESSAGE)
 
@@ -211,7 +216,9 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
                     # This usually goes away with a different random pkcs1pad, so retry
                     continue
 
-                error = self._parse_json(e.cause.read(), video_id)
+                error = self._parse_json(
+                    self._webpage_read_content(e.cause, links_url, video_id),
+                    video_id, fatal=False) or {}
                 message = error.get('message')
                 if e.cause.code == 403 and error.get('code') == 'player-bad-geolocation-country':
                     self.raise_geo_restricted(msg=message)

+ 20 - 7
youtube_dl/extractor/aenetworks.py

@@ -8,6 +8,8 @@ from ..utils import (
     ExtractorError,
     GeoRestrictedError,
     int_or_none,
+    remove_start,
+    traverse_obj,
     update_url_query,
     urlencode_postdata,
 )
@@ -20,8 +22,8 @@ class AENetworksBaseIE(ThePlatformIE):
             (?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com|
             fyi\.tv
         )/'''
-    _THEPLATFORM_KEY = 'crazyjava'
-    _THEPLATFORM_SECRET = 's3cr3t'
+    _THEPLATFORM_KEY = '43jXaGRQud'
+    _THEPLATFORM_SECRET = 'S10BPXHMlb'
     _DOMAIN_MAP = {
         'history.com': ('HISTORY', 'history'),
         'aetv.com': ('AETV', 'aetv'),
@@ -33,14 +35,17 @@ class AENetworksBaseIE(ThePlatformIE):
     }
 
     def _extract_aen_smil(self, smil_url, video_id, auth=None):
-        query = {'mbr': 'true'}
+        query = {
+            'mbr': 'true',
+            'formats': 'M3U+none,MPEG-DASH+none,MPEG4,MP3',
+        }
         if auth:
             query['auth'] = auth
         TP_SMIL_QUERY = [{
             'assetTypes': 'high_video_ak',
-            'switch': 'hls_high_ak'
+            'switch': 'hls_high_ak',
         }, {
-            'assetTypes': 'high_video_s3'
+            'assetTypes': 'high_video_s3',
         }, {
             'assetTypes': 'high_video_s3',
             'switch': 'hls_high_fastly',
@@ -75,7 +80,14 @@ class AENetworksBaseIE(ThePlatformIE):
         requestor_id, brand = self._DOMAIN_MAP[domain]
         result = self._download_json(
             'https://feeds.video.aetnd.com/api/v2/%s/videos' % brand,
-            filter_value, query={'filter[%s]' % filter_key: filter_value})['results'][0]
+            filter_value, query={'filter[%s]' % filter_key: filter_value})
+        result = traverse_obj(
+            result, ('results',
+                     lambda k, v: k == 0 and v[filter_key] == filter_value),
+            get_all=False)
+        if not result:
+            raise ExtractorError('Show not found in A&E feed (too new?)', expected=True,
+                                 video_id=remove_start(filter_value, '/'))
         title = result['title']
         video_id = result['id']
         media_url = result['publicUrl']
@@ -126,7 +138,7 @@ class AENetworksIE(AENetworksBaseIE):
             'skip_download': True,
         },
         'add_ie': ['ThePlatform'],
-        'skip': 'This video is only available for users of participating TV providers.',
+        'skip': 'Geo-restricted - This content is not available in your location.'
     }, {
         'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1',
         'info_dict': {
@@ -143,6 +155,7 @@ class AENetworksIE(AENetworksBaseIE):
             'skip_download': True,
         },
         'add_ie': ['ThePlatform'],
+        'skip': 'This video is only available for users of participating TV providers.',
     }, {
         'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8',
         'only_matching': True

+ 1 - 1
youtube_dl/extractor/aliexpress.py

@@ -18,7 +18,7 @@ class AliExpressLiveIE(InfoExtractor):
             'id': '2800002704436634',
             'ext': 'mp4',
             'title': 'CASIMA7.22',
-            'thumbnail': r're:http://.*\.jpg',
+            'thumbnail': r're:https?://.*\.jpg',
             'uploader': 'CASIMA Official Store',
             'timestamp': 1500717600,
             'upload_date': '20170722',

+ 89 - 0
youtube_dl/extractor/alsace20tv.py

@@ -0,0 +1,89 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    clean_html,
+    dict_get,
+    get_element_by_class,
+    int_or_none,
+    unified_strdate,
+    url_or_none,
+)
+
+
+class Alsace20TVIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/(?:[\w-]+/)+[\w-]+-(?P<id>[\w]+)'
+    _TESTS = [{
+        'url': 'https://www.alsace20.tv/VOD/Actu/JT/Votre-JT-jeudi-3-fevrier-lyNHCXpYJh.html',
+        # 'md5': 'd91851bf9af73c0ad9b2cdf76c127fbb',
+        'info_dict': {
+            'id': 'lyNHCXpYJh',
+            'ext': 'mp4',
+            'description': 'md5:fc0bc4a0692d3d2dba4524053de4c7b7',
+            'title': 'Votre JT du jeudi 3 février',
+            'upload_date': '20220203',
+            'thumbnail': r're:https?://.+\.jpg',
+            'duration': 1073,
+            'view_count': int,
+        },
+        'params': {
+            'format': 'bestvideo',
+        },
+    }]
+
+    def _extract_video(self, video_id, url=None):
+        info = self._download_json(
+            'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key=%s&habillage=0&mode=html' % (video_id, ),
+            video_id) or {}
+        title = info['titre']
+
+        formats = []
+        for res, fmt_url in (info.get('files') or {}).items():
+            formats.extend(
+                self._extract_smil_formats(fmt_url, video_id, fatal=False)
+                if '/smil:_' in fmt_url
+                else self._extract_mpd_formats(fmt_url, video_id, mpd_id=res, fatal=False))
+        self._sort_formats(formats)
+
+        webpage = (url and self._download_webpage(url, video_id, fatal=False)) or ''
+        thumbnail = url_or_none(dict_get(info, ('image', 'preview', )) or self._og_search_thumbnail(webpage))
+        upload_date = self._search_regex(r'/(\d{6})_', thumbnail, 'upload_date', default=None)
+        upload_date = unified_strdate('20%s-%s-%s' % (upload_date[:2], upload_date[2:4], upload_date[4:])) if upload_date else None
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'description': clean_html(get_element_by_class('wysiwyg', webpage)),
+            'upload_date': upload_date,
+            'thumbnail': thumbnail,
+            'duration': int_or_none(self._og_search_property('video:duration', webpage) if webpage else None),
+            'view_count': int_or_none(info.get('nb_vues')),
+        }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        return self._extract_video(video_id, url)
+
+
+class Alsace20TVEmbedIE(Alsace20TVIE):
+    _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/emb/(?P<id>[\w]+)'
+    _TESTS = [{
+        'url': 'https://www.alsace20.tv/emb/lyNHCXpYJh',
+        # 'md5': 'd91851bf9af73c0ad9b2cdf76c127fbb',
+        'info_dict': {
+            'id': 'lyNHCXpYJh',
+            'ext': 'mp4',
+            'title': 'Votre JT du jeudi 3 février',
+            'upload_date': '20220203',
+            'thumbnail': r're:https?://.+\.jpg',
+            'view_count': int,
+        },
+        'params': {
+            'format': 'bestvideo',
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        return self._extract_video(video_id)

+ 88 - 27
youtube_dl/extractor/americastestkitchen.py

@@ -15,7 +15,7 @@ from ..utils import (
 
 
 class AmericasTestKitchenIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?(?:americastestkitchen|cooks(?:country|illustrated))\.com/(?P<resource_type>episode|videos)/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?(?:americastestkitchen|cooks(?:country|illustrated))\.com/(?:cooks(?:country|illustrated)/)?(?P<resource_type>episode|videos)/(?P<id>\d+)'
     _TESTS = [{
         'url': 'https://www.americastestkitchen.com/episode/582-weeknight-japanese-suppers',
         'md5': 'b861c3e365ac38ad319cfd509c30577f',
@@ -23,15 +23,20 @@ class AmericasTestKitchenIE(InfoExtractor):
             'id': '5b400b9ee338f922cb06450c',
             'title': 'Japanese Suppers',
             'ext': 'mp4',
+            'display_id': 'weeknight-japanese-suppers',
             'description': 'md5:64e606bfee910627efc4b5f050de92b3',
-            'thumbnail': r're:^https?://',
-            'timestamp': 1523318400,
-            'upload_date': '20180410',
-            'release_date': '20180410',
+            'timestamp': 1523304000,
+            'upload_date': '20180409',
+            'release_date': '20180409',
             'series': "America's Test Kitchen",
             'series': "America's Test Kitchen",
+            'season': 'Season 18',
             'season_number': 18,
             'episode': 'Japanese Suppers',
             'episode_number': 15,
+            'duration': 1376,
+            'thumbnail': r're:^https?://',
+            'average_rating': 0,
+            'view_count': int,
         },
         'params': {
             'skip_download': True,
@@ -44,15 +49,20 @@ class AmericasTestKitchenIE(InfoExtractor):
             'id': '5fbe8c61bda2010001c6763b',
             'title': 'Simple Chicken Dinner',
             'ext': 'mp4',
+            'display_id': 'atktv_2103_simple-chicken-dinner_full-episode_web-mp4',
             'description': 'md5:eb68737cc2fd4c26ca7db30139d109e7',
-            'thumbnail': r're:^https?://',
-            'timestamp': 1610755200,
-            'upload_date': '20210116',
-            'release_date': '20210116',
+            'timestamp': 1610737200,
+            'upload_date': '20210115',
+            'release_date': '20210115',
             'series': "America's Test Kitchen",
             'series': "America's Test Kitchen",
+            'season': 'Season 21',
             'season_number': 21,
             'episode': 'Simple Chicken Dinner',
             'episode_number': 3,
+            'duration': 1397,
+            'thumbnail': r're:^https?://',
+            'view_count': int,
+            'average_rating': 0,
         },
         'params': {
             'skip_download': True,
@@ -60,6 +70,12 @@ class AmericasTestKitchenIE(InfoExtractor):
     }, {
         'url': 'https://www.americastestkitchen.com/videos/3420-pan-seared-salmon',
         'only_matching': True,
+    }, {
+        'url': 'https://www.americastestkitchen.com/cookscountry/episode/564-when-only-chocolate-will-do',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.americastestkitchen.com/cooksillustrated/videos/4478-beef-wellington',
+        'only_matching': True,
     }, {
         'url': 'https://www.cookscountry.com/episode/564-when-only-chocolate-will-do',
         'only_matching': True,
@@ -94,7 +110,7 @@ class AmericasTestKitchenIE(InfoExtractor):
 
 
 class AmericasTestKitchenSeasonIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?(?P<show>americastestkitchen|cookscountry)\.com/episodes/browse/season_(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?(?P<show>americastestkitchen|(?P<cooks>cooks(?:country|illustrated)))\.com(?:(?:/(?P<show2>cooks(?:country|illustrated)))?(?:/?$|(?<!ated)(?<!ated\.com)/episodes/browse/season_(?P<season>\d+)))'
     _TESTS = [{
         # ATK Season
         'url': 'https://www.americastestkitchen.com/episodes/browse/season_1',
@@ -105,48 +121,93 @@ class AmericasTestKitchenSeasonIE(InfoExtractor):
         'playlist_count': 13,
     }, {
         # Cooks Country Season
-        'url': 'https://www.cookscountry.com/episodes/browse/season_12',
+        'url': 'https://www.americastestkitchen.com/cookscountry/episodes/browse/season_12',
         'info_dict': {
             'id': 'season_12',
             'title': 'Season 12',
         },
         'playlist_count': 13,
+    }, {
+        # America's Test Kitchen Series
+        'url': 'https://www.americastestkitchen.com/',
+        'info_dict': {
+            'id': 'americastestkitchen',
+            'title': 'America\'s Test Kitchen',
+        },
+        'playlist_count': 558,
+    }, {
+        # Cooks Country Series
+        'url': 'https://www.americastestkitchen.com/cookscountry',
+        'info_dict': {
+            'id': 'cookscountry',
+            'title': 'Cook\'s Country',
+        },
+        'playlist_count': 199,
+    }, {
+        'url': 'https://www.americastestkitchen.com/cookscountry/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.cookscountry.com/episodes/browse/season_12',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.cookscountry.com',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.americastestkitchen.com/cooksillustrated/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.cooksillustrated.com',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        show_name, season_number = re.match(self._VALID_URL, url).groups()
-        season_number = int(season_number)
+        match = re.match(self._VALID_URL, url).groupdict()
+        show = match.get('show2')
+        show_path = ('/' + show) if show else ''
+        show = show or match['show']
+        season_number = int_or_none(match.get('season'))
+
+        slug, title = {
+            'americastestkitchen': ('atk', 'America\'s Test Kitchen'),
+            'cookscountry': ('cco', 'Cook\'s Country'),
+            'cooksillustrated': ('cio', 'Cook\'s Illustrated'),
+        }[show]
 
-        slug = 'atk' if show_name == 'americastestkitchen' else 'cco'
+        facet_filters = [
+            'search_document_klass:episode',
+            'search_show_slug:' + slug,
+        ]
 
-        season = 'Season %d' % season_number
+        if season_number:
+            playlist_id = 'season_%d' % season_number
+            playlist_title = 'Season %d' % season_number
+            facet_filters.append('search_season_list:' + playlist_title)
+        else:
+            playlist_id = show
+            playlist_title = title
 
         season_search = self._download_json(
             'https://y1fnzxui30-dsn.algolia.net/1/indexes/everest_search_%s_season_desc_production' % slug,
-            season, headers={
-                'Origin': 'https://www.%s.com' % show_name,
+            playlist_id, headers={
+                'Origin': 'https://www.americastestkitchen.com',
                 'X-Algolia-API-Key': '8d504d0099ed27c1b73708d22871d805',
                 'X-Algolia-Application-Id': 'Y1FNZXUI30',
             }, query={
-                'facetFilters': json.dumps([
-                    'search_season_list:' + season,
-                    'search_document_klass:episode',
-                    'search_show_slug:' + slug,
-                ]),
-                'attributesToRetrieve': 'description,search_%s_episode_number,search_document_date,search_url,title' % slug,
+                'facetFilters': json.dumps(facet_filters),
+                'attributesToRetrieve': 'description,search_%s_episode_number,search_document_date,search_url,title,search_atk_episode_season' % slug,
                 'attributesToHighlight': '',
                 'hitsPerPage': 1000,
             })
 
         def entries():
             for episode in (season_search.get('hits') or []):
-                search_url = episode.get('search_url')
+                search_url = episode.get('search_url')  # always formatted like '/episode/123-title-of-episode'
                 if not search_url:
                     continue
                 yield {
                     '_type': 'url',
-                    'url': 'https://www.%s.com%s' % (show_name, search_url),
-                    'id': try_get(episode, lambda e: e['objectID'].split('_')[-1]),
+                    'url': 'https://www.americastestkitchen.com%s%s' % (show_path, search_url),
+                    'id': try_get(episode, lambda e: e['objectID'].rsplit('_', 1)[-1]),
                     'title': episode.get('title'),
                     'description': episode.get('description'),
                     'timestamp': unified_timestamp(episode.get('search_document_date')),
@@ -156,4 +217,4 @@ class AmericasTestKitchenSeasonIE(InfoExtractor):
                 }
 
         return self.playlist_result(
-            entries(), 'season_%d' % season_number, season)
+            entries(), playlist_id, playlist_title)

+ 59 - 0
youtube_dl/extractor/bigo.py

@@ -0,0 +1,59 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import ExtractorError, urlencode_postdata
+
+
+class BigoIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?bigo\.tv/(?:[a-z]{2,}/)?(?P<id>[^/]+)'
+
+    _TESTS = [{
+        'url': 'https://www.bigo.tv/ja/221338632',
+        'info_dict': {
+            'id': '6576287577575737440',
+            'title': '土よ〜💁‍♂️ 休憩室/REST room',
+            'thumbnail': r're:https?://.+',
+            'uploader': '✨Shin💫',
+            'uploader_id': '221338632',
+            'is_live': True,
+        },
+        'skip': 'livestream',
+    }, {
+        'url': 'https://www.bigo.tv/th/Tarlerm1304',
+        'only_matching': True,
+    }, {
+        'url': 'https://bigo.tv/115976881',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        user_id = self._match_id(url)
+
+        info_raw = self._download_json(
+            'https://bigo.tv/studio/getInternalStudioInfo',
+            user_id, data=urlencode_postdata({'siteId': user_id}))
+
+        if not isinstance(info_raw, dict):
+            raise ExtractorError('Received invalid JSON data')
+        if info_raw.get('code'):
+            raise ExtractorError(
+                'Bigo says: %s (code %s)' % (info_raw.get('msg'), info_raw.get('code')), expected=True)
+        info = info_raw.get('data') or {}
+
+        if not info.get('alive'):
+            raise ExtractorError('This user is offline.', expected=True)
+
+        return {
+            'id': info.get('roomId') or user_id,
+            'title': info.get('roomTopic') or info.get('nick_name') or user_id,
+            'formats': [{
+                'url': info.get('hls_src'),
+                'ext': 'mp4',
+                'protocol': 'm3u8',
+            }],
+            'thumbnail': info.get('snapshot'),
+            'uploader': info.get('nick_name'),
+            'uploader_id': user_id,
+            'is_live': True,
+        }

+ 5 - 0
youtube_dl/extractor/bilibili.py

@@ -369,6 +369,11 @@ class BilibiliAudioIE(BilibiliAudioBaseIE):
             'filesize': int_or_none(play_data.get('size')),
         }]
 
+        for a_format in formats:
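+            # the audio CDN appears to reject requests that lack a Referer
+            # matching the page URL, hence the per-format header below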
+            a_format.setdefault('http_headers', {}).update({
+                'Referer': url,
+            })
+
         song = self._call_api('song/info', au_id)
         title = song['title']
         statistic = song.get('statistic') or {}

+ 173 - 0
youtube_dl/extractor/blerp.py

@@ -0,0 +1,173 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from ..utils import (
+    strip_or_none,
+    traverse_obj,
+)
+from .common import InfoExtractor
+
+
+class BlerpIE(InfoExtractor):
+    IE_NAME = 'blerp'
+    _VALID_URL = r'https?://(?:www\.)?blerp\.com/soundbites/(?P<id>[0-9a-zA-Z]+)'
+    _TESTS = [{
+        'url': 'https://blerp.com/soundbites/6320fe8745636cb4dd677a5a',
+        'info_dict': {
+            'id': '6320fe8745636cb4dd677a5a',
+            'title': 'Samsung Galaxy S8 Over the Horizon Ringtone 2016',
+            'uploader': 'luminousaj',
+            'uploader_id': '5fb81e51aa66ae000c395478',
+            'ext': 'mp3',
+            'tags': ['samsung', 'galaxy', 's8', 'over the horizon', '2016', 'ringtone'],
+        }
+    }, {
+        'url': 'https://blerp.com/soundbites/5bc94ef4796001000498429f',
+        'info_dict': {
+            'id': '5bc94ef4796001000498429f',
+            'title': 'Yee',
+            'uploader': '179617322678353920',
+            'uploader_id': '5ba99cf71386730004552c42',
+            'ext': 'mp3',
+            'tags': ['YEE', 'YEET', 'wo ha haah catchy tune yee', 'yee']
+        }
+    }]
+
+    _GRAPHQL_OPERATIONNAME = "webBitePageGetBite"
+    _GRAPHQL_QUERY = (
+        '''query webBitePageGetBite($_id: MongoID!) {
+            web {
+                biteById(_id: $_id) {
+                    ...bitePageFrag
+                    __typename
+                }
+                __typename
+            }
+        }
+
+        fragment bitePageFrag on Bite {
+            _id
+            title
+            userKeywords
+            keywords
+            color
+            visibility
+            isPremium
+            owned
+            price
+            extraReview
+            isAudioExists
+            image {
+                filename
+                original {
+                    url
+                    __typename
+                }
+                __typename
+            }
+            userReactions {
+                _id
+                reactions
+                createdAt
+                __typename
+            }
+            topReactions
+            totalSaveCount
+            saved
+            blerpLibraryType
+            license
+            licenseMetaData
+            playCount
+            totalShareCount
+            totalFavoriteCount
+            totalAddedToBoardCount
+            userCategory
+            userAudioQuality
+            audioCreationState
+            transcription
+            userTranscription
+            description
+            createdAt
+            updatedAt
+            author
+            listingType
+            ownerObject {
+                _id
+                username
+                profileImage {
+                    filename
+                    original {
+                        url
+                        __typename
+                    }
+                    __typename
+                }
+                __typename
+            }
+            transcription
+            favorited
+            visibility
+            isCurated
+            sourceUrl
+            audienceRating
+            strictAudienceRating
+            ownerId
+            reportObject {
+                reportedContentStatus
+                __typename
+            }
+            giphy {
+                mp4
+                gif
+                __typename
+            }
+            audio {
+                filename
+                original {
+                    url
+                    __typename
+                }
+                mp3 {
+                    url
+                    __typename
+                }
+                __typename
+            }
+            __typename
+        }
+
+        ''')
+
+    def _real_extract(self, url):
+        audio_id = self._match_id(url)
+
+        data = {
+            'operationName': self._GRAPHQL_OPERATIONNAME,
+            'query': self._GRAPHQL_QUERY,
+            'variables': {
+                '_id': audio_id
+            }
+        }
+
+        headers = {
+            'Content-Type': 'application/json'
+        }
+
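+        # the GraphQL endpoint appears to answer plain unauthenticated POSTs
+        # with the full bite metadata; no API key or login is sent here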
+        json_result = self._download_json('https://api.blerp.com/graphql',
+                                          audio_id, data=json.dumps(data).encode('utf-8'), headers=headers)
+
+        bite_json = json_result['data']['web']['biteById']
+
+        info_dict = {
+            'id': bite_json['_id'],
+            'url': bite_json['audio']['mp3']['url'],
+            'title': bite_json['title'],
+            'uploader': traverse_obj(bite_json, ('ownerObject', 'username'), expected_type=strip_or_none),
+            'uploader_id': traverse_obj(bite_json, ('ownerObject', '_id'), expected_type=strip_or_none),
+            'ext': 'mp3',
+            'tags': list(filter(None, map(strip_or_none, (traverse_obj(bite_json, 'userKeywords', expected_type=list) or []))) or None)
+        }
+
+        return info_dict

+ 17 - 1
youtube_dl/extractor/bongacams.py

@@ -1,3 +1,4 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -12,13 +13,28 @@ from ..utils import (
 
 
 class BongaCamsIE(InfoExtractor):
-    _VALID_URL = r'https?://(?P<host>(?:[^/]+\.)?bongacams\d*\.com)/(?P<id>[^/?&#]+)'
+    _VALID_URL = r'https?://(?P<host>(?:[^/]+\.)?bongacams\d*\.(?:com|net))/(?P<id>[^/?&#]+)'
     _TESTS = [{
         'url': 'https://de.bongacams.com/azumi-8',
         'only_matching': True,
     }, {
         'url': 'https://cn.bongacams.com/azumi-8',
         'only_matching': True,
+    }, {
+        'url': 'https://de.bongacams.net/claireashton',
+        'info_dict': {
+            'id': 'claireashton',
+            'ext': 'mp4',
+            'title': r're:ClaireAshton \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
+            'age_limit': 18,
+            'uploader_id': 'ClaireAshton',
+            'uploader': 'ClaireAshton',
+            'like_count': int,
+            'is_live': True,
+        },
+        'params': {
+            'skip_download': True,
+        },
     }]
 
     def _real_extract(self, url):

+ 79 - 0
youtube_dl/extractor/caffeine.py

@@ -0,0 +1,79 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    int_or_none,
+    merge_dicts,
+    parse_iso8601,
+    T,
+    traverse_obj,
+    txt_or_none,
+    urljoin,
+)
+
+
+class CaffeineTVIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?caffeine\.tv/[^/]+/video/(?P<id>[0-9a-f-]+)'
+    _TESTS = [{
+        'url': 'https://www.caffeine.tv/TsuSurf/video/cffc0a00-e73f-11ec-8080-80017d29f26e',
+        'info_dict': {
+            'id': 'cffc0a00-e73f-11ec-8080-80017d29f26e',
+            'ext': 'mp4',
+            'title': 'GOOOOD MORNINNNNN #highlights',
+            'timestamp': 1654702180,
+            'upload_date': '20220608',
+            'uploader': 'TsuSurf',
+            'duration': 3145,
+            'age_limit': 17,
+        },
+        'params': {
+            'format': 'bestvideo',
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        json_data = self._download_json(
+            'https://api.caffeine.tv/social/public/activity/' + video_id,
+            video_id)
+        broadcast_info = traverse_obj(json_data, ('broadcast_info', T(dict))) or {}
+        title = broadcast_info['broadcast_title']
+        video_url = broadcast_info['video_url']
+
+        ext = determine_ext(video_url)
+        if ext == 'm3u8':
+            formats = self._extract_m3u8_formats(
+                video_url, video_id, 'mp4', entry_protocol='m3u8',
+                fatal=False)
+        else:
+            formats = [{'url': video_url}]
+        self._sort_formats(formats)
+
+        return merge_dicts({
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+        }, traverse_obj(json_data, {
+            'uploader': ((None, 'user'), 'username'),
+        }, get_all=False), traverse_obj(json_data, {
+            'like_count': ('like_count', T(int_or_none)),
+            'view_count': ('view_count', T(int_or_none)),
+            'comment_count': ('comment_count', T(int_or_none)),
+            'tags': ('tags', Ellipsis, T(txt_or_none)),
+            'is_live': 'is_live',
+            'uploader': ('user', 'name'),
+        }), traverse_obj(broadcast_info, {
+            'duration': ('content_duration', T(int_or_none)),
+            'timestamp': ('broadcast_start_time', T(parse_iso8601)),
+            'thumbnail': ('preview_image_path', T(lambda u: urljoin(url, u))),
+            'age_limit': ('content_rating', T(lambda r: r and {
+                # assume Apple Store ratings [1]
+                # 1. https://en.wikipedia.org/wiki/Mobile_software_content_rating_system
+                'FOUR_PLUS': 0,
+                'NINE_PLUS': 9,
+                'TWELVE_PLUS': 12,
+                'SEVENTEEN_PLUS': 17,
+            }.get(r, 17))),
+        }))
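
The age_limit mapping above rides on the T() transform wrapper of traverse_obj (both imported from ..utils in this tree). A minimal sketch of how the content_rating path resolves; the rating table is taken from the extractor, the input dict is hypothetical:

    from youtube_dl.utils import T, traverse_obj

    broadcast_info = {'content_rating': 'TWELVE_PLUS'}  # hypothetical API payload
    age_limit = traverse_obj(broadcast_info, ('content_rating', T(lambda r: r and {
        'FOUR_PLUS': 0,
        'NINE_PLUS': 9,
        'TWELVE_PLUS': 12,
        'SEVENTEEN_PLUS': 17,
    }.get(r, 17))))
    assert age_limit == 12  # unlisted ratings fall back to 17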

+ 74 - 0
youtube_dl/extractor/callin.py

@@ -0,0 +1,74 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    ExtractorError,
+    traverse_obj,
+    try_get,
+)
+
+
+class CallinIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?callin\.com/episode/(?:[^/#?-]+-)*(?P<id>[^/#?-]+)'
+    _TESTS = [{
+        'url': 'https://www.callin.com/episode/fcc-commissioner-brendan-carr-on-elons-PrumRdSQJW',
+        'md5': '14ede27ee2c957b7e4db93140fc0745c',
+        'info_dict': {
+            'id': 'PrumRdSQJW',
+            'ext': 'mp4',
+            'title': 'FCC Commissioner Brendan Carr on Elon’s Starlink',
+            'description': 'Or, why the government doesn’t like SpaceX',
+            'channel': 'The Pull Request',
+            'channel_url': 'https://callin.com/show/the-pull-request-ucnDJmEKAa',
+        }
+    }, {
+        'url': 'https://www.callin.com/episode/episode-81-elites-melt-down-over-student-debt-lzxMidUnjA',
+        'md5': '16f704ddbf82a27e3930533b12062f07',
+        'info_dict': {
+            'id': 'lzxMidUnjA',
+            'ext': 'mp4',
+            'title': 'Episode 81- Elites MELT DOWN over Student Debt Victory? Rumble in NYC?',
+            'description': 'Let’s talk todays episode about the primary election shake up in NYC and the elites melting down over student debt cancelation.',
+            'channel': 'The DEBRIEF With Briahna Joy Gray',
+            'channel_url': 'https://callin.com/show/the-debrief-with-briahna-joy-gray-siiFDzGegm',
+        }
+    }]
+
+    def _search_nextjs_data(self, webpage, video_id, transform_source=None, fatal=True, **kw):
+        return self._parse_json(
+            self._search_regex(
+                r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
+                webpage, 'next.js data', fatal=fatal, **kw),
+            video_id, transform_source=transform_source, fatal=fatal)
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        next_data = self._search_nextjs_data(webpage, video_id)
+        episode = traverse_obj(next_data, ('props', 'pageProps', 'episode'), expected_type=dict)
+        if not episode:
+            raise ExtractorError('Failed to find episode data')
+
+        title = episode.get('title') or self._og_search_title(webpage)
+        description = episode.get('description') or self._og_search_description(webpage)
+
+        formats = []
+        formats.extend(self._extract_m3u8_formats(
+            episode.get('m3u8'), video_id, 'mp4',
+            entry_protocol='m3u8_native', fatal=False))
+        self._sort_formats(formats)
+
+        channel = try_get(episode, lambda x: x['show']['title'], compat_str)
+        channel_url = try_get(episode, lambda x: x['show']['linkObj']['resourceUrl'], compat_str)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'formats': formats,
+            'channel': channel,
+            'channel_url': channel_url,
+        }
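
The _search_nextjs_data helper above is just regex-plus-JSON. Roughly, on a toy page (regex copied verbatim from the extractor):

    import json
    import re

    html = '<script id="__NEXT_DATA__" type="application/json">{"props": {"pageProps": {}}}</script>'
    m = re.search(
        r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>', html)
    next_data = json.loads(m.group(1))
    assert 'pageProps' in next_data['props']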

+ 10 - 24
youtube_dl/extractor/cammodels.py

@@ -3,7 +3,6 @@ from __future__ import unicode_literals

 from .common import InfoExtractor
 from ..utils import (
-    ExtractorError,
     int_or_none,
     url_or_none,
 )
@@ -20,32 +19,11 @@ class CamModelsIE(InfoExtractor):
     def _real_extract(self, url):
         user_id = self._match_id(url)

-        webpage = self._download_webpage(
-            url, user_id, headers=self.geo_verification_headers())
-
-        manifest_root = self._html_search_regex(
-            r'manifestUrlRoot=([^&\']+)', webpage, 'manifest', default=None)
-
-        if not manifest_root:
-            ERRORS = (
-                ("I'm offline, but let's stay connected", 'This user is currently offline'),
-                ('in a private show', 'This user is in a private show'),
-                ('is currently performing LIVE', 'This model is currently performing live'),
-            )
-            for pattern, message in ERRORS:
-                if pattern in webpage:
-                    error = message
-                    expected = True
-                    break
-            else:
-                error = 'Unable to find manifest URL root'
-                expected = False
-            raise ExtractorError(error, expected=expected)
-
         manifest = self._download_json(
-            '%s%s.json' % (manifest_root, user_id), user_id)
+            'https://manifest-server.naiadsystems.com/live/s:%s.json' % user_id, user_id)

         formats = []
+        thumbnails = []
         for format_id, format_dict in manifest['formats'].items():
             if not isinstance(format_dict, dict):
                 continue
@@ -85,6 +63,13 @@ class CamModelsIE(InfoExtractor):
                         'preference': -1,
                     })
                 else:
+                    if format_id == 'jpeg':
+                        thumbnails.append({
+                            'url': f['url'],
+                            'width': f['width'],
+                            'height': f['height'],
+                            'format_id': f['format_id'],
+                        })
                     continue
                 formats.append(f)
         self._sort_formats(formats)
@@ -92,6 +77,7 @@ class CamModelsIE(InfoExtractor):
         return {
             'id': user_id,
             'title': self._live_title(user_id),
+            'thumbnails': thumbnails,
             'is_live': True,
             'formats': formats,
             'age_limit': 18
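
A sketch of what the new jpeg branch does, with a hypothetical manifest entry: still-image renditions are diverted into thumbnails instead of being dropped with the other non-downloadable formats:

    f = {'url': 'https://example.invalid/preview.jpg', 'width': 1280,
         'height': 720, 'format_id': 'jpeg'}  # hypothetical manifest entry
    thumbnails = []
    if f['format_id'] == 'jpeg':
        thumbnails.append(dict((k, f[k]) for k in ('url', 'width', 'height', 'format_id')))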

+ 91 - 79
youtube_dl/extractor/ceskatelevize.py

@@ -12,70 +12,136 @@ from ..utils import (
     ExtractorError,
     float_or_none,
     sanitized_Request,
-    unescapeHTML,
-    update_url_query,
+    str_or_none,
+    traverse_obj,
     urlencode_postdata,
     USER_AGENTS,
 )


 class CeskaTelevizeIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?ceskatelevize\.cz/ivysilani/(?:[^/?#&]+/)*(?P<id>[^/#?]+)'
+    _VALID_URL = r'https?://(?:www\.)?ceskatelevize\.cz/(?:ivysilani|porady|zive)/(?:[^/?#&]+/)*(?P<id>[^/#?]+)'
     _TESTS = [{
-        'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
+        'url': 'http://www.ceskatelevize.cz/ivysilani/10441294653-hyde-park-civilizace/215411058090502/bonus/20641-bonus-01-en',
         'info_dict': {
-            'id': '61924494877246241',
+            'id': '61924494877028507',
             'ext': 'mp4',
-            'title': 'Hyde Park Civilizace: Život v Grónsku',
-            'description': 'md5:3fec8f6bb497be5cdb0c9e8781076626',
+            'title': 'Bonus 01 - En - Hyde Park Civilizace',
+            'description': 'English Subtittles',
             'thumbnail': r're:^https?://.*\.jpg',
-            'duration': 3350,
+            'duration': 81.3,
         },
         'params': {
             # m3u8 download
             'skip_download': True,
         },
     }, {
-        'url': 'http://www.ceskatelevize.cz/ivysilani/10441294653-hyde-park-civilizace/215411058090502/bonus/20641-bonus-01-en',
+        # live stream
+        'url': 'http://www.ceskatelevize.cz/zive/ct1/',
         'info_dict': {
-            'id': '61924494877028507',
+            'id': '102',
             'ext': 'mp4',
-            'title': 'Hyde Park Civilizace: Bonus 01 - En',
-            'description': 'English Subtittles',
-            'thumbnail': r're:^https?://.*\.jpg',
-            'duration': 81.3,
+            'title': r'ČT1 - živé vysílání online',
+            'description': 'Sledujte živé vysílání kanálu ČT1 online. Vybírat si můžete i z dalších kanálů České televize na kterémkoli z vašich zařízení.',
+            'is_live': True,
         },
         'params': {
             # m3u8 download
             'skip_download': True,
         },
     }, {
-        # live stream
+        # another
         'url': 'http://www.ceskatelevize.cz/ivysilani/zive/ct4/',
+        'only_matching': True,
         'info_dict': {
             'id': 402,
             'ext': 'mp4',
             'title': r're:^ČT Sport \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
             'is_live': True,
         },
+        # 'skip': 'Georestricted to Czech Republic',
+    }, {
+        'url': 'http://www.ceskatelevize.cz/ivysilani/embed/iFramePlayer.php?hash=d6a3e1370d2e4fa76296b90bad4dfc19673b641e&IDEC=217 562 22150/0004&channelID=1&width=100%25',
+        'only_matching': True,
+    }, {
+        # video with 18+ caution trailer
+        'url': 'http://www.ceskatelevize.cz/porady/10520528904-queer/215562210900007-bogotart/',
+        'info_dict': {
+            'id': '215562210900007-bogotart',
+            'title': 'Bogotart - Queer',
+            'description': 'Hlavní město Kolumbie v doprovodu queer umělců. Vroucí svět plný vášně, sebevědomí, ale i násilí a bolesti',
+        },
+        'playlist': [{
+            'info_dict': {
+                'id': '61924494877311053',
+                'ext': 'mp4',
+                'title': 'Bogotart - Queer (Varování 18+)',
+                'duration': 11.9,
+            },
+        }, {
+            'info_dict': {
+                'id': '61924494877068022',
+                'ext': 'mp4',
+                'title': 'Bogotart - Queer (Queer)',
+                'thumbnail': r're:^https?://.*\.jpg',
+                'duration': 1558.3,
+            },
+        }],
         'params': {
             # m3u8 download
             'skip_download': True,
         },
-        'skip': 'Georestricted to Czech Republic',
     }, {
-        'url': 'http://www.ceskatelevize.cz/ivysilani/embed/iFramePlayer.php?hash=d6a3e1370d2e4fa76296b90bad4dfc19673b641e&IDEC=217 562 22150/0004&channelID=1&width=100%25',
+        # iframe embed
+        'url': 'http://www.ceskatelevize.cz/porady/10614999031-neviditelni/21251212048/',
         'only_matching': True,
     }]

+    def _search_nextjs_data(self, webpage, video_id, **kw):
+        return self._parse_json(
+            self._search_regex(
+                r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
+                webpage, 'next.js data', **kw),
+            video_id, **kw)
+
     def _real_extract(self, url):
         playlist_id = self._match_id(url)
-
-        webpage = self._download_webpage(url, playlist_id)
+        webpage, urlh = self._download_webpage_handle(url, playlist_id)
+        parsed_url = compat_urllib_parse_urlparse(urlh.geturl())
+        site_name = self._og_search_property('site_name', webpage, fatal=False, default='Česká televize')
+        playlist_title = self._og_search_title(webpage, default=None)
+        if site_name and playlist_title:
+            playlist_title = re.split(r'\s*[—|]\s*%s' % (site_name, ), playlist_title, 1)[0]
+        playlist_description = self._og_search_description(webpage, default=None)
+        if playlist_description:
+            playlist_description = playlist_description.replace('\xa0', ' ')
+
+        type_ = 'IDEC'
+        if re.search(r'(^/porady|/zive)/', parsed_url.path):
+            next_data = self._search_nextjs_data(webpage, playlist_id)
+            if '/zive/' in parsed_url.path:
+                idec = traverse_obj(next_data, ('props', 'pageProps', 'data', 'liveBroadcast', 'current', 'idec'), get_all=False)
+            else:
+                idec = traverse_obj(next_data, ('props', 'pageProps', 'data', ('show', 'mediaMeta'), 'idec'), get_all=False)
+                if not idec:
+                    idec = traverse_obj(next_data, ('props', 'pageProps', 'data', 'videobonusDetail', 'bonusId'), get_all=False)
+                    if idec:
+                        type_ = 'bonus'
+            if not idec:
+                raise ExtractorError('Failed to find IDEC id')
+            iframe_hash = self._download_webpage(
+                'https://www.ceskatelevize.cz/v-api/iframe-hash/',
+                playlist_id, note='Getting IFRAME hash')
+            query = {'hash': iframe_hash, 'origin': 'iVysilani', 'autoStart': 'true', type_: idec, }
+            webpage = self._download_webpage(
+                'https://www.ceskatelevize.cz/ivysilani/embed/iFramePlayer.php',
+                playlist_id, note='Downloading player', query=query)

         NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
         if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
-            raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
+            self.raise_geo_restricted(NOT_AVAILABLE_STRING)
+        if any(not_found in webpage for not_found in ('Neplatný parametr pro videopřehrávač', 'IDEC nebyl nalezen', )):
+            raise ExtractorError('no video with IDEC available', video_id=idec, expected=True)

         type_ = None
         episode_id = None
@@ -100,7 +166,7 @@ class CeskaTelevizeIE(InfoExtractor):
         data = {
             'playlist[0][type]': type_,
             'playlist[0][id]': episode_id,
-            'requestUrl': compat_urllib_parse_urlparse(url).path,
+            'requestUrl': parsed_url.path,
             'requestSource': 'iVysilani',
         }

@@ -108,7 +174,7 @@ class CeskaTelevizeIE(InfoExtractor):

         for user_agent in (None, USER_AGENTS['Safari']):
             req = sanitized_Request(
-                'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
+                'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist/',
                 data=urlencode_postdata(data))

             req.add_header('Content-type', 'application/x-www-form-urlencoded')
@@ -130,9 +196,6 @@ class CeskaTelevizeIE(InfoExtractor):
             req = sanitized_Request(compat_urllib_parse_unquote(playlist_url))
             req.add_header('Referer', url)

-            playlist_title = self._og_search_title(webpage, default=None)
-            playlist_description = self._og_search_description(webpage, default=None)
-
             playlist = self._download_json(req, playlist_id, fatal=False)
             if not playlist:
                 continue
@@ -167,7 +230,7 @@ class CeskaTelevizeIE(InfoExtractor):
                     entries[num]['formats'].extend(formats)
                     continue

-                item_id = item.get('id') or item['assetId']
+                item_id = str_or_none(item.get('id') or item['assetId'])
                 title = item['title']

                 duration = float_or_none(item.get('duration'))
@@ -181,8 +244,6 @@ class CeskaTelevizeIE(InfoExtractor):

                 if playlist_len == 1:
                     final_title = playlist_title or title
-                    if is_live:
-                        final_title = self._live_title(final_title)
                 else:
                     final_title = '%s (%s)' % (playlist_title, title)

@@ -200,6 +261,8 @@ class CeskaTelevizeIE(InfoExtractor):
         for e in entries:
             self._sort_formats(e['formats'])

+        if len(entries) == 1:
+            return entries[0]
         return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)

     def _get_subtitles(self, episode_id, subs):
@@ -236,54 +299,3 @@ class CeskaTelevizeIE(InfoExtractor):
                     yield line

         return '\r\n'.join(_fix_subtitle(subtitles))
-
-
-class CeskaTelevizePoradyIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?ceskatelevize\.cz/porady/(?:[^/?#&]+/)*(?P<id>[^/#?]+)'
-    _TESTS = [{
-        # video with 18+ caution trailer
-        'url': 'http://www.ceskatelevize.cz/porady/10520528904-queer/215562210900007-bogotart/',
-        'info_dict': {
-            'id': '215562210900007-bogotart',
-            'title': 'Queer: Bogotart',
-            'description': 'Alternativní průvodce současným queer světem',
-        },
-        'playlist': [{
-            'info_dict': {
-                'id': '61924494876844842',
-                'ext': 'mp4',
-                'title': 'Queer: Bogotart (Varování 18+)',
-                'duration': 10.2,
-            },
-        }, {
-            'info_dict': {
-                'id': '61924494877068022',
-                'ext': 'mp4',
-                'title': 'Queer: Bogotart (Queer)',
-                'thumbnail': r're:^https?://.*\.jpg',
-                'duration': 1558.3,
-            },
-        }],
-        'params': {
-            # m3u8 download
-            'skip_download': True,
-        },
-    }, {
-        # iframe embed
-        'url': 'http://www.ceskatelevize.cz/porady/10614999031-neviditelni/21251212048/',
-        'only_matching': True,
-    }]
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-
-        webpage = self._download_webpage(url, video_id)
-
-        data_url = update_url_query(unescapeHTML(self._search_regex(
-            (r'<span[^>]*\bdata-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
-             r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?ceskatelevize\.cz/ivysilani/embed/iFramePlayer\.php.*?)\1'),
-            webpage, 'iframe player url', group='url')), query={
-                'autoStart': 'true',
-        })
-
-        return self.url_result(data_url, ie=CeskaTelevizeIE.ie_key())
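
The IDEC lookup in the rewritten _real_extract leans on traverse_obj branching: a tuple inside the path tries each alternative key, and get_all=False returns the first hit. A minimal sketch with a hypothetical __NEXT_DATA__ payload (path copied from the extractor):

    from youtube_dl.utils import traverse_obj

    next_data = {'props': {'pageProps': {'data': {
        'mediaMeta': {'idec': '217 562 22150/0004'}}}}}  # hypothetical
    idec = traverse_obj(
        next_data, ('props', 'pageProps', 'data', ('show', 'mediaMeta'), 'idec'),
        get_all=False)
    assert idec == '217 562 22150/0004'  # 'show' branch missed, 'mediaMeta' matched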

+ 69 - 0
youtube_dl/extractor/clipchamp.py

@@ -0,0 +1,69 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    ExtractorError,
+    merge_dicts,
+    T,
+    traverse_obj,
+    unified_timestamp,
+    url_or_none,
+)
+
+
+class ClipchampIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?clipchamp\.com/watch/(?P<id>[\w-]+)'
+    _TESTS = [{
+        'url': 'https://clipchamp.com/watch/gRXZ4ZhdDaU',
+        'info_dict': {
+            'id': 'gRXZ4ZhdDaU',
+            'ext': 'mp4',
+            'title': 'Untitled video',
+            'uploader': 'Alexander Schwartz',
+            'timestamp': 1680805580,
+            'upload_date': '20230406',
+            'thumbnail': r're:^https?://.+\.jpg',
+        },
+        'params': {
+            'skip_download': 'm3u8',
+            'format': 'bestvideo',
+        },
+    }]
+
+    _STREAM_URL_TMPL = 'https://%s.cloudflarestream.com/%s/manifest/video.%s'
+    _STREAM_URL_QUERY = {'parentOrigin': 'https://clipchamp.com'}
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        data = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['video']
+
+        storage_location = data.get('storage_location')
+        if storage_location != 'cf_stream':
+            raise ExtractorError('Unsupported clip storage location "%s"' % (storage_location,))
+
+        path = data['download_url']
+        iframe = self._download_webpage(
+            'https://iframe.cloudflarestream.com/' + path, video_id, 'Downloading player iframe')
+        subdomain = self._search_regex(
+            r'''\bcustomer-domain-prefix\s*=\s*("|')(?P<sd>[\w-]+)\1''', iframe,
+            'subdomain', group='sd', fatal=False) or 'customer-2ut9yn3y6fta1yxe'
+
+        formats = self._extract_mpd_formats(
+            self._STREAM_URL_TMPL % (subdomain, path, 'mpd'), video_id,
+            query=self._STREAM_URL_QUERY, fatal=False, mpd_id='dash')
+        formats.extend(self._extract_m3u8_formats(
+            self._STREAM_URL_TMPL % (subdomain, path, 'm3u8'), video_id, 'mp4',
+            query=self._STREAM_URL_QUERY, fatal=False, m3u8_id='hls'))
+
+        return merge_dicts({
+            'id': video_id,
+            'formats': formats,
+            'uploader': ' '.join(traverse_obj(data, ('creator', ('first_name', 'last_name'), T(compat_str)))) or None,
+        }, traverse_obj(data, {
+            'title': ('project', 'project_name', T(compat_str)),
+            'timestamp': ('created_at', T(unified_timestamp)),
+            'thumbnail': ('thumbnail_url', T(url_or_none)),
+        }), rev=True)
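
Both manifests come from the same Cloudflare Stream template; with the fallback subdomain from the extractor and a hypothetical path, the two URLs it requests look like:

    _STREAM_URL_TMPL = 'https://%s.cloudflarestream.com/%s/manifest/video.%s'
    subdomain = 'customer-2ut9yn3y6fta1yxe'  # fallback value from the extractor
    path = 'some-signed-token'               # hypothetical download_url value
    mpd_url = _STREAM_URL_TMPL % (subdomain, path, 'mpd')    # DASH
    m3u8_url = _STREAM_URL_TMPL % (subdomain, path, 'm3u8')  # HLS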

File diff suppressed because it is too large
+ 521 - 207
youtube_dl/extractor/common.py


+ 148 - 0
youtube_dl/extractor/cpac.py

@@ -0,0 +1,148 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    int_or_none,
+    str_or_none,
+    try_get,
+    unified_timestamp,
+    update_url_query,
+    urljoin,
+)
+
+# compat_range
+try:
+    if callable(xrange):
+        range = xrange
+except (NameError, TypeError):
+    pass
+
+
+class CPACIE(InfoExtractor):
+    IE_NAME = 'cpac'
+    _VALID_URL = r'https?://(?:www\.)?cpac\.ca/(?P<fr>l-)?episode\?id=(?P<id>[\da-f]{8}(?:-[\da-f]{4}){3}-[\da-f]{12})'
+    _TEST = {
+        # 'url': 'http://www.cpac.ca/en/programs/primetime-politics/episodes/65490909',
+        'url': 'https://www.cpac.ca/episode?id=fc7edcae-4660-47e1-ba61-5b7f29a9db0f',
+        'md5': 'e46ad699caafd7aa6024279f2614e8fa',
+        'info_dict': {
+            'id': 'fc7edcae-4660-47e1-ba61-5b7f29a9db0f',
+            'ext': 'mp4',
+            'upload_date': '20220215',
+            'title': 'News Conference to Celebrate National Kindness Week – February 15, 2022',
+            'description': 'md5:466a206abd21f3a6f776cdef290c23fb',
+            'timestamp': 1644901200,
+        },
+        'params': {
+            'format': 'bestvideo',
+            'hls_prefer_native': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        url_lang = 'fr' if '/l-episode?' in url else 'en'
+
+        content = self._download_json(
+            'https://www.cpac.ca/api/1/services/contentModel.json?url=/site/website/episode/index.xml&crafterSite=cpacca&id=' + video_id,
+            video_id)
+        video_url = try_get(content, lambda x: x['page']['details']['videoUrl'], compat_str)
+        formats = []
+        if video_url:
+            content = content['page']
+            title = str_or_none(content['details']['title_%s_t' % (url_lang, )])
+            formats = self._extract_m3u8_formats(video_url, video_id, m3u8_id='hls', ext='mp4')
+            for fmt in formats:
+                # prefer language to match URL
+                fmt_lang = fmt.get('language')
+                if fmt_lang == url_lang:
+                    fmt['language_preference'] = 10
+                elif not fmt_lang:
+                    fmt['language_preference'] = -1
+                else:
+                    fmt['language_preference'] = -10
+
+        self._sort_formats(formats)
+
+        category = str_or_none(content['details']['category_%s_t' % (url_lang, )])
+
+        def is_live(v_type):
+            return (v_type == 'live') if v_type is not None else None
+
+        return {
+            'id': video_id,
+            'formats': formats,
+            'title': title,
+            'description': str_or_none(content['details'].get('description_%s_t' % (url_lang, ))),
+            'timestamp': unified_timestamp(content['details'].get('liveDateTime')),
+            'category': [category] if category else None,
+            'thumbnail': urljoin(url, str_or_none(content['details'].get('image_%s_s' % (url_lang, )))),
+            'is_live': is_live(content['details'].get('type')),
+        }
+
+
+class CPACPlaylistIE(InfoExtractor):
+    IE_NAME = 'cpac:playlist'
+    _VALID_URL = r'(?i)https?://(?:www\.)?cpac\.ca/(?:program|search|(?P<fr>emission|rechercher))\?(?:[^&]+&)*?(?P<id>(?:id=\d+|programId=\d+|key=[^&]+))'
+
+    _TESTS = [{
+        'url': 'https://www.cpac.ca/program?id=6',
+        'info_dict': {
+            'id': 'id=6',
+            'title': 'Headline Politics',
+            'description': 'Watch CPAC’s signature long-form coverage of the day’s pressing political events as they unfold.',
+        },
+        'playlist_count': 10,
+    }, {
+        'url': 'https://www.cpac.ca/search?key=hudson&type=all&order=desc',
+        'info_dict': {
+            'id': 'key=hudson',
+            'title': 'hudson',
+        },
+        'playlist_count': 22,
+    }, {
+        'url': 'https://www.cpac.ca/search?programId=50',
+        'info_dict': {
+            'id': 'programId=50',
+            'title': '50',
+        },
+        'playlist_count': 9,
+    }, {
+        'url': 'https://www.cpac.ca/emission?id=6',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.cpac.ca/rechercher?key=hudson&type=all&order=desc',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        url_lang = 'fr' if any(x in url for x in ('/emission?', '/rechercher?')) else 'en'
+        pl_type, list_type = ('program', 'itemList') if any(x in url for x in ('/program?', '/emission?')) else ('search', 'searchResult')
+        api_url = (
+            'https://www.cpac.ca/api/1/services/contentModel.json?url=/site/website/%s/index.xml&crafterSite=cpacca&%s'
+            % (pl_type, video_id, ))
+        content = self._download_json(api_url, video_id)
+        entries = []
+        total_pages = int_or_none(try_get(content, lambda x: x['page'][list_type]['totalPages']), default=1)
+        for page in range(1, total_pages + 1):
+            if page > 1:
+                api_url = update_url_query(api_url, {'page': '%d' % (page, ), })
+                content = self._download_json(
+                    api_url, video_id,
+                    note='Downloading continuation - %d' % (page, ),
+                    fatal=False)
+
+            for item in try_get(content, lambda x: x['page'][list_type]['item'], list) or []:
+                episode_url = urljoin(url, try_get(item, lambda x: x['url_%s_s' % (url_lang, )]))
+                if episode_url:
+                    entries.append(episode_url)
+
+        return self.playlist_result(
+            (self.url_result(entry) for entry in entries),
+            playlist_id=video_id,
+            playlist_title=try_get(content, lambda x: x['page']['program']['title_%s_t' % (url_lang, )]) or video_id.split('=')[-1],
+            playlist_description=try_get(content, lambda x: x['page']['program']['description_%s_t' % (url_lang, )]),
+        )
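
The playlist extractor pages through the API by appending a page parameter; a sketch of that loop with a hypothetical two-page result (update_url_query as imported above):

    from youtube_dl.utils import update_url_query

    api_url = ('https://www.cpac.ca/api/1/services/contentModel.json'
               '?url=/site/website/program/index.xml&crafterSite=cpacca&id=6')
    total_pages = 2  # hypothetical value of page.itemList.totalPages
    for page in range(1, total_pages + 1):
        if page > 1:
            api_url = update_url_query(api_url, {'page': '%d' % page})
        # each response's page.itemList.item entries feed url_result()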

+ 204 - 0
youtube_dl/extractor/dlf.py

@@ -0,0 +1,204 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_str,
+)
+from ..utils import (
+    determine_ext,
+    extract_attributes,
+    int_or_none,
+    merge_dicts,
+    traverse_obj,
+    url_or_none,
+    variadic,
+)
+
+
+class DLFBaseIE(InfoExtractor):
+    _VALID_URL_BASE = r'https?://(?:www\.)?deutschlandfunk\.de/'
+    _BUTTON_REGEX = r'(<button[^>]+alt="Anhören"[^>]+data-audio-diraid[^>]*>)'
+
+    def _parse_button_attrs(self, button, audio_id=None):
+        attrs = extract_attributes(button)
+        audio_id = audio_id or attrs['data-audio-diraid']
+
+        url = traverse_obj(
+            attrs, 'data-audio-download-src', 'data-audio', 'data-audioreference',
+            'data-audio-src', expected_type=url_or_none)
+        ext = determine_ext(url)
+        formats = (self._extract_m3u8_formats(url, audio_id, fatal=False)
+                   if ext == 'm3u8' else [{'url': url, 'ext': ext, 'vcodec': 'none'}])
+        self._sort_formats(formats)
+
+        def traverse_attrs(path):
+            path = list(variadic(path))
+            t = path.pop() if callable(path[-1]) else None
+            return traverse_obj(attrs, path, expected_type=t, get_all=False)
+
+        def txt_or_none(v, default=None):
+            return default if v is None else (compat_str(v).strip() or default)
+
+        return merge_dicts(*reversed([{
+            'id': audio_id,
+            # 'extractor_key': DLFIE.ie_key(),
+            # 'extractor': DLFIE.IE_NAME,
+            'formats': formats,
+        }, dict((k, traverse_attrs(v)) for k, v in {
+            'title': (('data-audiotitle', 'data-audio-title', 'data-audio-download-tracking-title'), txt_or_none),
+            'duration': (('data-audioduration', 'data-audio-duration'), int_or_none),
+            'thumbnail': ('data-audioimage', url_or_none),
+            'uploader': 'data-audio-producer',
+            'series': 'data-audio-series',
+            'channel': 'data-audio-origin-site-name',
+            'webpage_url': ('data-audio-download-tracking-path', url_or_none),
+        }.items())]))
+
+
+class DLFIE(DLFBaseIE):
+    IE_NAME = 'dlf'
+    _VALID_URL = DLFBaseIE._VALID_URL_BASE + r'[\w-]+-dlf-(?P<id>[\da-f]{8})-100\.html'
+    _TESTS = [
+        # Audio as an HLS stream
+        {
+            'url': 'https://www.deutschlandfunk.de/tanz-der-saiteninstrumente-das-wild-strings-trio-aus-slowenien-dlf-03a3eb19-100.html',
+            'info_dict': {
+                'id': '03a3eb19',
+                'title': r're:Tanz der Saiteninstrumente [-/] Das Wild Strings Trio aus Slowenien',
+                'ext': 'm4a',
+                'duration': 3298,
+                'thumbnail': 'https://assets.deutschlandfunk.de/FALLBACK-IMAGE-AUDIO/512x512.png?t=1603714364673',
+                'uploader': 'Deutschlandfunk',
+                'series': 'On Stage',
+                'channel': 'deutschlandfunk'
+            },
+            'params': {
+                'skip_download': 'm3u8'
+            },
+            'skip': 'This webpage no longer exists'
+        }, {
+            'url': 'https://www.deutschlandfunk.de/russische-athleten-kehren-zurueck-auf-die-sportbuehne-ein-gefaehrlicher-tueroeffner-dlf-d9cc1856-100.html',
+            'info_dict': {
+                'id': 'd9cc1856',
+                'title': 'Russische Athleten kehren zurück auf die Sportbühne: Ein gefährlicher Türöffner',
+                'ext': 'mp3',
+                'duration': 291,
+                'thumbnail': 'https://assets.deutschlandfunk.de/FALLBACK-IMAGE-AUDIO/512x512.png?t=1603714364673',
+                'uploader': 'Deutschlandfunk',
+                'series': 'Kommentare und Themen der Woche',
+                'channel': 'deutschlandfunk'
+            }
+        },
+    ]
+
+    def _real_extract(self, url):
+        audio_id = self._match_id(url)
+        webpage = self._download_webpage(url, audio_id)
+
+        return self._parse_button_attrs(
+            self._search_regex(self._BUTTON_REGEX, webpage, 'button'), audio_id)
+
+
+class DLFCorpusIE(DLFBaseIE):
+    IE_NAME = 'dlf:corpus'
+    IE_DESC = 'DLF Multi-feed Archives'
+    _VALID_URL = DLFBaseIE._VALID_URL_BASE + r'(?P<id>(?![\w-]+-dlf-[\da-f]{8})[\w-]+-\d+)\.html'
+    _TESTS = [
+        # Recorded news broadcast with referrals to related broadcasts
+        {
+            'url': 'https://www.deutschlandfunk.de/fechten-russland-belarus-ukraine-protest-100.html',
+            'info_dict': {
+                'id': 'fechten-russland-belarus-ukraine-protest-100',
+                'title': r're:Wiederzulassung als neutrale Athleten [-/] Was die Rückkehr russischer und belarussischer Sportler beim Fechten bedeutet',
+                'description': 'md5:91340aab29c71aa7518ad5be13d1e8ad'
+            },
+            'playlist_mincount': 5,
+            'playlist': [{
+                'info_dict': {
+                    'id': '1fc5d64a',
+                    'title': r're:Wiederzulassung als neutrale Athleten [-/] Was die Rückkehr russischer und belarussischer Sportler beim Fechten bedeutet',
+                    'ext': 'mp3',
+                    'duration': 252,
+                    'thumbnail': 'https://assets.deutschlandfunk.de/aad16241-6b76-4a09-958b-96d0ee1d6f57/512x512.jpg?t=1679480020313',
+                    'uploader': 'Deutschlandfunk',
+                    'series': 'Sport',
+                    'channel': 'deutschlandfunk'
+                }
+            }, {
+                'info_dict': {
+                    'id': '2ada145f',
+                    'title': r're:(?:Sportpolitik / )?Fechtverband votiert für Rückkehr russischer Athleten',
+                    'ext': 'mp3',
+                    'duration': 336,
+                    'thumbnail': 'https://assets.deutschlandfunk.de/FILE_93982766f7317df30409b8a184ac044a/512x512.jpg?t=1678547581005',
+                    'uploader': 'Deutschlandfunk',
+                    'series': 'Deutschlandfunk Nova',
+                    'channel': 'deutschlandfunk-nova'
+                }
+            }, {
+                'info_dict': {
+                    'id': '5e55e8c9',
+                    'title': r're:Wiederzulassung von Russland und Belarus [-/] "Herumlavieren" des Fechter-Bundes sorgt für Unverständnis',
+                    'ext': 'mp3',
+                    'duration': 187,
+                    'thumbnail': 'https://assets.deutschlandfunk.de/a595989d-1ed1-4a2e-8370-b64d7f11d757/512x512.jpg?t=1679173825412',
+                    'uploader': 'Deutschlandfunk',
+                    'series': 'Sport am Samstag',
+                    'channel': 'deutschlandfunk'
+                }
+            }, {
+                'info_dict': {
+                    'id': '47e1a096',
+                    'title': r're:Rückkehr Russlands im Fechten [-/] "Fassungslos, dass es einfach so passiert ist"',
+                    'ext': 'mp3',
+                    'duration': 602,
+                    'thumbnail': 'https://assets.deutschlandfunk.de/da4c494a-21cc-48b4-9cc7-40e09fd442c2/512x512.jpg?t=1678562155770',
+                    'uploader': 'Deutschlandfunk',
+                    'series': 'Sport am Samstag',
+                    'channel': 'deutschlandfunk'
+                }
+            }, {
+                'info_dict': {
+                    'id': '5e55e8c9',
+                    'title': r're:Wiederzulassung von Russland und Belarus [-/] "Herumlavieren" des Fechter-Bundes sorgt für Unverständnis',
+                    'ext': 'mp3',
+                    'duration': 187,
+                    'thumbnail': 'https://assets.deutschlandfunk.de/a595989d-1ed1-4a2e-8370-b64d7f11d757/512x512.jpg?t=1679173825412',
+                    'uploader': 'Deutschlandfunk',
+                    'series': 'Sport am Samstag',
+                    'channel': 'deutschlandfunk'
+                }
+            }]
+        },
+        # Podcast feed with tag buttons, playlist count fluctuates
+        {
+            'url': 'https://www.deutschlandfunk.de/kommentare-und-themen-der-woche-100.html',
+            'info_dict': {
+                'id': 'kommentare-und-themen-der-woche-100',
+                'title': 'Meinung - Kommentare und Themen der Woche',
+                'description': 'md5:2901bbd65cd2d45e116d399a099ce5d5',
+            },
+            'playlist_mincount': 10,
+        },
+        # Podcast feed with no description
+        {
+            'url': 'https://www.deutschlandfunk.de/podcast-tolle-idee-100.html',
+            'info_dict': {
+                'id': 'podcast-tolle-idee-100',
+                'title': 'Wissenschaftspodcast - Tolle Idee! - Was wurde daraus?',
+            },
+            'playlist_mincount': 11,
+        },
+    ]
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+        webpage = self._download_webpage(url, playlist_id)
+
+        return self.playlist_result(
+            map(self._parse_button_attrs, re.findall(self._BUTTON_REGEX, webpage)),
+            playlist_id, self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None),
+            self._html_search_meta(['description', 'og:description', 'twitter:description'], webpage, default=None))
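
All metadata in this extractor comes off a single <button> tag; extract_attributes (imported above) turns it into the dict that _parse_button_attrs traverses. Roughly, with a hypothetical button trimmed to the attributes the extractor reads:

    from youtube_dl.utils import extract_attributes

    button = ('<button alt="Anhören" data-audio-diraid="d9cc1856"'
              ' data-audioduration="291" data-audio-producer="Deutschlandfunk">')
    attrs = extract_attributes(button)
    assert attrs['data-audio-diraid'] == 'd9cc1856'
    assert attrs['data-audioduration'] == '291'  # int_or_none is applied later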

+ 101 - 0
youtube_dl/extractor/epidemicsound.py

@@ -0,0 +1,101 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    float_or_none,
+    T,
+    traverse_obj,
+    txt_or_none,
+    unified_timestamp,
+    url_or_none,
+)
+
+
+class EpidemicSoundIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?epidemicsound\.com/track/(?P<id>[0-9a-zA-Z]+)'
+    _TESTS = [{
+        'url': 'https://www.epidemicsound.com/track/yFfQVRpSPz/',
+        'md5': 'd98ff2ddb49e8acab9716541cbc9dfac',
+        'info_dict': {
+            'id': '45014',
+            'display_id': 'yFfQVRpSPz',
+            'ext': 'mp3',
+            'tags': ['foley', 'door', 'knock', 'glass', 'window', 'glass door knock'],
+            'title': 'Door Knock Door 1',
+            'duration': 1,
+            'thumbnail': 'https://cdn.epidemicsound.com/curation-assets/commercial-release-cover-images/default-sfx/3000x3000.jpg',
+            'timestamp': 1415320353,
+            'upload_date': '20141107',
+            'age_limit': None,
+            # check that the "best" format was found, since test file MD5 doesn't
+            # distinguish the formats
+            'format': 'full',
+        },
+    }, {
+        'url': 'https://www.epidemicsound.com/track/mj8GTTwsZd/',
+        'md5': 'c82b745890f9baf18dc2f8d568ee3830',
+        'info_dict': {
+            'id': '148700',
+            'display_id': 'mj8GTTwsZd',
+            'ext': 'mp3',
+            'tags': ['liquid drum n bass', 'energetic'],
+            'title': 'Noplace',
+            'duration': 237,
+            'thumbnail': 'https://cdn.epidemicsound.com/curation-assets/commercial-release-cover-images/11138/3000x3000.jpg',
+            'timestamp': 1694426482,
+            'release_timestamp': 1700535606,
+            'upload_date': '20230911',
+            'age_limit': None,
+            'format': 'full',
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        json_data = self._download_json('https://www.epidemicsound.com/json/track/' + video_id, video_id)
+
+        def fmt_or_none(f):
+            if not f.get('format'):
+                f['format'] = f.get('format_id')
+            elif not f.get('format_id'):
+                f['format_id'] = f['format']
+            if not (f['url'] and f['format']):
+                return
+            if f.get('format_note'):
+                f['format_note'] = 'track ID ' + f['format_note']
+            f['preference'] = -1 if f['format'] == 'full' else -2
+            return f
+
+        formats = traverse_obj(json_data, (
+            'stems', T(dict.items), Ellipsis, {
+                'format': (0, T(txt_or_none)),
+                'format_note': (1, 's3TrackId', T(txt_or_none)),
+                'format_id': (1, 'stemType', T(txt_or_none)),
+                'url': (1, 'lqMp3Url', T(url_or_none)),
+            }, T(fmt_or_none)))
+
+        self._sort_formats(formats)
+
+        info = traverse_obj(json_data, {
+            'id': ('id', T(txt_or_none)),
+            'tags': ('metadataTags', Ellipsis, T(txt_or_none)),
+            'title': ('title', T(txt_or_none)),
+            'duration': ('length', T(float_or_none)),
+            'timestamp': ('added', T(unified_timestamp)),
+            'thumbnail': (('imageUrl', 'cover'), T(url_or_none)),
+            'age_limit': ('isExplicit', T(lambda b: 18 if b else None)),
+            'release_timestamp': ('releaseDate', T(unified_timestamp)),
+        }, get_all=False)
+
+        info.update(traverse_obj(json_data, {
+            'categories': ('genres', Ellipsis, 'tag', T(txt_or_none)),
+            'tags': ('metadataTags', Ellipsis, T(txt_or_none)),
+        }))
+
+        info.update({
+            'display_id': video_id,
+            'formats': formats,
+        })
+
+        return info
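
The stems dict is flattened into formats with a traverse_obj dict template: T(dict.items) yields (key, value) pairs, Ellipsis iterates them, and indices 0/1 address the key and value. A minimal sketch with one hypothetical stem:

    from youtube_dl.utils import T, traverse_obj, txt_or_none, url_or_none

    json_data = {'stems': {'full': {'lqMp3Url': 'https://example.invalid/full.mp3'}}}
    formats = traverse_obj(json_data, (
        'stems', T(dict.items), Ellipsis, {
            'format': (0, T(txt_or_none)),          # stem name, e.g. 'full'
            'url': (1, 'lqMp3Url', T(url_or_none)),
        }))
    # -> [{'format': 'full', 'url': 'https://example.invalid/full.mp3'}]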

+ 69 - 18
youtube_dl/extractor/extractors.py

@@ -51,6 +51,10 @@ from .anvato import AnvatoIE
 from .aol import AolIE
 from .allocine import AllocineIE
 from .aliexpress import AliExpressLiveIE
+from .alsace20tv import (
+    Alsace20TVIE,
+    Alsace20TVEmbedIE,
+)
 from .apa import APAIE
 from .aparat import AparatIE
 from .appleconnect import AppleConnectIE
@@ -115,6 +119,7 @@ from .bfmtv import (
 )
 from .bibeltv import BibelTVIE
 from .bigflix import BigflixIE
+from .bigo import BigoIE
 from .bild import BildIE
 from .bilibili import (
     BiliBiliIE,
@@ -133,6 +138,7 @@ from .bleacherreport import (
     BleacherReportIE,
     BleacherReportCMSIE,
 )
+from .blerp import BlerpIE
 from .bloomberg import BloombergIE
 from .bokecc import BokeCCIE
 from .bongacams import BongaCamsIE
@@ -153,6 +159,8 @@ from .businessinsider import BusinessInsiderIE
 from .buzzfeed import BuzzFeedIE
 from .byutv import BYUtvIE
 from .c56 import C56IE
+from .caffeine import CaffeineTVIE
+from .callin import CallinIE
 from .camdemy import (
     CamdemyIE,
     CamdemyFolderIE
@@ -203,10 +211,7 @@ from .ccc import (
 from .ccma import CCMAIE
 from .cctv import CCTVIE
 from .cda import CDAIE
-from .ceskatelevize import (
-    CeskaTelevizeIE,
-    CeskaTelevizePoradyIE,
-)
+from .ceskatelevize import CeskaTelevizeIE
 from .channel9 import Channel9IE
 from .charlierose import CharlieRoseIE
 from .chaturbate import ChaturbateIE
@@ -222,6 +227,7 @@ from .ciscolive import (
     CiscoLiveSearchIE,
 )
 from .cjsw import CJSWIE
+from .clipchamp import ClipchampIE
 from .cliphunter import CliphunterIE
 from .clippit import ClippitIE
 from .cliprs import ClipRsIE
@@ -254,6 +260,10 @@ from .commonprotocols import (
 from .condenast import CondeNastIE
 from .contv import CONtvIE
 from .corus import CorusIE
+from .cpac import (
+    CPACIE,
+    CPACPlaylistIE,
+)
 from .cracked import CrackedIE
 from .crackle import CrackleIE
 from .crooksandliars import CrooksAndLiarsIE
@@ -287,6 +297,10 @@ from .dbtv import DBTVIE
 from .dctp import DctpTvIE
 from .deezer import DeezerPlaylistIE
 from .democracynow import DemocracynowIE
+from .dlf import (
+    DLFCorpusIE,
+    DLFIE,
+)
 from .dfb import DFBIE
 from .dhm import DHMIE
 from .digg import DiggIE
@@ -344,6 +358,7 @@ from .ellentube import (
 from .elpais import ElPaisIE
 from .embedly import EmbedlyIE
 from .engadget import EngadgetIE
+from .epidemicsound import EpidemicSoundIE
 from .eporner import EpornerIE
 from .eroprofile import EroProfileIE
 from .escapist import EscapistIE
@@ -368,6 +383,7 @@ from .fc2 import (
     FC2EmbedIE,
 )
 from .fczenit import FczenitIE
+from .fifa import FifaIE
 from .filmon import (
     FilmOnIE,
     FilmOnChannelIE,
@@ -427,6 +443,7 @@ from .gamespot import GameSpotIE
 from .gamestar import GameStarIE
 from .gaskrank import GaskrankIE
 from .gazeta import GazetaIE
+from .gbnews import GBNewsIE
 from .gdcvault import GDCVaultIE
 from .gedidigital import GediDigitalIE
 from .generic import GenericIE
@@ -434,6 +451,13 @@ from .gfycat import GfycatIE
 from .giantbomb import GiantBombIE
 from .giga import GigaIE
 from .glide import GlideIE
+from .globalplayer import (
+    GlobalPlayerLiveIE,
+    GlobalPlayerLivePlaylistIE,
+    GlobalPlayerAudioIE,
+    GlobalPlayerAudioEpisodeIE,
+    GlobalPlayerVideoIE
+)
 from .globo import (
     GloboIE,
     GloboArticleIE,
@@ -470,6 +494,7 @@ from .hotstar import (
 )
 from .howcast import HowcastIE
 from .howstuffworks import HowStuffWorksIE
+from .hrfernsehen import HRFernsehenIE
 from .hrti import (
     HRTiIE,
     HRTiPlaylistIE,
@@ -546,8 +571,10 @@ from .khanacademy import (
 from .kickstarter import KickStarterIE
 from .kinja import KinjaEmbedIE
 from .kinopoisk import KinoPoiskIE
+from .kommunetv import KommunetvIE
 from .konserthusetplay import KonserthusetPlayIE
 from .krasview import KrasViewIE
+from .kth import KTHIE
 from .ku6 import Ku6IE
 from .kusi import KUSIIE
 from .kuwo import (
@@ -717,6 +744,7 @@ from .myvi import (
     MyviIE,
     MyviEmbedIE,
 )
+from .myvideoge import MyVideoGeIE
 from .myvidster import MyVidsterIE
 from .nationalgeographic import (
     NationalGeographicVideoIE,
@@ -870,21 +898,13 @@ from .ooyala import (
 )
 from .ora import OraTVIE
 from .orf import (
-    ORFTVthekIE,
-    ORFFM4IE,
+    ORFONIE,
+    ORFONLiveIE,
     ORFFM4StoryIE,
-    ORFOE1IE,
-    ORFOE3IE,
-    ORFNOEIE,
-    ORFWIEIE,
-    ORFBGLIE,
-    ORFOOEIE,
-    ORFSTMIE,
-    ORFKTNIE,
-    ORFSBGIE,
-    ORFTIRIE,
-    ORFVBGIE,
     ORFIPTVIE,
+    ORFPodcastIE,
+    ORFRadioIE,
+    ORFRadioCollectionIE,
 )
 from .outsidetv import OutsideTVIE
 from .packtpub import (
@@ -901,6 +921,10 @@ from .parliamentliveuk import ParliamentLiveUKIE
 from .patreon import PatreonIE
 from .pbs import PBSIE
 from .pearvideo import PearVideoIE
+from .peekvids import (
+    PeekVidsIE,
+    PlayVidsIE,
+)
 from .peertube import PeerTubeIE
 from .people import PeopleIE
 from .performgroup import PerformGroupIE
@@ -957,6 +981,10 @@ from .pornhub import (
 from .pornotube import PornotubeIE
 from .pornovoisines import PornoVoisinesIE
 from .pornoxo import PornoXOIE
+from .pr0gramm import (
+    Pr0grammIE,
+    Pr0grammStaticIE,
+)
 from .puhutv import (
     PuhuTVIE,
     PuhuTVSerieIE,
@@ -994,6 +1022,10 @@ from .raywenderlich import (
     RayWenderlichIE,
     RayWenderlichCourseIE,
 )
+from .rbgtum import (
+    RbgTumIE,
+    RbgTumCourseIE,
+)
 from .rbmaradio import RBMARadioIE
 from .rds import RDSIE
 from .redbulltv import (
@@ -1049,6 +1081,10 @@ from .rutube import (
 from .rutv import RUTVIE
 from .ruutu import RuutuIE
 from .ruv import RuvIE
+from .s4c import (
+    S4CIE,
+    S4CSeriesIE,
+)
 from .safari import (
     SafariIE,
     SafariApiIE,
@@ -1184,6 +1220,7 @@ from .storyfire import (
 from .streamable import StreamableIE
 from .streamcloud import StreamcloudIE
 from .streamcz import StreamCZIE
+from .streamsb import StreamsbIE
 from .streetvoice import StreetVoiceIE
 from .stretchinternet import StretchInternetIE
 from .stv import STVPlayerIE
@@ -1253,6 +1290,11 @@ from .theweatherchannel import TheWeatherChannelIE
 from .thisamericanlife import ThisAmericanLifeIE
 from .thisav import ThisAVIE
 from .thisoldhouse import ThisOldHouseIE
+from .thisvid import (
+    ThisVidIE,
+    ThisVidMemberIE,
+    ThisVidPlaylistIE,
+)
 from .threeqsdn import ThreeQSDNIE
 from .tiktok import (
     TikTokIE,
@@ -1537,6 +1579,7 @@ from .weibo import (
     WeiboMobileIE
 )
 from .weiqitv import WeiqiTVIE
+from .whyp import WhypIE
 from .wistia import (
     WistiaIE,
     WistiaPlaylistIE,
@@ -1602,7 +1645,15 @@ from .younow import (
     YouNowChannelIE,
     YouNowMomentIE,
 )
-from .youporn import YouPornIE
+from .youporn import (
+    YouPornIE,
+    YouPornCategoryIE,
+    YouPornChannelIE,
+    YouPornCollectionIE,
+    YouPornStarIE,
+    YouPornTagIE,
+    YouPornVideosIE,
+)
 from .yourporn import YourPornIE
 from .yourupload import YourUploadIE
 from .youtube import (

+ 101 - 0
youtube_dl/extractor/fifa.py

@@ -0,0 +1,101 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+from ..utils import (
+    int_or_none,
+    traverse_obj,
+    unified_timestamp,
+)
+
+if not callable(getattr(InfoExtractor, '_match_valid_url', None)):
+
+    BaseInfoExtractor = InfoExtractor
+
+    import re
+
+    class InfoExtractor(BaseInfoExtractor):
+
+        @classmethod
+        def _match_valid_url(cls, url):
+            return re.match(cls._VALID_URL, url)
+
+
+class FifaIE(InfoExtractor):
+    _VALID_URL = r'https?://www.fifa.com/fifaplus/(?P<locale>\w{2})/watch/([^#?]+/)?(?P<id>\w+)'
+    _TESTS = [{
+        'url': 'https://www.fifa.com/fifaplus/en/watch/7on10qPcnyLajDDU3ntg6y',
+        'info_dict': {
+            'id': '7on10qPcnyLajDDU3ntg6y',
+            'title': 'Italy v France | Final | 2006 FIFA World Cup Germany™ | Full Match Replay',
+            'description': 'md5:f4520d0ee80529c8ba4134a7d692ff8b',
+            'ext': 'mp4',
+            'categories': ['FIFA Tournaments'],
+            'thumbnail': 'https://digitalhub.fifa.com/transform/135e2656-3a51-407b-8810-6c34bec5b59b/FMR_2006_Italy_France_Final_Hero',
+            'duration': 8165,
+        },
+        'params': {'skip_download': 'm3u8'},
+    }, {
+        'url': 'https://www.fifa.com/fifaplus/pt/watch/1cg5r5Qt6Qt12ilkDgb1sV',
+        'info_dict': {
+            'id': '1cg5r5Qt6Qt12ilkDgb1sV',
+            'title': 'Brazil v Germany | Semi-finals | 2014 FIFA World Cup Brazil™ | Extended Highlights',
+            'description': 'md5:d908c74ee66322b804ae2e521b02a855',
+            'ext': 'mp4',
+            'categories': ['FIFA Tournaments', 'Highlights'],
+            'thumbnail': 'https://digitalhub.fifa.com/transform/d8fe6f61-276d-4a73-a7fe-6878a35fd082/FIFAPLS_100EXTHL_2014BRAvGER_TMB',
+            'duration': 902,
+            'release_timestamp': 1404777600,
+            'release_date': '20140708',
+        },
+        'params': {'skip_download': 'm3u8'},
+    }, {
+        'url': 'https://www.fifa.com/fifaplus/fr/watch/3C6gQH9C2DLwzNx7BMRQdp',
+        'info_dict': {
+            'id': '3C6gQH9C2DLwzNx7BMRQdp',
+            'title': 'Josimar goal against Northern Ireland | Classic Goals',
+            'description': 'md5:cbe7e7bb52f603c9f1fe9a4780fe983b',
+            'ext': 'mp4',
+            'categories': ['FIFA Tournaments', 'Goal'],
+            'duration': 28,
+            'thumbnail': 'https://digitalhub.fifa.com/transform/f9301391-f8d9-48b5-823e-c093ac5e3e11/CG_MEN_1986_JOSIMAR',
+        },
+        'params': {'skip_download': 'm3u8'},
+    }]
+
+    def _real_extract(self, url):
+        video_id, locale = self._match_valid_url(url).group('id', 'locale')
+        webpage = self._download_webpage(url, video_id)
+
+        preconnect_link = self._search_regex(
+            r'<link\b[^>]+\brel\s*=\s*"preconnect"[^>]+href\s*=\s*"([^"]+)"', webpage, 'Preconnect Link')
+
+        video_details = self._download_json(
+            '{preconnect_link}/sections/videoDetails/{video_id}'.format(**locals()), video_id, 'Downloading Video Details', fatal=False)
+
+        preplay_parameters = self._download_json(
+            '{preconnect_link}/videoPlayerData/{video_id}'.format(**locals()), video_id, 'Downloading Preplay Parameters')['preplayParameters']
+
+        content_data = self._download_json(
+            # 1. query string is expected to be sent as-is
+            # 2. `sig` must be appended
+            # 3. if absent, the call appears to work but the manifest is bad (404)
+            'https://content.uplynk.com/preplay/{contentId}/multiple.json?{queryStr}&sig={signature}'.format(**preplay_parameters),
+            video_id, 'Downloading Content Data')
+
+        # formats, subtitles = self._extract_m3u8_formats_and_subtitles(content_data['playURL'], video_id)
+        formats, subtitles = self._extract_m3u8_formats(content_data['playURL'], video_id, ext='mp4', entry_protocol='m3u8_native'), None
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': video_details['title'],
+            'description': video_details.get('description'),
+            'duration': int_or_none(video_details.get('duration')),
+            'release_timestamp': unified_timestamp(video_details.get('dateOfRelease')),
+            'categories': traverse_obj(video_details, (('videoCategory', 'videoSubcategory'),)),
+            'thumbnail': traverse_obj(video_details, ('backgroundImage', 'src')),
+            'formats': formats,
+            'subtitles': subtitles,
+        }

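The commented constraints on the Uplynk preplay call above are easy to see with a minimal sketch; the parameter values here are made-up placeholders, the real ones come from the videoPlayerData response:

    # Sketch of the Uplynk preplay URL assembly used in FifaIE._real_extract.
    # All three values below are hypothetical examples.
    preplay_parameters = {
        'contentId': 'abc123',            # assumed example content ID
        'queryStr': 'ad=none&rays=dcba',  # sent as-is, per comment 1 above
        'signature': 'deadbeef',          # appended as `sig`, per comments 2-3
    }
    url = ('https://content.uplynk.com/preplay/{contentId}/multiple.json'
           '?{queryStr}&sig={signature}'.format(**preplay_parameters))
    # -> https://content.uplynk.com/preplay/abc123/multiple.json?ad=none&rays=dcba&sig=deadbeef
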
+ 139 - 0
youtube_dl/extractor/gbnews.py

@@ -0,0 +1,139 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    extract_attributes,
+    ExtractorError,
+    T,
+    traverse_obj,
+    txt_or_none,
+    url_or_none,
+)
+
+
+class GBNewsIE(InfoExtractor):
+    IE_DESC = 'GB News clips, features and live stream'
+
+    # \w+ is normally shows or news, but apparently any word redirects to the correct URL
+    _VALID_URL = r'https?://(?:www\.)?gbnews\.(?:uk|com)/(?:\w+/)?(?P<id>[^#?]+)'
+
+    _PLATFORM = 'safari'
+    _SSMP_URL = 'https://mm-v2.simplestream.com/ssmp/api.php'
+    _TESTS = [{
+        'url': 'https://www.gbnews.uk/shows/andrew-neils-message-to-companies-choosing-to-boycott-gb-news/106889',
+        'info_dict': {
+            'id': '106889',
+            'ext': 'mp4',
+            'title': "Andrew Neil's message to companies choosing to boycott GB News",
+            'description': 'md5:b281f5d22fd6d5eda64a4e3ba771b351',
+        },
+        'skip': '404 not found',
+    }, {
+        'url': 'https://www.gbnews.com/news/bbc-claudine-gay-harvard-university-antisemitism-row',
+        'info_dict': {
+            'id': '52264136',
+            'display_id': 'bbc-claudine-gay-harvard-university-antisemitism-row',
+            'ext': 'mp4',
+            'title': 'BBC deletes post after furious backlash over headline downplaying antisemitism',
+            'description': 'The post was criticised by former employers of the broadcaster',
+        },
+    }, {
+        'url': 'https://www.gbnews.uk/watchlive',
+        'info_dict': {
+            'id': '1069',
+            'display_id': 'watchlive',
+            'ext': 'mp4',
+            'title': 'GB News Live',
+            'is_live': True,
+        },
+        'params': {
+            'skip_download': 'm3u8',
+        },
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url).split('/')[-1]
+
+        webpage = self._download_webpage(url, display_id)
+        # extraction based on https://github.com/ytdl-org/youtube-dl/issues/29341
+        '''
+        <div id="video-106908"
+            class="simplestream"
+            data-id="GB001"
+            data-type="vod"
+            data-key="3Li3Nt2Qs8Ct3Xq9Fi5Uy0Mb2Bj0Qs"
+            data-token="f9c317c727dc07f515b20036c8ef14a6"
+            data-expiry="1624300052"
+            data-uvid="37900558"
+            data-poster="https://thumbnails.simplestreamcdn.com/gbnews/ondemand/37900558.jpg?width=700&"
+            data-npaw="false"
+            data-env="production">
+        '''
+        # exception if no match
+        video_data = self._search_regex(
+            r'(<div\s[^>]*\bclass\s*=\s*(\'|")(?!.*sidebar\b)simplestream(?:\s[\s\w$-]*)?\2[^>]*>)',
+            webpage, 'video data')
+
+        video_data = extract_attributes(video_data)
+        ss_id = video_data.get('data-id')
+        if not ss_id:
+            raise ExtractorError('Simplestream ID not found')
+
+        json_data = self._download_json(
+            self._SSMP_URL, display_id,
+            note='Downloading Simplestream JSON metadata',
+            errnote='Unable to download Simplestream JSON metadata',
+            query={
+                'id': ss_id,
+                'env': video_data.get('data-env', 'production'),
+            }, fatal=False)
+
+        meta_url = traverse_obj(json_data, ('response', 'api_hostname'))
+        if not meta_url:
+            raise ExtractorError('No API host found')
+
+        uvid = video_data['data-uvid']
+        dtype = video_data.get('data-type')
+        stream_data = self._download_json(
+            '%s/api/%s/stream/%s' % (meta_url, 'show' if dtype == 'vod' else dtype, uvid),
+            uvid,
+            query={
+                'key': video_data.get('data-key'),
+                'platform': self._PLATFORM,
+            },
+            headers={
+                'Token': video_data.get('data-token'),
+                'Token-Expiry': video_data.get('data-expiry'),
+                'Uvid': uvid,
+            }, fatal=False)
+
+        stream_url = traverse_obj(stream_data, (
+            'response', 'stream', T(url_or_none)))
+        if not stream_url:
+            raise ExtractorError('No stream data/URL')
+
+        # now known to be a dict
+        stream_data = stream_data['response']
+        drm = stream_data.get('drm')
+        if drm:
+            self.report_drm(uvid)
+
+        formats = self._extract_m3u8_formats(
+            stream_url, uvid, ext='mp4', entry_protocol='m3u8_native',
+            fatal=False)
+        # exception if no formats
+        self._sort_formats(formats)
+
+        return {
+            'id': uvid,
+            'display_id': display_id,
+            'title': (traverse_obj(stream_data, ('title', T(txt_or_none)))
+                      or self._og_search_title(webpage, default=None)
+                      or display_id.replace('-', ' ').capitalize()),
+            'description': self._og_search_description(webpage, default=None),
+            'thumbnail': (traverse_obj(video_data, ('data-poster', T(url_or_none)))
+                          or self._og_search_thumbnail(webpage)),
+            'formats': formats,
+            'is_live': (dtype == 'live') or None,
+        }

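For reference, the documented simplestream <div> above is turned into the video_data dict by extract_attributes; a minimal sketch, with the sample's attributes shortened:

    # How the sample <div> documented in GBNewsIE._real_extract becomes video_data.
    from youtube_dl.utils import extract_attributes

    div = ('<div id="video-106908" class="simplestream" data-id="GB001" '
           'data-type="vod" data-uvid="37900558" data-env="production">')
    video_data = extract_attributes(div)
    print(video_data['data-id'], video_data['data-uvid'])  # GB001 37900558
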
+ 217 - 2
youtube_dl/extractor/generic.py

@@ -28,6 +28,7 @@ from ..utils import (
     mimetype2ext,
     orderedSet,
     parse_duration,
+    parse_resolution,
     sanitized_Request,
     smuggle_url,
     unescapeHTML,
@@ -35,6 +36,7 @@ from ..utils import (
     unsmuggle_url,
     UnsupportedError,
     url_or_none,
+    urljoin,
     xpath_attr,
     xpath_text,
     xpath_with_ns,
@@ -2227,6 +2229,116 @@ class GenericIE(InfoExtractor):
             # Sibnet embed (https://help.sibnet.ru/?sibnet_video_embed)
             'url': 'https://phpbb3.x-tk.ru/bbcode-video-sibnet-t24.html',
             'only_matching': True,
+        }, {
+            # KVS Player
+            'url': 'https://www.kvs-demo.com/videos/105/kelis-4th-of-july/',
+            'info_dict': {
+                'id': '105',
+                'display_id': 'kelis-4th-of-july',
+                'ext': 'mp4',
+                'title': 'Kelis - 4th Of July',
+                'thumbnail': r're:https://(?:www\.)?kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg',
+            },
+        }, {
+            # KVS Player
+            'url': 'https://www.kvs-demo.com/embed/105/',
+            'info_dict': {
+                'id': '105',
+                'display_id': 'kelis-4th-of-july',
+                'ext': 'mp4',
+                'title': 'Kelis - 4th Of July / Embed Player',
+                'thumbnail': r're:https://(?:www\.)?kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg',
+            },
+            'params': {
+                'skip_download': True,
+            },
+        }, {
+            # KVS Player (tested also in thisvid.py)
+            'url': 'https://youix.com/video/leningrad-zoj/',
+            'md5': '94f96ba95706dc3880812b27b7d8a2b8',
+            'info_dict': {
+                'id': '18485',
+                'display_id': 'leningrad-zoj',
+                'ext': 'mp4',
+                'title': 'Клип: Ленинград - ЗОЖ скачать, смотреть онлайн | Youix.com',
+                'thumbnail': r're:https://youix.com/contents/videos_screenshots/18000/18485/preview(?:_480x320_youix_com.mp4)?\.jpg',
+            },
+        }, {
+            # KVS Player
+            'url': 'https://youix.com/embed/18485',
+            'md5': '94f96ba95706dc3880812b27b7d8a2b8',
+            'info_dict': {
+                'id': '18485',
+                'display_id': 'leningrad-zoj',
+                'ext': 'mp4',
+                'title': 'Ленинград - ЗОЖ',
+                'thumbnail': r're:https://youix.com/contents/videos_screenshots/18000/18485/preview(?:_480x320_youix_com.mp4)?\.jpg',
+            },
+        }, {
+            # KVS Player
+            'url': 'https://bogmedia.org/videos/21217/40-nochey-40-nights-2016/',
+            'md5': '94166bdb26b4cb1fb9214319a629fc51',
+            'info_dict': {
+                'id': '21217',
+                'display_id': '40-nochey-2016',
+                'ext': 'mp4',
+                'title': '40 ночей (2016) - BogMedia.org',
+                'description': 'md5:4e6d7d622636eb7948275432eb256dc3',
+                'thumbnail': 'https://bogmedia.org/contents/videos_screenshots/21000/21217/preview_480p.mp4.jpg',
+            },
+        }, {
+            # KVS Player (for sites that serve kt_player.js via non-https urls)
+            'url': 'http://www.camhub.world/embed/389508',
+            'md5': 'fbe89af4cfb59c8fd9f34a202bb03e32',
+            'info_dict': {
+                'id': '389508',
+                'display_id': 'syren-de-mer-onlyfans-05-07-2020have-a-happy-safe-holiday5f014e68a220979bdb8cd-source',
+                'ext': 'mp4',
+                'title': 'Syren De Mer  onlyfans_05-07-2020Have_a_happy_safe_holiday5f014e68a220979bdb8cd_source / Embed плеер',
+                'thumbnail': r're:https?://www\.camhub\.world/contents/videos_screenshots/389000/389508/preview\.mp4\.jpg',
+            },
+        }, {
+            'url': 'https://mrdeepfakes.com/video/5/selena-gomez-pov-deep-fakes',
+            'md5': 'fec4ad5ec150f655e0c74c696a4a2ff4',
+            'info_dict': {
+                'id': '5',
+                'display_id': 'selena-gomez-pov-deep-fakes',
+                'ext': 'mp4',
+                'title': 'Selena Gomez POV (Deep Fakes) DeepFake Porn - MrDeepFakes',
+                'description': 'md5:17d1f84b578c9c26875ac5ef9a932354',
+                'height': 720,
+                'age_limit': 18,
+            },
+        }, {
+            'url': 'https://shooshtime.com/videos/284002/just-out-of-the-shower-joi/',
+            'md5': 'e2f0a4c329f7986280b7328e24036d60',
+            'info_dict': {
+                'id': '284002',
+                'display_id': 'just-out-of-the-shower-joi',
+                'ext': 'mp4',
+                'title': 'Just Out Of The Shower JOI - Shooshtime',
+                'height': 720,
+                'age_limit': 18,
+            },
+        }, {
+            # would like to use the yt-dl test video but searching for
+            # '"\'/\\ä↭𝕐' fails, so using an old vid from YouTube Korea
+            'note': 'Test default search',
+            'url': 'Shorts로 허락 필요없이 놀자! (BTS편)',
+            'info_dict': {
+                'id': 'usDGO4Zb-dc',
+                'ext': 'mp4',
+                'title': 'YouTube Shorts로 허락 필요없이 놀자! (BTS편)',
+                'description': 'md5:96e31607eba81ab441567b5e289f4716',
+                'upload_date': '20211107',
+                'uploader': 'YouTube Korea',
+                'location': '대한민국',
+            },
+            'params': {
+                'default_search': 'ytsearch',
+                'skip_download': True,
+            },
+            'expected_warnings': ['uploader id'],
         },
     ]

@@ -2332,6 +2444,88 @@ class GenericIE(InfoExtractor):
             'title': title,
         }

+    def _extract_kvs(self, url, webpage, video_id):
+
+        def getlicensetoken(license):
+            modlicense = license.replace('$', '').replace('0', '1')
+            center = int(len(modlicense) / 2)
+            fronthalf = int(modlicense[:center + 1])
+            backhalf = int(modlicense[center:])
+
+            modlicense = compat_str(4 * abs(fronthalf - backhalf))
+
+            def parts():
+                for o in range(0, center + 1):
+                    for i in range(1, 5):
+                        yield compat_str((int(license[o + i]) + int(modlicense[o])) % 10)
+
+            return ''.join(parts())
+
+        def getrealurl(video_url, license_code):
+            if not video_url.startswith('function/0/'):
+                return video_url  # not obfuscated
+
+            url_path, _, url_query = video_url.partition('?')
+            urlparts = url_path.split('/')[2:]
+            license = getlicensetoken(license_code)
+            newmagic = urlparts[5][:32]
+
+            def spells(x, o):
+                l = (o + sum(int(n) for n in license[o:])) % 32
+                for i in range(0, len(x)):
+                    yield {l: x[o], o: x[l]}.get(i, x[i])
+
+            for o in range(len(newmagic) - 1, -1, -1):
+                newmagic = ''.join(spells(newmagic, o))
+
+            urlparts[5] = newmagic + urlparts[5][32:]
+            return '/'.join(urlparts) + '?' + url_query
+
+        flashvars = self._search_regex(
+            r'(?s)<script\b[^>]*>.*?var\s+flashvars\s*=\s*(\{.+?\});.*?</script>',
+            webpage, 'flashvars')
+        flashvars = self._parse_json(flashvars, video_id, transform_source=js_to_json)
+
+        # extract the part after the last / as the display_id from the
+        # canonical URL.
+        display_id = self._search_regex(
+            r'(?:<link href="https?://[^"]+/(.+?)/?" rel="canonical"\s*/?>'
+            r'|<link rel="canonical" href="https?://[^"]+/(.+?)/?"\s*/?>)',
+            webpage, 'display_id', fatal=False
+        )
+        title = self._html_search_regex(r'<(?:h1|title)>(?:Video: )?(.+?)</(?:h1|title)>', webpage, 'title')
+
+        thumbnail = flashvars['preview_url']
+        if thumbnail.startswith('//'):
+            protocol, _, _ = url.partition('/')
+            thumbnail = protocol + thumbnail
+
+        url_keys = list(filter(re.compile(r'^video_(?:url|alt_url\d*)$').match, flashvars.keys()))
+        formats = []
+        for key in url_keys:
+            if '/get_file/' not in flashvars[key]:
+                continue
+            format_id = flashvars.get(key + '_text', key)
+            formats.append(merge_dicts(
+                parse_resolution(format_id) or parse_resolution(flashvars[key]), {
+                    'url': urljoin(url, getrealurl(flashvars[key], flashvars['license_code'])),
+                    'format_id': format_id,
+                    'ext': 'mp4',
+                    'http_headers': {'Referer': url},
+                }))
+            if not formats[-1].get('height'):
+                formats[-1]['quality'] = 1
+
+        self._sort_formats(formats)
+
+        return {
+            'id': flashvars['video_id'],
+            'display_id': display_id,
+            'title': title,
+            'thumbnail': thumbnail,
+            'formats': formats,
+        }
+
     def _real_extract(self, url):
         if url.startswith('//'):
             return self.url_result(self.http_scheme() + url)
@@ -2540,9 +2734,16 @@ class GenericIE(InfoExtractor):
         # but actually don't.
         AGE_LIMIT_MARKERS = [
             r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
+            r'>[^<]*you acknowledge you are at least (\d+) years old',
+            r'>\s*(?:18\s+U(?:\.S\.C\.|SC)\s+)?(?:§+\s*)?2257\b',
         ]
-        if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
-            age_limit = 18
+        for marker in AGE_LIMIT_MARKERS:
+            m = re.search(marker, webpage)
+            if not m:
+                continue
+            age_limit = max(
+                age_limit or 0,
+                int_or_none(m.groups() and m.group(1), default=18))

         # video uploader is domain name
         video_uploader = self._search_regex(
@@ -3389,6 +3590,20 @@ class GenericIE(InfoExtractor):
                 info_dict['formats'] = formats
                 return info_dict

+        # Look for generic KVS player (before ld+json for tests)
+        found = self._search_regex(
+            (r'<script\b[^>]+?\bsrc\s*=\s*(["\'])https?://(?:\S+?/)+kt_player\.js\?v=(?P<ver>\d+(?:\.\d+)+)\1[^>]*>',
+             # kt_player('kt_player', 'https://i.shoosh.co/player/kt_player.swf?v=5.5.1', ...
+             r'kt_player\s*\(\s*(["\'])(?:(?!\1)[\w\W])+\1\s*,\s*(["\'])https?://(?:\S+?/)+kt_player\.swf\?v=(?P<ver>\d+(?:\.\d+)+)\2\s*,',
+             ), webpage, 'KVS player', group='ver', default=False)
+        if found:
+            self.report_extraction('%s: KVS Player' % (video_id, ))
+            if found.split('.')[0] not in ('4', '5', '6'):
+                self.report_warning('Untested major version (%s) in player engine - download may fail.' % (found, ))
+            return merge_dicts(
+                self._extract_kvs(url, webpage, video_id),
+                info_dict)
+
         # Looking for http://schema.org/VideoObject
         json_ld = self._search_json_ld(
             webpage, video_id, default={}, expected_type='VideoObject')

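The KVS URL de-obfuscation above hinges on the numeric token derived from flashvars['license_code']; a self-contained copy of that derivation, runnable on Python 3 (compat_str swapped for str), with a made-up licence code:

    # Standalone copy of the KVS licence-token derivation from _extract_kvs().
    # The licence code below is an invented example, not a real flashvars value.
    def getlicensetoken(license):
        modlicense = license.replace('$', '').replace('0', '1')
        center = int(len(modlicense) / 2)
        fronthalf = int(modlicense[:center + 1])
        backhalf = int(modlicense[center:])

        modlicense = str(4 * abs(fronthalf - backhalf))

        def parts():
            for o in range(0, center + 1):
                for i in range(1, 5):
                    yield str((int(license[o + i]) + int(modlicense[o])) % 10)

        return ''.join(parts())

    # yields a 20-digit token used by getrealurl() to unscramble /get_file/ URLs
    print(getlicensetoken('$495462320'))
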
+ 273 - 0
youtube_dl/extractor/globalplayer.py

@@ -0,0 +1,273 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    clean_html,
+    join_nonempty,
+    merge_dicts,
+    parse_duration,
+    str_or_none,
+    T,
+    traverse_obj,
+    unified_strdate,
+    unified_timestamp,
+    urlhandle_detect_ext,
+)
+
+
+class GlobalPlayerBaseIE(InfoExtractor):
+
+    def _get_page_props(self, url, video_id):
+        webpage = self._download_webpage(url, video_id)
+        return self._search_nextjs_data(webpage, video_id)['props']['pageProps']
+
+    def _request_ext(self, url, video_id):
+        return urlhandle_detect_ext(self._request_webpage(  # Server rejects HEAD requests
+            url, video_id, note='Determining source extension'))
+
+    @staticmethod
+    def _clean_desc(x):
+        x = clean_html(x)
+        if x:
+            x = x.replace('\xa0', ' ')
+        return x
+
+    def _extract_audio(self, episode, series):
+
+        return merge_dicts({
+            'vcodec': 'none',
+        }, traverse_obj(series, {
+            'series': 'title',
+            'series_id': 'id',
+            'thumbnail': 'imageUrl',
+            'uploader': 'itunesAuthor',  # podcasts only
+        }), traverse_obj(episode, {
+            'id': 'id',
+            'description': ('description', T(self._clean_desc)),
+            'duration': ('duration', T(parse_duration)),
+            'thumbnail': 'imageUrl',
+            'url': 'streamUrl',
+            'timestamp': (('pubDate', 'startDate'), T(unified_timestamp)),
+            'title': 'title',
+        }, get_all=False), rev=True)
+
+
+class GlobalPlayerLiveIE(GlobalPlayerBaseIE):
+    _VALID_URL = r'https?://www\.globalplayer\.com/live/(?P<id>\w+)/\w+'
+    _TESTS = [{
+        'url': 'https://www.globalplayer.com/live/smoothchill/uk/',
+        'info_dict': {
+            'id': '2mx1E',
+            'ext': 'aac',
+            'display_id': 'smoothchill-uk',
+            'title': 're:^Smooth Chill.+$',
+            'thumbnail': 'https://herald.musicradio.com/media/f296ade8-50c9-4f60-911f-924e96873620.png',
+            'description': 'Music To Chill To',
+            # 'live_status': 'is_live',
+            'is_live': True,
+        },
+    }, {
+        # national station
+        'url': 'https://www.globalplayer.com/live/heart/uk/',
+        'info_dict': {
+            'id': '2mwx4',
+            'ext': 'aac',
+            'description': 'turn up the feel good!',
+            'thumbnail': 'https://herald.musicradio.com/media/49b9e8cb-15bf-4bf2-8c28-a4850cc6b0f3.png',
+            # 'live_status': 'is_live',
+            'is_live': True,
+            'title': 're:^Heart UK.+$',
+            'display_id': 'heart-uk',
+        },
+    }, {
+        # regional variation
+        'url': 'https://www.globalplayer.com/live/heart/london/',
+        'info_dict': {
+            'id': 'AMqg',
+            'ext': 'aac',
+            'thumbnail': 'https://herald.musicradio.com/media/49b9e8cb-15bf-4bf2-8c28-a4850cc6b0f3.png',
+            'title': 're:^Heart London.+$',
+            # 'live_status': 'is_live',
+            'is_live': True,
+            'display_id': 'heart-london',
+            'description': 'turn up the feel good!',
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        station = self._get_page_props(url, video_id)['station']
+        stream_url = station['streamUrl']
+
+        return merge_dicts({
+            'id': station['id'],
+            'display_id': (
+                join_nonempty('brandSlug', 'slug', from_dict=station)
+                or station.get('legacyStationPrefix')),
+            'url': stream_url,
+            'ext': self._request_ext(stream_url, video_id),
+            'vcodec': 'none',
+            'is_live': True,
+        }, {
+            'title': self._live_title(traverse_obj(
+                station, (('name', 'brandName'), T(str_or_none)),
+                get_all=False)),
+        }, traverse_obj(station, {
+            'description': 'tagline',
+            'thumbnail': 'brandLogo',
+        }), rev=True)
+
+
+class GlobalPlayerLivePlaylistIE(GlobalPlayerBaseIE):
+    _VALID_URL = r'https?://www\.globalplayer\.com/playlists/(?P<id>\w+)'
+    _TESTS = [{
+        # "live playlist"
+        'url': 'https://www.globalplayer.com/playlists/8bLk/',
+        'info_dict': {
+            'id': '8bLk',
+            'ext': 'aac',
+            # 'live_status': 'is_live',
+            'is_live': True,
+            'description': r're:(?s).+\bclassical\b.+\bClassic FM Hall [oO]f Fame\b',
+            'thumbnail': 'https://images.globalplayer.com/images/551379?width=450&signature=oMLPZIoi5_dBSHnTMREW0Xg76mA=',
+            'title': 're:Classic FM Hall of Fame.+$'
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        station = self._get_page_props(url, video_id)['playlistData']
+        stream_url = station['streamUrl']
+
+        return merge_dicts({
+            'id': video_id,
+            'url': stream_url,
+            'ext': self._request_ext(stream_url, video_id),
+            'vcodec': 'none',
+            'is_live': True,
+        }, traverse_obj(station, {
+            'title': 'title',
+            'description': ('description', T(self._clean_desc)),
+            'thumbnail': 'image',
+        }), rev=True)
+
+
+class GlobalPlayerAudioIE(GlobalPlayerBaseIE):
+    _VALID_URL = r'https?://www\.globalplayer\.com/(?:(?P<podcast>podcasts)/|catchup/\w+/\w+/)(?P<id>\w+)/?(?:$|[?#])'
+    _TESTS = [{
+        # podcast
+        'url': 'https://www.globalplayer.com/podcasts/42KuaM/',
+        'playlist_mincount': 5,
+        'info_dict': {
+            'id': '42KuaM',
+            'title': 'Filthy Ritual',
+            'thumbnail': 'md5:60286e7d12d795bd1bbc9efc6cee643e',
+            'categories': ['Society & Culture', 'True Crime'],
+            'uploader': 'Global',
+            'description': r're:(?s).+\bscam\b.+?\bseries available now\b',
+        },
+    }, {
+        # radio catchup
+        'url': 'https://www.globalplayer.com/catchup/lbc/uk/46vyD7z/',
+        'playlist_mincount': 2,
+        'info_dict': {
+            'id': '46vyD7z',
+            'description': 'Nick Ferrari At Breakfast is Leading Britain\'s Conversation.',
+            'title': 'Nick Ferrari',
+            'thumbnail': 'md5:4df24d8a226f5b2508efbcc6ae874ebf',
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id, podcast = self._match_valid_url(url).group('id', 'podcast')
+        props = self._get_page_props(url, video_id)
+        series = props['podcastInfo'] if podcast else props['catchupInfo']
+
+        return merge_dicts({
+            '_type': 'playlist',
+            'id': video_id,
+            'entries': [self._extract_audio(ep, series) for ep in traverse_obj(
+                        series, ('episodes', lambda _, v: v['id'] and v['streamUrl']))],
+            'categories': traverse_obj(series, ('categories', Ellipsis, 'name')) or None,
+        }, traverse_obj(series, {
+            'description': ('description', T(self._clean_desc)),
+            'thumbnail': 'imageUrl',
+            'title': 'title',
+            'uploader': 'itunesAuthor',  # podcasts only
+        }), rev=True)
+
+
+class GlobalPlayerAudioEpisodeIE(GlobalPlayerBaseIE):
+    _VALID_URL = r'https?://www\.globalplayer\.com/(?:(?P<podcast>podcasts)|catchup/\w+/\w+)/episodes/(?P<id>\w+)/?(?:$|[?#])'
+    _TESTS = [{
+        # podcast
+        'url': 'https://www.globalplayer.com/podcasts/episodes/7DrfNnE/',
+        'info_dict': {
+            'id': '7DrfNnE',
+            'ext': 'mp3',
+            'title': 'Filthy Ritual - Trailer',
+            'description': 'md5:1f1562fd0f01b4773b590984f94223e0',
+            'thumbnail': 'md5:60286e7d12d795bd1bbc9efc6cee643e',
+            'duration': 225.0,
+            'timestamp': 1681254900,
+            'series': 'Filthy Ritual',
+            'series_id': '42KuaM',
+            'upload_date': '20230411',
+            'uploader': 'Global',
+        },
+    }, {
+        # radio catchup
+        'url': 'https://www.globalplayer.com/catchup/lbc/uk/episodes/2zGq26Vcv1fCWhddC4JAwETXWe/',
+        'only_matching': True,
+        # expired: refresh the details with a current show for a full test
+        'info_dict': {
+            'id': '2zGq26Vcv1fCWhddC4JAwETXWe',
+            'ext': 'm4a',
+            'timestamp': 1682056800,
+            'series': 'Nick Ferrari',
+            'thumbnail': 'md5:4df24d8a226f5b2508efbcc6ae874ebf',
+            'upload_date': '20230421',
+            'series_id': '46vyD7z',
+            'description': 'Nick Ferrari At Breakfast is Leading Britain\'s Conversation.',
+            'title': 'Nick Ferrari',
+            'duration': 10800.0,
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id, podcast = self._match_valid_url(url).group('id', 'podcast')
+        props = self._get_page_props(url, video_id)
+        episode = props['podcastEpisode'] if podcast else props['catchupEpisode']
+
+        return self._extract_audio(
+            episode, traverse_obj(episode, 'podcast', 'show', expected_type=dict) or {})
+
+
+class GlobalPlayerVideoIE(GlobalPlayerBaseIE):
+    _VALID_URL = r'https?://www\.globalplayer\.com/videos/(?P<id>\w+)'
+    _TESTS = [{
+        'url': 'https://www.globalplayer.com/videos/2JsSZ7Gm2uP/',
+        'info_dict': {
+            'id': '2JsSZ7Gm2uP',
+            'ext': 'mp4',
+            'description': 'md5:6a9f063c67c42f218e42eee7d0298bfd',
+            'thumbnail': 'md5:d4498af48e15aae4839ce77b97d39550',
+            'upload_date': '20230420',
+            'title': 'Treble Malakai Bayoh sings a sublime Handel aria at Classic FM Live',
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        meta = self._get_page_props(url, video_id)['videoData']
+
+        return merge_dicts({
+            'id': video_id,
+        }, traverse_obj(meta, {
+            'url': 'url',
+            'thumbnail': ('image', 'url'),
+            'title': 'title',
+            'upload_date': ('publish_date', T(unified_strdate)),
+            'description': 'description',
+        }), rev=True)

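The extractor leans heavily on the dict-template form of traverse_obj with T() wrappers (both backported helpers in youtube_dl.utils); a minimal sketch of the pattern from _extract_audio(), with invented episode data:

    # The traverse_obj dict-template pattern used in _extract_audio(), shown
    # with made-up data; keys whose traversal fails are omitted from the result.
    from youtube_dl.utils import T, parse_duration, traverse_obj

    episode = {'id': '7DrfNnE', 'title': 'Trailer', 'duration': '3:45'}
    info = traverse_obj(episode, {
        'id': 'id',
        'title': 'title',
        'duration': ('duration', T(parse_duration)),
    })
    print(info)  # {'id': '7DrfNnE', 'title': 'Trailer', 'duration': 225.0}
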
+ 101 - 0
youtube_dl/extractor/hrfernsehen.py

@@ -0,0 +1,101 @@
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+import json
+import re
+
+from ..utils import (
+    int_or_none,
+    unified_timestamp,
+    unescapeHTML
+)
+from .common import InfoExtractor
+
+
+class HRFernsehenIE(InfoExtractor):
+    IE_NAME = 'hrfernsehen'
+    _VALID_URL = r'^https?://www\.(?:hr-fernsehen|hessenschau)\.de/.*,video-(?P<id>[0-9]{6})\.html'
+
+    _TESTS = [{
+        'url': 'https://www.hessenschau.de/tv-sendung/hessenschau-vom-26082020,video-130546.html',
+        'md5': '5c4e0ba94677c516a2f65a84110fc536',
+        'info_dict': {
+            'id': '130546',
+            'ext': 'mp4',
+            'description': 'Sturmtief Kirsten fegt über Hessen / Die Corona-Pandemie – eine Chronologie / '
+                           'Sterbehilfe: Die Lage in Hessen / Miss Hessen leitet zwei eigene Unternehmen / '
+                           'Pop-Up Museum zeigt Schwarze Unterhaltung und Black Music',
+            'subtitles': {'de': [{
+                'url': 'https://hr-a.akamaihd.net/video/as/hessenschau/2020_08/hrLogo_200826200407_L385592_512x288-25p-500kbit.vtt'
+            }]},
+            'timestamp': 1598470200,
+            'upload_date': '20200826',
+            'thumbnail': 'https://www.hessenschau.de/tv-sendung/hs_ganz-1554~_t-1598465545029_v-16to9__medium.jpg',
+            'title': 'hessenschau vom 26.08.2020'
+        }
+    }, {
+        'url': 'https://www.hr-fernsehen.de/sendungen-a-z/mex/sendungen/fair-und-gut---was-hinter-aldis-eigenem-guetesiegel-steckt,video-130544.html',
+        'only_matching': True
+    }]
+
+    _GEO_COUNTRIES = ['DE']
+
+    def extract_airdate(self, loader_data):
+        airdate_str = loader_data.get('mediaMetadata', {}).get('agf', {}).get('airdate')
+
+        if airdate_str is None:
+            return None
+
+        return unified_timestamp(airdate_str)
+
+    def extract_formats(self, loader_data):
+        stream_formats = []
+        for stream_obj in loader_data["videoResolutionLevels"]:
+            stream_format = {
+                'format_id': str(stream_obj['verticalResolution']) + "p",
+                'height': stream_obj['verticalResolution'],
+                'url': stream_obj['url'],
+            }
+
+            quality_information = re.search(r'([0-9]{3,4})x([0-9]{3,4})-([0-9]{2})p-([0-9]{3,4})kbit',
+                                            stream_obj['url'])
+            if quality_information:
+                stream_format['width'] = int_or_none(quality_information.group(1))
+                stream_format['height'] = int_or_none(quality_information.group(2))
+                stream_format['fps'] = int_or_none(quality_information.group(3))
+                stream_format['tbr'] = int_or_none(quality_information.group(4))
+
+            stream_formats.append(stream_format)
+
+        self._sort_formats(stream_formats)
+        return stream_formats
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_meta(
+            ['og:title', 'twitter:title', 'name'], webpage)
+        description = self._html_search_meta(
+            ['description'], webpage)
+
+        loader_str = unescapeHTML(self._search_regex(r"data-new-hr-mediaplayer-loader='([^']*)'", webpage, "ardloader"))
+        loader_data = json.loads(loader_str)
+
+        info = {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'formats': self.extract_formats(loader_data),
+            'timestamp': self.extract_airdate(loader_data)
+        }
+
+        if "subtitle" in loader_data:
+            info["subtitles"] = {"de": [{"url": loader_data["subtitle"]}]}
+
+        thumbnails = list(set([t for t in loader_data.get("previewImageUrl", {}).values()]))
+        if len(thumbnails) > 0:
+            info["thumbnails"] = [{"url": t} for t in thumbnails]
+
+        return info

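The quality_information regex in extract_formats() recovers width, height, fps and bitrate straight from HR's asset names; for instance, run against the asset name that appears in the test case above (a .vtt URL, reused here purely for illustration):

    import re

    url = ('https://hr-a.akamaihd.net/video/as/hessenschau/2020_08/'
           'hrLogo_200826200407_L385592_512x288-25p-500kbit.vtt')
    m = re.search(r'([0-9]{3,4})x([0-9]{3,4})-([0-9]{2})p-([0-9]{3,4})kbit', url)
    width, height, fps, tbr = map(int, m.groups())
    print(width, height, fps, tbr)  # 512 288 25 500
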
+ 247 - 90
youtube_dl/extractor/ign.py

@@ -1,19 +1,29 @@
+# coding: utf-8
+
 from __future__ import unicode_literals

 import re

 from .common import InfoExtractor
 from ..compat import (
+    compat_filter as filter,
+    compat_HTTPError,
     compat_parse_qs,
-    compat_urllib_parse_urlparse,
+    compat_urlparse,
 )
 from ..utils import (
-    HEADRequest,
     determine_ext,
+    error_to_compat_str,
+    extract_attributes,
+    ExtractorError,
     int_or_none,
+    merge_dicts,
+    orderedSet,
     parse_iso8601,
     strip_or_none,
-    try_get,
+    traverse_obj,
+    url_or_none,
+    urljoin,
 )


@@ -22,69 +32,37 @@ class IGNBaseIE(InfoExtractor):
         return self._download_json(
             'http://apis.ign.com/{0}/v3/{0}s/slug/{1}'.format(self._PAGE_TYPE, slug), slug)

+    def _checked_call_api(self, slug):
+        try:
+            return self._call_api(slug)
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
+                e.cause.args = e.cause.args or [
+                    e.cause.geturl(), e.cause.getcode(), e.cause.reason]
+                raise ExtractorError(
+                    'Content not found: expired?', cause=e.cause,
+                    expected=True)
+            raise
-class IGNIE(IGNBaseIE):
-    """
-    Extractor for some of the IGN sites, like www.ign.com, es.ign.com de.ign.com.
-    Some videos of it.ign.com are also supported
-    """
-
-    _VALID_URL = r'https?://(?:.+?\.ign|www\.pcmag)\.com/videos/(?:\d{4}/\d{2}/\d{2}/)?(?P<id>[^/?&#]+)'
-    IE_NAME = 'ign.com'
-    _PAGE_TYPE = 'video'
-
-    _TESTS = [{
-        'url': 'http://www.ign.com/videos/2013/06/05/the-last-of-us-review',
-        'md5': 'd2e1586d9987d40fad7867bf96a018ea',
-        'info_dict': {
-            'id': '8f862beef863986b2785559b9e1aa599',
-            'ext': 'mp4',
-            'title': 'The Last of Us Review',
-            'description': 'md5:c8946d4260a4d43a00d5ae8ed998870c',
-            'timestamp': 1370440800,
-            'upload_date': '20130605',
-            'tags': 'count:9',
-        }
-    }, {
-        'url': 'http://www.pcmag.com/videos/2015/01/06/010615-whats-new-now-is-gogo-snooping-on-your-data',
-        'md5': 'f1581a6fe8c5121be5b807684aeac3f6',
-        'info_dict': {
-            'id': 'ee10d774b508c9b8ec07e763b9125b91',
-            'ext': 'mp4',
-            'title': 'What\'s New Now: Is GoGo Snooping on Your Data?',
-            'description': 'md5:817a20299de610bd56f13175386da6fa',
-            'timestamp': 1420571160,
-            'upload_date': '20150106',
-            'tags': 'count:4',
-        }
-    }, {
-        'url': 'https://www.ign.com/videos/is-a-resident-evil-4-remake-on-the-way-ign-daily-fix',
-        'only_matching': True,
-    }]
-
-    def _real_extract(self, url):
-        display_id = self._match_id(url)
-        video = self._call_api(display_id)
+    def _extract_video_info(self, video, fatal=True):
         video_id = video['videoId']
-        metadata = video['metadata']
-        title = metadata.get('longTitle') or metadata.get('title') or metadata['name']

         formats = []
-        refs = video.get('refs') or {}
+        refs = traverse_obj(video, 'refs', expected_type=dict) or {}

-        m3u8_url = refs.get('m3uUrl')
+        m3u8_url = url_or_none(refs.get('m3uUrl'))
         if m3u8_url:
             formats.extend(self._extract_m3u8_formats(
                 m3u8_url, video_id, 'mp4', 'm3u8_native',
                 m3u8_id='hls', fatal=False))

-        f4m_url = refs.get('f4mUrl')
+        f4m_url = url_or_none(refs.get('f4mUrl'))
         if f4m_url:
             formats.extend(self._extract_f4m_formats(
                 f4m_url, video_id, f4m_id='hds', fatal=False))

         for asset in (video.get('assets') or []):
-            asset_url = asset.get('url')
+            asset_url = url_or_none(asset.get('url'))
             if not asset_url:
                 continue
             formats.append({
@@ -95,7 +73,8 @@ class IGNIE(IGNBaseIE):
                 'width': int_or_none(asset.get('width')),
             })

-        mezzanine_url = try_get(video, lambda x: x['system']['mezzanineUrl'])
+        mezzanine_url = traverse_obj(
+            video, ('system', 'mezzanineUrl'), expected_type=url_or_none)
         if mezzanine_url:
             formats.append({
                 'ext': determine_ext(mezzanine_url, 'mp4'),
@@ -104,23 +83,21 @@ class IGNIE(IGNBaseIE):
                 'url': mezzanine_url,
             })

-        self._sort_formats(formats)
+        if formats or fatal:
+            self._sort_formats(formats)
+        else:
+            return

-        thumbnails = []
-        for thumbnail in (video.get('thumbnails') or []):
-            thumbnail_url = thumbnail.get('url')
-            if not thumbnail_url:
-                continue
-            thumbnails.append({
-                'url': thumbnail_url,
-            })
+        thumbnails = traverse_obj(
+            video, ('thumbnails', Ellipsis, {'url': 'url'}), expected_type=url_or_none)
+        tags = traverse_obj(
+            video, ('tags', Ellipsis, 'displayName'),
+            expected_type=lambda x: x.strip() or None)
 
 
-        tags = []
-        for tag in (video.get('tags') or []):
-            display_name = tag.get('displayName')
-            if not display_name:
-                continue
-            tags.append(display_name)
+        metadata = traverse_obj(video, 'metadata', expected_type=dict) or {}
+        title = traverse_obj(
+            metadata, 'longTitle', 'title', 'name',
+            expected_type=lambda x: x.strip() or None)

         return {
             'id': video_id,
@@ -128,14 +105,103 @@ class IGNIE(IGNBaseIE):
             'description': strip_or_none(metadata.get('description')),
             'timestamp': parse_iso8601(metadata.get('publishDate')),
             'duration': int_or_none(metadata.get('duration')),
-            'display_id': display_id,
             'thumbnails': thumbnails,
             'formats': formats,
             'tags': tags,
         }

+    # yt-dlp shim
+    @classmethod
+    def _extract_from_webpage(cls, url, webpage):
+        for embed_url in orderedSet(
+                cls._extract_embed_urls(url, webpage) or [], lazy=True):
+            yield cls.url_result(embed_url, None if cls._VALID_URL is False else cls)
+
-class IGNVideoIE(InfoExtractor):
+class IGNIE(IGNBaseIE):
+    """
+    Extractor for some of the IGN sites, like www.ign.com, es.ign.com de.ign.com.
+    Some videos of it.ign.com are also supported
+    """
+    _VIDEO_PATH_RE = r'/(?:\d{4}/\d{2}/\d{2}/)?(?P<id>.+?)'
+    _PLAYLIST_PATH_RE = r'(?:/?\?(?P<filt>[^&#]+))?'
+    _VALID_URL = (
+        r'https?://(?:.+?\.ign|www\.pcmag)\.com/videos(?:%s)'
+        % '|'.join((_VIDEO_PATH_RE + r'(?:[/?&#]|$)', _PLAYLIST_PATH_RE)))
+    IE_NAME = 'ign.com'
+    _PAGE_TYPE = 'video'
+
+    _TESTS = [{
+        'url': 'http://www.ign.com/videos/2013/06/05/the-last-of-us-review',
+        'md5': 'd2e1586d9987d40fad7867bf96a018ea',
+        'info_dict': {
+            'id': '8f862beef863986b2785559b9e1aa599',
+            'ext': 'mp4',
+            'title': 'The Last of Us Review',
+            'description': 'md5:c8946d4260a4d43a00d5ae8ed998870c',
+            'timestamp': 1370440800,
+            'upload_date': '20130605',
+            'tags': 'count:9',
+        },
+        'params': {
+            'nocheckcertificate': True,
+        },
+    }, {
+        'url': 'http://www.pcmag.com/videos/2015/01/06/010615-whats-new-now-is-gogo-snooping-on-your-data',
+        'md5': 'f1581a6fe8c5121be5b807684aeac3f6',
+        'info_dict': {
+            'id': 'ee10d774b508c9b8ec07e763b9125b91',
+            'ext': 'mp4',
+            'title': 'What\'s New Now: Is GoGo Snooping on Your Data?',
+            'description': 'md5:817a20299de610bd56f13175386da6fa',
+            'timestamp': 1420571160,
+            'upload_date': '20150106',
+            'tags': 'count:4',
+        },
+        'skip': '404 Not Found',
+    }, {
+        'url': 'https://www.ign.com/videos/is-a-resident-evil-4-remake-on-the-way-ign-daily-fix',
+        'only_matching': True,
+    }]
+
+    @classmethod
+    def _extract_embed_urls(cls, url, webpage):
+        grids = re.findall(
+            r'''(?s)<section\b[^>]+\bclass\s*=\s*['"](?:[\w-]+\s+)*?content-feed-grid(?!\B|-)[^>]+>(.+?)</section[^>]*>''',
+            webpage)
+        return filter(None,
+                      (urljoin(url, m.group('path')) for m in re.finditer(
+                          r'''<a\b[^>]+\bhref\s*=\s*('|")(?P<path>/videos%s)\1'''
+                          % cls._VIDEO_PATH_RE, grids[0] if grids else '')))
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        display_id = m.group('id')
+        if display_id:
+            return self._extract_video(url, display_id)
+        display_id = m.group('filt') or 'all'
+        return self._extract_playlist(url, display_id)
+
+    def _extract_playlist(self, url, display_id):
+        webpage = self._download_webpage(url, display_id)
+
+        return self.playlist_result(
+            (self.url_result(u, ie=self.ie_key())
+             for u in self._extract_embed_urls(url, webpage)),
+            playlist_id=display_id)
+
+    def _extract_video(self, url, display_id):
+        display_id = self._match_id(url)
+        video = self._checked_call_api(display_id)
+
+        info = self._extract_video_info(video)
+
+        return merge_dicts({
+            'display_id': display_id,
+        }, info)
+
+
+class IGNVideoIE(IGNBaseIE):
     _VALID_URL = r'https?://.+?\.ign\.com/(?:[a-z]{2}/)?[^/]+/(?P<id>\d+)/(?:video|trailer)/'
     _TESTS = [{
         'url': 'http://me.ign.com/en/videos/112203/video/how-hitman-aims-to-be-different-than-every-other-s',
@@ -147,7 +213,8 @@ class IGNVideoIE(InfoExtractor):
             'description': 'Taking out assassination targets in Hitman has never been more stylish.',
             'timestamp': 1444665600,
             'upload_date': '20151012',
-        }
+        },
+        'expected_warnings': ['HTTP Error 400: Bad Request'],
     }, {
         'url': 'http://me.ign.com/ar/angry-birds-2/106533/video/lrd-ldyy-lwl-lfylm-angry-birds',
         'only_matching': True,
@@ -167,22 +234,38 @@ class IGNVideoIE(InfoExtractor):

     def _real_extract(self, url):
         video_id = self._match_id(url)
-        req = HEADRequest(url.rsplit('/', 1)[0] + '/embed')
-        url = self._request_webpage(req, video_id).geturl()
+        parsed_url = compat_urlparse.urlparse(url)
+        embed_url = compat_urlparse.urlunparse(
+            parsed_url._replace(path=parsed_url.path.rsplit('/', 1)[0] + '/embed'))
+
+        webpage, urlh = self._download_webpage_handle(embed_url, video_id)
+        new_url = urlh.geturl()
         ign_url = compat_parse_qs(
-            compat_urllib_parse_urlparse(url).query).get('url', [None])[0]
+            compat_urlparse.urlparse(new_url).query).get('url', [None])[-1]
         if ign_url:
             return self.url_result(ign_url, IGNIE.ie_key())
-        return self.url_result(url)
+        video = self._search_regex(r'(<div\b[^>]+\bdata-video-id\s*=\s*[^>]+>)', webpage, 'video element', fatal=False)
+        if not video:
+            if new_url == url:
+                raise ExtractorError('Redirect loop: ' + url)
+            return self.url_result(new_url)
+        video = extract_attributes(video)
+        video_data = video.get('data-settings') or '{}'
+        video_data = self._parse_json(video_data, video_id)['video']
+        info = self._extract_video_info(video_data)
+
+        return merge_dicts({
+            'display_id': video_id,
+        }, info)


 class IGNArticleIE(IGNBaseIE):
-    _VALID_URL = r'https?://.+?\.ign\.com/(?:articles(?:/\d{4}/\d{2}/\d{2})?|(?:[a-z]{2}/)?feature/\d+)/(?P<id>[^/?&#]+)'
+    _VALID_URL = r'https?://.+?\.ign\.com/(?:articles(?:/\d{4}/\d{2}/\d{2})?|(?:[a-z]{2}/)?(?:[\w-]+/)*?feature/\d+)/(?P<id>[^/?&#]+)'
     _PAGE_TYPE = 'article'
     _TESTS = [{
         'url': 'http://me.ign.com/en/feature/15775/100-little-things-in-gta-5-that-will-blow-your-mind',
         'info_dict': {
-            'id': '524497489e4e8ff5848ece34',
+            'id': '72113',
             'title': '100 Little Things in GTA 5 That Will Blow Your Mind',
         },
         'playlist': [
@@ -190,7 +273,7 @@ class IGNArticleIE(IGNBaseIE):
                 'info_dict': {
                     'id': '5ebbd138523268b93c9141af17bec937',
                     'ext': 'mp4',
-                    'title': 'GTA 5 Video Review',
+                    'title': 'Grand Theft Auto V Video Review',
                     'description': 'Rockstar drops the mic on this generation of games. Watch our review of the masterly Grand Theft Auto V.',
                     'timestamp': 1379339880,
                     'upload_date': '20130916',
@@ -200,7 +283,7 @@ class IGNArticleIE(IGNBaseIE):
                 'info_dict': {
                     'id': '638672ee848ae4ff108df2a296418ee2',
                     'ext': 'mp4',
-                    'title': '26 Twisted Moments from GTA 5 in Slow Motion',
+                    'title': 'GTA 5 In Slow Motion',
                     'description': 'The twisted beauty of GTA 5 in stunning slow motion.',
                     'timestamp': 1386878820,
                     'upload_date': '20131212',
@@ -208,16 +291,17 @@ class IGNArticleIE(IGNBaseIE):
             },
         ],
         'params': {
-            'playlist_items': '2-3',
             'skip_download': True,
         },
+        'expected_warnings': ['Backend fetch failed'],
     }, {
         'url': 'http://www.ign.com/articles/2014/08/15/rewind-theater-wild-trailer-gamescom-2014?watch',
         'info_dict': {
             'id': '53ee806780a81ec46e0790f8',
             'title': 'Rewind Theater - Wild Trailer Gamescom 2014',
         },
-        'playlist_count': 2,
+        'playlist_count': 1,
+        'expected_warnings': ['Backend fetch failed'],
     }, {
         # videoId pattern
         'url': 'http://www.ign.com/articles/2017/06/08/new-ducktales-short-donalds-birthday-doesnt-go-as-planned',
@@ -240,18 +324,91 @@ class IGNArticleIE(IGNBaseIE):
         'only_matching': True,
     }]

+    def _checked_call_api(self, slug):
+        try:
+            return self._call_api(slug)
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError):
+                e.cause.args = e.cause.args or [
+                    e.cause.geturl(), e.cause.getcode(), e.cause.reason]
+                if e.cause.code == 404:
+                    raise ExtractorError(
+                        'Content not found: expired?', cause=e.cause,
+                        expected=True)
+                elif e.cause.code == 503:
+                    self.report_warning(error_to_compat_str(e.cause))
+                    return
+            raise
+
+    def _search_nextjs_data(self, webpage, video_id, **kw):
+        return self._parse_json(
+            self._search_regex(
+                r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
+                webpage, 'next.js data', **kw),
+            video_id, **kw)
+
     def _real_extract(self, url):
         display_id = self._match_id(url)
-        article = self._call_api(display_id)
+        article = self._checked_call_api(display_id)
+
+        if article:
+            # obsolete ?
+            def entries():
+                media_url = traverse_obj(
+                    article, ('mediaRelations', 0, 'media', 'metadata', 'url'),
+                    expected_type=url_or_none)
+                if media_url:
+                    yield self.url_result(media_url, IGNIE.ie_key())
+                for content in (article.get('content') or []):
+                    for video_url in re.findall(r'(?:\[(?:ignvideo\s+url|youtube\s+clip_id)|<iframe[^>]+src)="([^"]+)"', content):
+                        if url_or_none(video_url):
+                            yield self.url_result(video_url)
+
+            return self.playlist_result(
+                entries(), article.get('articleId'),
+                traverse_obj(
+                    article, ('metadata', 'headline'),
+                    expected_type=lambda x: x.strip() or None))
+
+        webpage = self._download_webpage(url, display_id)
+
+        playlist_id = self._html_search_meta('dable:item_id', webpage, default=None)
+        if playlist_id:
+
+            def entries():
+                for m in re.finditer(
+                        r'''(?s)<object\b[^>]+\bclass\s*=\s*("|')ign-videoplayer\1[^>]*>(?P<params>.+?)</object''',
+                        webpage):
+                    flashvars = self._search_regex(
+                        r'''(<param\b[^>]+\bname\s*=\s*("|')flashvars\2[^>]*>)''',
+                        m.group('params'), 'flashvars', default='')
+                    flashvars = compat_parse_qs(extract_attributes(flashvars).get('value') or '')
+                    v_url = url_or_none((flashvars.get('url') or [None])[-1])
+                    if v_url:
+                        yield self.url_result(v_url)
+        else:
+            playlist_id = self._search_regex(
+                r'''\bdata-post-id\s*=\s*("|')(?P<id>[\da-f]+)\1''',
+                webpage, 'id', group='id', default=None)
+
+            nextjs_data = self._search_nextjs_data(webpage, display_id)

-        def entries():
-            media_url = try_get(article, lambda x: x['mediaRelations'][0]['media']['metadata']['url'])
-            if media_url:
-                yield self.url_result(media_url, IGNIE.ie_key())
-            for content in (article.get('content') or []):
-                for video_url in re.findall(r'(?:\[(?:ignvideo\s+url|youtube\s+clip_id)|<iframe[^>]+src)="([^"]+)"', content):
-                    yield self.url_result(video_url)
+            def entries():
+                for player in traverse_obj(
+                        nextjs_data,
+                        ('props', 'apolloState', 'ROOT_QUERY', lambda k, _: k.startswith('videoPlayerProps('), '__ref')):
+                    # skip promo links (which may not always be served, eg GH CI servers)
+                    if traverse_obj(nextjs_data,
+                                    ('props', 'apolloState', player.replace('PlayerProps', 'ModernContent')),
+                                    expected_type=dict):
+                        continue
+                    video = traverse_obj(nextjs_data, ('props', 'apolloState', player), expected_type=dict) or {}
+                    info = self._extract_video_info(video, fatal=False)
+                    if info:
+                        yield merge_dicts({
+                            'display_id': display_id,
+                        }, info)

         return self.playlist_result(
-            entries(), article.get('articleId'),
-            strip_or_none(try_get(article, lambda x: x['metadata']['headline'])))
+            entries(), playlist_id or display_id,
+            re.sub(r'\s+-\s+IGN\s*$', '', self._og_search_title(webpage, default='')) or None)
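For context: the _search_nextjs_data helper added above pulls the JSON state that next.js pages embed in a __NEXT_DATA__ script tag, so the extractor can walk structured data instead of scraping HTML. A rough standalone sketch of the same idea (the function name here is illustrative, not part of the patch):

import json
import re

def search_nextjs_data(webpage):
    # next.js serialises its page state as JSON inside a script tag
    m = re.search(
        r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
        webpage)
    return json.loads(m.group(1)) if m else None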

+ 279 - 69
youtube_dl/extractor/imgur.py

@@ -1,101 +1,267 @@
+# coding: utf-8
 from __future__ import unicode_literals

 import re

 from .common import InfoExtractor
 from ..utils import (
+    determine_ext,
+    ExtractorError,
+    float_or_none,
     int_or_none,
     js_to_json,
+    merge_dicts,
     mimetype2ext,
-    ExtractorError,
+    parse_iso8601,
+    T,
+    traverse_obj,
+    txt_or_none,
+    url_or_none,
 )


-class ImgurIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|(?:t(?:opic)?|r)/[^/]+)/)(?P<id>[a-zA-Z0-9]+)'
+class ImgurBaseIE(InfoExtractor):
+    # hard-coded value, as also used by ArchiveTeam
+    _CLIENT_ID = '546c25a59c58ad7'
+
+    @classmethod
+    def _imgur_result(cls, item_id):
+        return cls.url_result('imgur:%s' % item_id, ImgurIE.ie_key(), item_id)
+
+    def _call_api(self, endpoint, video_id, **kwargs):
+        return self._download_json(
+            'https://api.imgur.com/post/v1/%s/%s?client_id=%s&include=media,account' % (endpoint, video_id, self._CLIENT_ID),
+            video_id, **kwargs)
+
+    @staticmethod
+    def get_description(s):
+        if 'Discover the magic of the internet at Imgur' in s:
+            return None
+        return txt_or_none(s)
+
+
+class ImgurIE(ImgurBaseIE):
+    _VALID_URL = r'''(?x)
+        (?:
+            https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|t|topic|r)/)|
+            imgur:
+        )(?P<id>[a-zA-Z0-9]+)
+    '''

     _TESTS = [{
-        'url': 'https://i.imgur.com/A61SaA1.gifv',
+        'url': 'https://imgur.com/A61SaA1',
         'info_dict': {
             'id': 'A61SaA1',
             'ext': 'mp4',
             'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
+            'timestamp': 1416446068,
+            'upload_date': '20141120',
         },
     }, {
-        'url': 'https://imgur.com/A61SaA1',
+        'url': 'https://i.imgur.com/A61SaA1.gifv',
         'only_matching': True,
     }, {
         'url': 'https://i.imgur.com/crGpqCV.mp4',
         'only_matching': True,
     }, {
-        # no title
+        # previously, no title
         'url': 'https://i.imgur.com/jxBXAMC.gifv',
-        'only_matching': True,
+        'info_dict': {
+            'id': 'jxBXAMC',
+            'ext': 'mp4',
+            'title': 'Fahaka puffer feeding',
+            'timestamp': 1533835503,
+            'upload_date': '20180809',
+        },
     }]

+    def _extract_twitter_formats(self, html, tw_id='twitter', **kwargs):
+        fatal = kwargs.pop('fatal', False)
+        tw_stream = self._html_search_meta('twitter:player:stream', html, fatal=fatal, **kwargs)
+        if not tw_stream:
+            return []
+        ext = mimetype2ext(self._html_search_meta(
+            'twitter:player:stream:content_type', html, default=None))
+        width, height = (int_or_none(self._html_search_meta('twitter:player:' + v, html, default=None))
+                         for v in ('width', 'height'))
+        return [{
+            'format_id': tw_id,
+            'url': tw_stream,
+            'ext': ext or determine_ext(tw_stream),
+            'width': width,
+            'height': height,
+        }]
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
+        data = self._call_api('media', video_id, fatal=False, expected_status=404)
         webpage = self._download_webpage(
-            'https://i.imgur.com/{id}.gifv'.format(id=video_id), video_id)
+            'https://i.imgur.com/{id}.gifv'.format(id=video_id), video_id, fatal=not data) or ''
+
+        if not traverse_obj(data, ('media', 0, (
+                ('type', T(lambda t: t == 'video' or None)),
+                ('metadata', 'is_animated'))), get_all=False):
+            raise ExtractorError(
+                '%s is not a video or animated image' % video_id,
+                expected=True)
+
+        media_fmt = traverse_obj(data, ('media', 0, {
+            'url': ('url', T(url_or_none)),
+            'ext': 'ext',
+            'width': ('width', T(int_or_none)),
+            'height': ('height', T(int_or_none)),
+            'filesize': ('size', T(int_or_none)),
+            'acodec': ('metadata', 'has_sound', T(lambda b: None if b else 'none')),
+        }))

-        width = int_or_none(self._og_search_property(
-            'video:width', webpage, default=None))
-        height = int_or_none(self._og_search_property(
-            'video:height', webpage, default=None))
+        media_url = traverse_obj(media_fmt, 'url')
+        if media_url:
+            if not media_fmt.get('ext'):
+                media_fmt['ext'] = mimetype2ext(traverse_obj(
+                    data, ('media', 0, 'mime_type'))) or determine_ext(media_url)
+            if traverse_obj(data, ('media', 0, 'type')) == 'image':
+                media_fmt['acodec'] = 'none'
+                media_fmt.setdefault('preference', -10)
+
+        tw_formats = self._extract_twitter_formats(webpage)
+        if traverse_obj(tw_formats, (0, 'url')) == media_url:
+            tw_formats = []
+        else:
+            # maybe this isn't an animated image/video?
+            self._check_formats(tw_formats, video_id)

         video_elements = self._search_regex(
             r'(?s)<div class="video-elements">(.*?)</div>',
             webpage, 'video elements', default=None)
-        if not video_elements:
+        if not (video_elements or tw_formats or media_url):
             raise ExtractorError(
-                'No sources found for video %s. Maybe an image?' % video_id,
+                'No sources found for video %s. Maybe a plain image?' % video_id,
                 expected=True)

-        formats = []
-        for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements):
-            formats.append({
-                'format_id': m.group('type').partition('/')[2],
-                'url': self._proto_relative_url(m.group('src')),
-                'ext': mimetype2ext(m.group('type')),
-                'width': width,
-                'height': height,
+        def mung_format(fmt, *extra):
+            fmt.update({
                 'http_headers': {
                     'User-Agent': 'youtube-dl (like wget)',
                 },
             })
+            for d in extra:
+                fmt.update(d)
+            return fmt

-        gif_json = self._search_regex(
-            r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
-            webpage, 'GIF code', fatal=False)
-        if gif_json:
-            gifd = self._parse_json(
-                gif_json, video_id, transform_source=js_to_json)
-            formats.append({
-                'format_id': 'gif',
-                'preference': -10,
-                'width': width,
-                'height': height,
-                'ext': 'gif',
-                'acodec': 'none',
-                'vcodec': 'gif',
-                'container': 'gif',
-                'url': self._proto_relative_url(gifd['gifUrl']),
-                'filesize': gifd.get('size'),
-                'http_headers': {
-                    'User-Agent': 'youtube-dl (like wget)',
-                },
-            })
+        if video_elements:
+            def og_get_size(media_type):
+                return dict((p, int_or_none(self._og_search_property(
+                    ':'.join((media_type, p)), webpage, default=None)))
+                    for p in ('width', 'height'))
+
+            size = og_get_size('video')
+            if all(v is None for v in size.values()):
+                size = og_get_size('image')
+
+            formats = traverse_obj(
+                re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements),
+                (Ellipsis, {
+                    'format_id': ('type', T(lambda s: s.partition('/')[2])),
+                    'url': ('src', T(self._proto_relative_url)),
+                    'ext': ('type', T(mimetype2ext)),
+                }, T(lambda f: mung_format(f, size))))
+
+            gif_json = self._search_regex(
+                r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
+                webpage, 'GIF code', fatal=False)
+            MUST_BRANCH = (None, T(lambda _: None))
+            formats.extend(traverse_obj(gif_json, (
+                T(lambda j: self._parse_json(
+                    j, video_id, transform_source=js_to_json, fatal=False)), {
+                        'url': ('gifUrl', T(self._proto_relative_url)),
+                        'filesize': ('size', T(int_or_none)),
+                }, T(lambda f: mung_format(f, size, {
+                    'format_id': 'gif',
+                    'preference': -10,  # gifs are worse than videos
+                    'ext': 'gif',
+                    'acodec': 'none',
+                    'vcodec': 'gif',
+                    'container': 'gif',
+                })), MUST_BRANCH)))
+        else:
+            formats = []
+
+        # maybe add formats from JSON or page Twitter metadata
+        if not any((u == media_url) for u in traverse_obj(formats, (Ellipsis, 'url'))):
+            formats.append(mung_format(media_fmt))
+        tw_url = traverse_obj(tw_formats, (0, 'url'))
+        if not any((u == tw_url) for u in traverse_obj(formats, (Ellipsis, 'url'))):
+            formats.extend(mung_format(f) for f in tw_formats)

         self._sort_formats(formats)

-        return {
+        return merge_dicts(traverse_obj(data, {
+            'uploader_id': ('account_id', T(txt_or_none),
+                            T(lambda a: a if int_or_none(a) != 0 else None)),
+            'uploader': ('account', 'username', T(txt_or_none)),
+            'uploader_url': ('account', 'avatar_url', T(url_or_none)),
+            'like_count': ('upvote_count', T(int_or_none)),
+            'dislike_count': ('downvote_count', T(int_or_none)),
+            'comment_count': ('comment_count', T(int_or_none)),
+            'age_limit': ('is_mature', T(lambda x: 18 if x else None)),
+            'timestamp': (('updated_at', 'created_at'), T(parse_iso8601)),
+            'release_timestamp': ('created_at', T(parse_iso8601)),
+        }, get_all=False), traverse_obj(data, ('media', 0, 'metadata', {
+            'title': ('title', T(txt_or_none)),
+            'description': ('description', T(self.get_description)),
+            'duration': ('duration', T(float_or_none)),
+            'timestamp': (('updated_at', 'created_at'), T(parse_iso8601)),
+            'release_timestamp': ('created_at', T(parse_iso8601)),
+        })), {
             'id': video_id,
             'formats': formats,
-            'title': self._og_search_title(webpage, default=video_id),
-        }
+            'title': self._og_search_title(webpage, default='Imgur video ' + video_id),
+            'description': self.get_description(self._og_search_description(webpage)),
+            'thumbnail': url_or_none(self._html_search_meta('thumbnailUrl', webpage, default=None)),
+        })
+
+
+class ImgurGalleryBaseIE(ImgurBaseIE):
+    _GALLERY = True
+
+    def _real_extract(self, url):
+        gallery_id = self._match_id(url)

+        data = self._call_api('albums', gallery_id, fatal=False, expected_status=404)

-class ImgurGalleryIE(InfoExtractor):
+        info = traverse_obj(data, {
+            'title': ('title', T(txt_or_none)),
+            'description': ('description', T(self.get_description)),
+        })
+
+        if traverse_obj(data, 'is_album'):
+
+            def yield_media_ids():
+                for m_id in traverse_obj(data, (
+                        'media', lambda _, v: v.get('type') == 'video' or v['metadata']['is_animated'],
+                        'id', T(txt_or_none))):
+                    yield m_id
+
+            # if a gallery with exactly one video, apply album metadata to video
+            media_id = (
+                self._GALLERY
+                and traverse_obj(data, ('image_count', T(lambda c: c == 1)))
+                and next(yield_media_ids(), None))
+
+            if not media_id:
+                result = self.playlist_result(
+                    map(self._imgur_result, yield_media_ids()), gallery_id)
+                result.update(info)
+                return result
+            gallery_id = media_id
+
+        result = self._imgur_result(gallery_id)
+        info['_type'] = 'url_transparent'
+        result.update(info)
+        return result
+
+
+class ImgurGalleryIE(ImgurGalleryBaseIE):
     IE_NAME = 'imgur:gallery'
     _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:gallery|(?:t(?:opic)?|r)/[^/]+)/(?P<id>[a-zA-Z0-9]+)'

@@ -106,49 +272,93 @@ class ImgurGalleryIE(InfoExtractor):
             'title': 'Adding faces make every GIF better',
         },
         'playlist_count': 25,
+        'skip': 'Zoinks! You\'ve taken a wrong turn.',
     }, {
+        # TODO: static images - replace with animated/video gallery
         'url': 'http://imgur.com/topic/Aww/ll5Vk',
         'only_matching': True,
     }, {
         'url': 'https://imgur.com/gallery/YcAQlkx',
+        'add_ies': ['Imgur'],
         'info_dict': {
             'id': 'YcAQlkx',
             'ext': 'mp4',
             'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',
-        }
+            'timestamp': 1358554297,
+            'upload_date': '20130119',
+            'uploader_id': '1648642',
+            'uploader': 'wittyusernamehere',
+        },
     }, {
+        # TODO: static image - replace with animated/video gallery
         'url': 'http://imgur.com/topic/Funny/N8rOudd',
         'only_matching': True,
     }, {
         'url': 'http://imgur.com/r/aww/VQcQPhM',
-        'only_matching': True,
+        'add_ies': ['Imgur'],
+        'info_dict': {
+            'id': 'VQcQPhM',
+            'ext': 'mp4',
+            'title': 'The boss is here',
+            'timestamp': 1476494751,
+            'upload_date': '20161015',
+            'uploader_id': '19138530',
+            'uploader': 'thematrixcam',
+        },
+    },
+        # from PR #16674
+        {
+        'url': 'https://imgur.com/t/unmuted/6lAn9VQ',
+        'info_dict': {
+            'id': '6lAn9VQ',
+            'title': 'Penguins !',
+        },
+        'playlist_count': 3,
+    }, {
+        'url': 'https://imgur.com/t/unmuted/kx2uD3C',
+        'add_ies': ['Imgur'],
+        'info_dict': {
+            'id': 'ZVMv45i',
+            'ext': 'mp4',
+            'title': 'Intruder',
+            'timestamp': 1528129683,
+            'upload_date': '20180604',
+        },
+    }, {
+        'url': 'https://imgur.com/t/unmuted/wXSK0YH',
+        'add_ies': ['Imgur'],
+        'info_dict': {
+            'id': 'JCAP4io',
+            'ext': 'mp4',
+            'title': 're:I got the blues$',
+            'description': 'Luka’s vocal stylings.\n\nFP edit: don’t encourage me. I’ll never stop posting Luka and friends.',
+            'timestamp': 1527809525,
+            'upload_date': '20180531',
+        },
     }]
     }]
 
-    def _real_extract(self, url):
-        gallery_id = self._match_id(url)
-
-        data = self._download_json(
-            'https://imgur.com/gallery/%s.json' % gallery_id,
-            gallery_id)['data']['image']
 
 
-        if data.get('is_album'):
-            entries = [
-                self.url_result('http://imgur.com/%s' % image['hash'], ImgurIE.ie_key(), image['hash'])
-                for image in data['album_images']['images'] if image.get('hash')]
-            return self.playlist_result(entries, gallery_id, data.get('title'), data.get('description'))
-
-        return self.url_result('http://imgur.com/%s' % gallery_id, ImgurIE.ie_key(), gallery_id)
-
-
-class ImgurAlbumIE(ImgurGalleryIE):
+class ImgurAlbumIE(ImgurGalleryBaseIE):
     IE_NAME = 'imgur:album'
     _VALID_URL = r'https?://(?:i\.)?imgur\.com/a/(?P<id>[a-zA-Z0-9]+)'
-
+    _GALLERY = False
     _TESTS = [{
+        # TODO: only static images - replace with animated/video gallery
         'url': 'http://imgur.com/a/j6Orj',
         'url': 'http://imgur.com/a/j6Orj',
+        'only_matching': True,
+    },
+        # from PR #21693
+        {
+        'url': 'https://imgur.com/a/iX265HX',
+        'info_dict': {
+            'id': 'iX265HX',
+            'title': 'enen-no-shouboutai'
+        },
+        'playlist_count': 2,
+    }, {
+        'url': 'https://imgur.com/a/8pih2Ed',
         'info_dict': {
         'info_dict': {
-            'title': 'A Literary Analysis of "Star Wars: The Force Awakens"',
+            'id': '8pih2Ed'
         },
         },
+        'playlist_mincount': 1,
     }]
     }]
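The rewritten imgur code leans heavily on traverse_obj with T() transforms (youtube-dl's backport of the yt-dlp helper, imported from ..utils above). Roughly how the dict-template form behaves, on made-up data - illustrative only:

from youtube_dl.utils import T, int_or_none, traverse_obj, url_or_none

data = {'media': [{'url': 'https://i.imgur.com/x.mp4', 'width': '640', 'size': 12345}]}
fmt = traverse_obj(data, ('media', 0, {
    'url': ('url', T(url_or_none)),      # keep only if it is a valid URL
    'width': ('width', T(int_or_none)),  # coerce, drop if not numeric
    'filesize': ('size', T(int_or_none)),
}))
print(fmt)  # {'url': 'https://i.imgur.com/x.mp4', 'width': 640, 'filesize': 12345}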

+ 8 - 1
youtube_dl/extractor/infoq.py

@@ -1,6 +1,9 @@
 # coding: utf-8

 from __future__ import unicode_literals
+from ..utils import (
+    ExtractorError,
+)

 from ..compat import (
     compat_b64decode,
@@ -90,7 +93,11 @@ class InfoQIE(BokeCCBaseIE):
         }]

     def _extract_http_audio(self, webpage, video_id):
-        fields = self._form_hidden_inputs('mp3Form', webpage)
+        try:
+            fields = self._form_hidden_inputs('mp3Form', webpage)
+        except ExtractorError:
+            fields = {}
+
         http_audio_url = fields.get('filename')
         if not http_audio_url:
             return []
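The infoq fix is a defensive pattern: a page without the mp3Form should mean "no HTTP audio formats", not a failed extraction. The shape of it in isolation (the parse function below is a hypothetical stand-in for _form_hidden_inputs):

def parse_mp3_form(webpage):
    # stand-in for _form_hidden_inputs('mp3Form', webpage)
    raise LookupError('no mp3Form on this page')

try:
    fields = parse_mp3_form('<html></html>')
except LookupError:  # ExtractorError in the real code
    fields = {}

print(fields.get('filename'))  # None, so _extract_http_audio returns []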

+ 298 - 82
youtube_dl/extractor/itv.py

@@ -3,123 +3,266 @@ from __future__ import unicode_literals

 import json
 import re
+import sys

 from .common import InfoExtractor
 from .brightcove import BrightcoveNewIE
+from ..compat import (
+    compat_HTTPError,
+    compat_integer_types,
+    compat_kwargs,
+    compat_urlparse,
+)
 from ..utils import (
     clean_html,
     determine_ext,
+    error_to_compat_str,
     extract_attributes,
-    get_element_by_class,
-    JSON_LD_RE,
+    ExtractorError,
+    get_element_by_attribute,
+    int_or_none,
     merge_dicts,
     parse_duration,
+    parse_iso8601,
+    remove_start,
     smuggle_url,
+    strip_or_none,
+    traverse_obj,
     url_or_none,
+    urljoin,
 )


-class ITVIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?itv\.com/hub/[^/]+/(?P<id>[0-9a-zA-Z]+)'
-    _GEO_COUNTRIES = ['GB']
+class ITVBaseIE(InfoExtractor):
+
+    def _search_nextjs_data(self, webpage, video_id, **kw):
+        transform_source = kw.pop('transform_source', None)
+        fatal = kw.pop('fatal', True)
+        return self._parse_json(
+            self._search_regex(
+                r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''',
+                webpage, 'next.js data', group='js', fatal=fatal, **kw),
+            video_id, transform_source=transform_source, fatal=fatal)
+
+    def __handle_request_webpage_error(self, err, video_id=None, errnote=None, fatal=True):
+        if errnote is False:
+            return False
+        if errnote is None:
+            errnote = 'Unable to download webpage'
+
+        errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
+        if fatal:
+            raise ExtractorError(errmsg, sys.exc_info()[2], cause=err, video_id=video_id)
+        else:
+            self._downloader.report_warning(errmsg)
+            return False
+
+    @staticmethod
+    def _vanilla_ua_header():
+        return {'User-Agent': 'Mozilla/5.0'}
+
+    def _download_webpage_handle(self, url, video_id, *args, **kwargs):
+        # specialised to (a) use vanilla UA (b) detect geo-block
+        params = self._downloader.params
+        nkwargs = {}
+        if (
+                'user_agent' not in params
+                and not any(re.match(r'(?i)user-agent\s*:', h)
+                            for h in (params.get('headers') or []))
+                and 'User-Agent' not in (kwargs.get('headers') or {})):
+
+            kwargs.setdefault('headers', {})
+            kwargs['headers'] = self._vanilla_ua_header()
+            nkwargs = kwargs
+        if kwargs.get('expected_status') is not None:
+            exp = kwargs['expected_status']
+            if isinstance(exp, compat_integer_types):
+                exp = [exp]
+            if isinstance(exp, (list, tuple)) and 403 not in exp:
+                kwargs['expected_status'] = [403]
+                kwargs['expected_status'].extend(exp)
+                nkwargs = kwargs
+        else:
+            kwargs['expected_status'] = 403
+            nkwargs = kwargs
+
+        if nkwargs:
+            kwargs = compat_kwargs(kwargs)
+
+        ret = super(ITVBaseIE, self)._download_webpage_handle(url, video_id, *args, **kwargs)
+        if ret is False:
+            return ret
+        webpage, urlh = ret
+
+        if urlh.getcode() == 403:
+            # geo-block error is like this, with an unnecessary 'Of':
+            # '{\n  "Message" : "Request Originated Outside Of Allowed Geographic Region",\
+            # \n  "TransactionId" : "oas-magni-475082-xbYF0W"\n}'
+            if '"Request Originated Outside Of Allowed Geographic Region"' in webpage:
+                self.raise_geo_restricted(countries=['GB'])
+            ret = self.__handle_request_webpage_error(
+                compat_HTTPError(urlh.geturl(), 403, 'HTTP Error 403: Forbidden', urlh.headers, urlh),
+                fatal=kwargs.get('fatal'))
+
+        return ret
+
+
+class ITVIE(ITVBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?itv\.com/(?:(?P<w>watch)|hub)/[^/]+/(?(w)[\w-]+/)(?P<id>\w+)'
+    _IE_DESC = 'ITVX'
     _TESTS = [{
+        'note': 'Hub URLs redirect to ITVX',
         'url': 'https://www.itv.com/hub/liar/2a4547a0012',
-        'info_dict': {
-            'id': '2a4547a0012',
-            'ext': 'mp4',
-            'title': 'Liar - Series 2 - Episode 6',
-            'description': 'md5:d0f91536569dec79ea184f0a44cca089',
-            'series': 'Liar',
-            'season_number': 2,
-            'episode_number': 6,
-        },
-        'params': {
-            # m3u8 download
-            'skip_download': True,
-        },
+        'only_matching': True,
     }, {
-        # unavailable via data-playlist-url
+        'note': 'Hub page unavailable via data-playlist-url (404 now)',
         'url': 'https://www.itv.com/hub/through-the-keyhole/2a2271a0033',
         'only_matching': True,
     }, {
-        # InvalidVodcrid
+        'note': 'Hub page with InvalidVodcrid (404 now)',
         'url': 'https://www.itv.com/hub/james-martins-saturday-morning/2a5159a0034',
         'only_matching': True,
     }, {
-        # ContentUnavailable
+        'note': 'Hub page with ContentUnavailable (404 now)',
         'url': 'https://www.itv.com/hub/whos-doing-the-dishes/2a2898a0024',
         'only_matching': True,
-    }]
+    }, {
+        'note': 'ITVX, or itvX, show',
+        'url': 'https://www.itv.com/watch/vera/1a7314/1a7314a0014',
+        'md5': 'bd0ad666b2c058fffe7d036785880064',
+        'info_dict': {
+            'id': '1a7314a0014',
+            'ext': 'mp4',
+            'title': 'Vera - Series 3 - Episode 4 - Prodigal Son',
+            'description': 'Vera and her team investigate the fatal stabbing of an ex-Met police officer outside a busy Newcastle nightclub - but there aren\'t many clues.',
+            'timestamp': 1653591600,
+            'upload_date': '20220526',
+            'uploader': 'ITVX',
+            'thumbnail': r're:https://\w+\.itv\.com/images/(?:\w+/)+\d+x\d+\?',
+            'duration': 5340.8,
+            'age_limit': 16,
+            'series': 'Vera',
+            'series_number': 3,
+            'episode': 'Prodigal Son',
+            'episode_number': 4,
+            'channel': 'ITV3',
+            'categories': list,
+        },
+        'params': {
+            # m3u8 download
+            # 'skip_download': True,
+        },
+        'skip': 'only available in UK',
+    }, {
+        'note': 'Latest ITV news bulletin: details change daily',
+        'url': 'https://www.itv.com/watch/news/varies-but-is-not-checked/6js5d0f',
+        'info_dict': {
+            'id': '6js5d0f',
+            'ext': 'mp4',
+            'title': r're:The latest ITV News headlines - \S.+',
+            'description': r'''re:.* today's top stories from the ITV News team.$''',
+            'timestamp': int,
+            'upload_date': r're:2\d\d\d(?:0[1-9]|1[0-2])(?:[012][1-9]|3[01])',
+            'uploader': 'ITVX',
+            'thumbnail': r're:https://images\.ctfassets\.net/(?:\w+/)+[\w.]+\.(?:jpg|png)',
+            'duration': float,
+            'age_limit': None,
+        },
+        'params': {
+            # variable download
+            # 'skip_download': True,
+        },
+        'skip': 'only available in UK',
+    }
+    ]
+
+    def _og_extract(self, webpage, require_title=False):
+        return {
+            'title': self._og_search_title(webpage, fatal=require_title),
+            'description': self._og_search_description(webpage, default=None),
+            'thumbnail': self._og_search_thumbnail(webpage, default=None),
+            'uploader': self._og_search_property('site_name', webpage, default=None),
+        }

     def _real_extract(self, url):
         video_id = self._match_id(url)
+
         webpage = self._download_webpage(url, video_id)
+
+        # now quite different params!
         params = extract_attributes(self._search_regex(
-            r'(?s)(<[^>]+id="video"[^>]*>)', webpage, 'params'))
+            r'''(<[^>]+\b(?:class|data-testid)\s*=\s*("|')genie-container\2[^>]*>)''',
+            webpage, 'params'))
+
+        ios_playlist_url = traverse_obj(
+            params, 'data-video-id', 'data-video-playlist',
+            get_all=False, expected_type=url_or_none)

-        ios_playlist_url = params.get('data-video-playlist') or params['data-video-id']
-        hmac = params['data-video-hmac']
         headers = self.geo_verification_headers()
         headers.update({
             'Accept': 'application/vnd.itv.vod.playlist.v2+json',
             'Content-Type': 'application/json',
-            'hmac': hmac.upper(),
         })
         ios_playlist = self._download_json(
             ios_playlist_url, video_id, data=json.dumps({
                 'user': {
-                    'itvUserId': '',
                     'entitlements': [],
-                    'token': ''
                 },
                 'device': {
-                    'manufacturer': 'Safari',
-                    'model': '5',
+                    'manufacturer': 'Mobile Safari',
+                    'model': '5.1',
                     'os': {
-                        'name': 'Windows NT',
-                        'version': '6.1',
-                        'type': 'desktop'
+                        'name': 'iOS',
+                        'version': '5.0',
+                        'type': ' mobile'
                     }
                 },
                 'client': {
                     'version': '4.1',
-                    'id': 'browser'
+                    'id': 'browser',
+                    'supportsAdPods': True,
+                    'service': 'itv.x',
+                    'appversion': '2.43.28',
                 },
                 'variantAvailability': {
+                    'player': 'hls',
                     'featureset': {
                         'min': ['hls', 'aes', 'outband-webvtt'],
                         'max': ['hls', 'aes', 'outband-webvtt']
                     },
-                    'platformTag': 'dotcom'
+                    'platformTag': 'mobile'
                 }
             }).encode(), headers=headers)
         video_data = ios_playlist['Playlist']['Video']
-        ios_base_url = video_data.get('Base')
+        ios_base_url = traverse_obj(video_data, 'Base', expected_type=url_or_none)
+
+        media_url = (
+            (lambda u: url_or_none(urljoin(ios_base_url, u)))
+            if ios_base_url else url_or_none)
 
 
         formats = []
-        for media_file in (video_data.get('MediaFiles') or []):
-            href = media_file.get('Href')
+        for media_file in traverse_obj(video_data, 'MediaFiles', expected_type=list) or []:
+            href = traverse_obj(media_file, 'Href', expected_type=media_url)
             if not href:
                 continue
-            if ios_base_url:
-                href = ios_base_url + href
             ext = determine_ext(href)
             if ext == 'm3u8':
                 formats.extend(self._extract_m3u8_formats(
-                    href, video_id, 'mp4', entry_protocol='m3u8_native',
+                    href, video_id, 'mp4', entry_protocol='m3u8',
                     m3u8_id='hls', fatal=False))
+
             else:
                 formats.append({
                     'url': href,
                 })
         self._sort_formats(formats)
+        for f in formats:
+            f.setdefault('http_headers', {})
+            f['http_headers'].update(self._vanilla_ua_header())

         subtitles = {}
-        subs = video_data.get('Subtitles') or []
-        for sub in subs:
-            if not isinstance(sub, dict):
-                continue
-            href = url_or_none(sub.get('Href'))
+        for sub in traverse_obj(video_data, 'Subtitles', expected_type=list) or []:
+            href = traverse_obj(sub, 'Href', expected_type=url_or_none)
             if not href:
                 continue
             subtitles.setdefault('en', []).append({
@@ -127,59 +270,132 @@ class ITVIE(InfoExtractor):
                 'ext': determine_ext(href, 'vtt'),
             })

-        info = self._search_json_ld(webpage, video_id, default={})
-        if not info:
-            json_ld = self._parse_json(self._search_regex(
-                JSON_LD_RE, webpage, 'JSON-LD', '{}',
-                group='json_ld'), video_id, fatal=False)
-            if json_ld and json_ld.get('@type') == 'BreadcrumbList':
-                for ile in (json_ld.get('itemListElement:') or []):
-                    item = ile.get('item:') or {}
-                    if item.get('@type') == 'TVEpisode':
-                        item['@context'] = 'http://schema.org'
-                        info = self._json_ld(item, video_id, fatal=False) or {}
-                        break
+        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default='{}')
+        video_data.update(traverse_obj(next_data, ('props', 'pageProps', ('title', 'episode')), expected_type=dict)[0] or {})
+        title = traverse_obj(video_data, 'headerTitle', 'episodeTitle')
+        info = self._og_extract(webpage, require_title=not title)
+        tn = info.pop('thumbnail', None)
+        if tn:
+            info['thumbnails'] = [{'url': tn}]
+
+        # num. episode title
+        num_ep_title = video_data.get('numberedEpisodeTitle')
+        if not num_ep_title:
+            num_ep_title = clean_html(get_element_by_attribute('data-testid', 'episode-hero-description-strong', webpage))
+            num_ep_title = num_ep_title and num_ep_title.rstrip(' -')
+        ep_title = strip_or_none(
+            video_data.get('episodeTitle')
+            or (num_ep_title.split('.', 1)[-1] if num_ep_title else None))
+        title = title or re.sub(r'\s+-\s+ITVX$', '', info['title'])
+        if ep_title and ep_title != title:
+            title = title + ' - ' + ep_title
+
+        def get_thumbnails():
+            tns = []
+            for w, x in (traverse_obj(video_data, ('imagePresets'), expected_type=dict) or {}).items():
+                if isinstance(x, dict):
+                    for y, z in x.items():
+                        tns.append({'id': w + '_' + y, 'url': z})
+            return tns or None
+
+        video_str = lambda *x: traverse_obj(
+            video_data, *x, get_all=False, expected_type=strip_or_none)
 
 
         return merge_dicts({
         return merge_dicts({
             'id': video_id,
+            'title': title,
             'formats': formats,
             'formats': formats,
             'subtitles': subtitles,
             'subtitles': subtitles,
-            'duration': parse_duration(video_data.get('Duration')),
-            'description': clean_html(get_element_by_class('episode-info__synopsis', webpage)),
+            # parsing hh:mm:ss:nnn not yet patched
+            'duration': parse_duration(re.sub(r'(\d{2})(:)(\d{3}$)', r'\1.\3', video_data.get('Duration') or '')),
+            'description': video_str('synopsis'),
+            'timestamp': traverse_obj(video_data, 'broadcastDateTime', 'dateTime', expected_type=parse_iso8601),
+            'thumbnails': get_thumbnails(),
+            'series': video_str('showTitle', 'programmeTitle'),
+            'series_number': int_or_none(video_data.get('seriesNumber')),
+            'episode': ep_title,
+            'episode_number': int_or_none((num_ep_title or '').split('.')[0]),
+            'channel': video_str('channel'),
+            'categories': traverse_obj(video_data, ('categories', 'formatted'), expected_type=list),
+            'age_limit': {False: 16, True: 0}.get(video_data.get('isChildrenCategory')),
         }, info)


-class ITVBTCCIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?itv\.com/btcc/(?:[^/]+/)*(?P<id>[^/?#&]+)'
-    _TEST = {
-        'url': 'http://www.itv.com/btcc/races/btcc-2018-all-the-action-from-brands-hatch',
+class ITVBTCCIE(ITVBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?itv\.com/(?!(?:watch|hub)/)(?:[^/]+/)+(?P<id>[^/?#&]+)'
+    _IE_DESC = 'ITV articles: News, British Touring Car Championship'
+    _TESTS = [{
+        'note': 'British Touring Car Championship',
+        'url': 'https://www.itv.com/btcc/articles/btcc-2018-all-the-action-from-brands-hatch',
         'info_dict': {
             'id': 'btcc-2018-all-the-action-from-brands-hatch',
             'title': 'BTCC 2018: All the action from Brands Hatch',
         },
         'playlist_mincount': 9,
-    }
-    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1582188683001/HkiHLnNRx_default/index.html?videoId=%s'
+    }, {
+        'note': 'redirects to /btcc/articles/...',
+        'url': 'http://www.itv.com/btcc/races/btcc-2018-all-the-action-from-brands-hatch',
+        'only_matching': True,
+    }, {
+        'note': 'news article',
+        'url': 'https://www.itv.com/news/wales/2020-07-23/sean-fletcher-shows-off-wales-coastline-in-new-itv-series-as-british-tourists-opt-for-staycations',
+        'info_dict': {
+            'id': 'sean-fletcher-shows-off-wales-coastline-in-new-itv-series-as-british-tourists-opt-for-staycations',
+            'title': '''Sean Fletcher on why Wales' coastline should be your 'staycation' destination | ITV News''',
+        },
+        'playlist_mincount': 1,
+    }]
+
+    # should really be a class var of the BC IE
+    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
+    BRIGHTCOVE_ACCOUNT = '1582188683001'
+    BRIGHTCOVE_PLAYER = 'HkiHLnNRx'

     def _real_extract(self, url):
         playlist_id = self._match_id(url)

-        webpage = self._download_webpage(url, playlist_id)
-
-        entries = [
-            self.url_result(
-                smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % video_id, {
-                    # ITV does not like some GB IP ranges, so here are some
-                    # IP blocks it accepts
-                    'geo_ip_blocks': [
-                        '193.113.0.0/16', '54.36.162.0/23', '159.65.16.0/21'
-                    ],
-                    'referrer': url,
-                }),
-                ie=BrightcoveNewIE.ie_key(), video_id=video_id)
-            for video_id in re.findall(r'data-video-id=["\'](\d+)', webpage)]
+        webpage, urlh = self._download_webpage_handle(url, playlist_id)
+        link = compat_urlparse.urlparse(urlh.geturl()).path.strip('/')
+
+        next_data = self._search_nextjs_data(webpage, playlist_id, fatal=False, default='{}')
+        path_prefix = compat_urlparse.urlparse(next_data.get('assetPrefix') or '').path.strip('/')
+        link = remove_start(link, path_prefix).strip('/')
+
+        content = traverse_obj(
+            next_data, ('props', 'pageProps', Ellipsis),
+            expected_type=lambda x: x if x['link'] == link else None,
+            get_all=False, default={})
+        content = traverse_obj(
+            content, ('body', 'content', Ellipsis, 'data'),
+            expected_type=lambda x: x if x.get('name') == 'Brightcove' or x.get('type') == 'Brightcove' else None)
+
+        contraband = {
+            # ITV does not like some GB IP ranges, so here are some
+            # IP blocks it accepts
+            'geo_ip_blocks': [
+                '193.113.0.0/16', '54.36.162.0/23', '159.65.16.0/21'
+            ],
+            'referrer': urlh.geturl(),
+        }
+
+        def entries():
+
+            for data in content or []:
+                video_id = data.get('id')
+                if not video_id:
+                    continue
+                account = data.get('accountId') or self.BRIGHTCOVE_ACCOUNT
+                player = data.get('playerId') or self.BRIGHTCOVE_PLAYER
+                yield self.url_result(
+                    smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % (account, player, video_id), contraband),
+                    ie=BrightcoveNewIE.ie_key(), video_id=video_id)
+
+            # obsolete ?
+            for video_id in re.findall(r'''data-video-id=["'](\d+)''', webpage):
+                yield self.url_result(
+                    smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % (self.BRIGHTCOVE_ACCOUNT, self.BRIGHTCOVE_PLAYER, video_id), contraband),
+                    ie=BrightcoveNewIE.ie_key(), video_id=video_id)

         title = self._og_search_title(webpage, fatal=False)

-        return self.playlist_result(entries, playlist_id, title)
+        return self.playlist_result(entries(), playlist_id, title)
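The expected_status juggling in ITVBaseIE._download_webpage_handle above reduces to one rule: HTTP 403 must always be treated as expected so the geo-block message in the response body can be inspected. A minimal sketch of that normalisation, outside the extractor (the helper name is hypothetical):

def merge_expected_status(expected_status):
    # normalise None / int / list / tuple into a list that includes 403
    if expected_status is None:
        return [403]
    if isinstance(expected_status, int):
        expected_status = [expected_status]
    expected_status = list(expected_status)
    return expected_status if 403 in expected_status else [403] + expected_status

print(merge_expected_status(None))        # [403]
print(merge_expected_status(404))         # [403, 404]
print(merge_expected_status((403, 500)))  # [403, 500]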

+ 1 - 1
youtube_dl/extractor/kaltura.py

@@ -373,5 +373,5 @@ class KalturaIE(InfoExtractor):
             'duration': info.get('duration'),
             'timestamp': info.get('createdAt'),
             'uploader_id': info.get('userId') if info.get('userId') != 'None' else None,
-            'view_count': info.get('plays'),
+            'view_count': int_or_none(info.get('plays')),
         }
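The one-line kaltura change guards against non-numeric 'plays' values: int_or_none (youtube_dl.utils) swallows ValueError/TypeError and returns None instead of raising:

from youtube_dl.utils import int_or_none

print(int_or_none('1234'))  # 1234
print(int_or_none('None'))  # None
print(int_or_none(None))    # None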

+ 35 - 0
youtube_dl/extractor/kommunetv.py

@@ -0,0 +1,35 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import update_url
+
+
+class KommunetvIE(InfoExtractor):
+    _VALID_URL = r'https://(\w+).kommunetv.no/archive/(?P<id>\w+)'
+    _TEST = {
+        'url': 'https://oslo.kommunetv.no/archive/921',
+        'md5': '5f102be308ee759be1e12b63d5da4bbc',
+        'info_dict': {
+            'id': '921',
+            'title': 'Bystyremøte',
+            'ext': 'mp4'
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        headers = {
+            'Accept': 'application/json'
+        }
+        data = self._download_json('https://oslo.kommunetv.no/api/streams?streamType=1&id=%s' % video_id, video_id, headers=headers)
+        title = data['stream']['title']
+        file = data['playlist'][0]['playlist'][0]['file']
+        url = update_url(file, query=None, fragment=None)
+        formats = self._extract_m3u8_formats(url, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
+        self._sort_formats(formats)
+        return {
+            'id': video_id,
+            'formats': formats,
+            'title': title
+        }
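KommunetvIE strips the query and fragment from the playlist file URL before passing it to the m3u8 extractor. A rough standalone equivalent of that update_url(file, query=None, fragment=None) call, using only the standard library (the example URL is made up):

try:
    from urllib.parse import urlsplit, urlunsplit  # Python 3
except ImportError:
    from urlparse import urlsplit, urlunsplit  # Python 2

def strip_query_and_fragment(url):
    parts = urlsplit(url)
    return urlunsplit((parts.scheme, parts.netloc, parts.path, '', ''))

print(strip_query_and_fragment('https://example.com/stream.m3u8?token=x#t=10'))
# https://example.com/stream.m3u8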

+ 31 - 0
youtube_dl/extractor/kth.py

@@ -0,0 +1,31 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import smuggle_url
+
+
+class KTHIE(InfoExtractor):
+    _VALID_URL = r'https?://play\.kth\.se/(?:[^/]+/)+(?P<id>[a-z0-9_]+)'
+    _TEST = {
+        'url': 'https://play.kth.se/media/Lunch+breakA+De+nya+aff%C3%A4rerna+inom+Fordonsdalen/0_uoop6oz9',
+        'md5': 'd83ada6d00ca98b73243a88efe19e8a6',
+        'info_dict': {
+            'id': '0_uoop6oz9',
+            'ext': 'mp4',
+            'title': 'md5:bd1d6931facb6828762a33e6ce865f37',
+            'thumbnail': 're:https?://.+/thumbnail/.+',
+            'duration': 3516,
+            'timestamp': 1647345358,
+            'upload_date': '20220315',
+            'uploader_id': 'md5:0ec23e33a89e795a4512930c8102509f',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        result = self.url_result(
+            smuggle_url('kaltura:308:%s' % video_id, {
+                'service_url': 'https://api.kaltura.nordu.net'}),
+            'Kaltura')
+        return result
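KTHIE is a thin wrapper that hands off to the Kaltura extractor, smuggling the non-default API endpoint along with the URL. smuggle_url/unsmuggle_url (youtube_dl.utils) round-trip extra data through the URL fragment:

from youtube_dl.utils import smuggle_url, unsmuggle_url

u = smuggle_url('kaltura:308:0_uoop6oz9',
                {'service_url': 'https://api.kaltura.nordu.net'})
url, data = unsmuggle_url(u)
print(url)   # kaltura:308:0_uoop6oz9
print(data)  # {'service_url': 'https://api.kaltura.nordu.net'}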

+ 99 - 24
youtube_dl/extractor/manyvids.py

@@ -1,11 +1,16 @@
 # coding: utf-8
 from __future__ import unicode_literals

+import re
+
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
     determine_ext,
+    extract_attributes,
     int_or_none,
     str_to_int,
+    url_or_none,
     urlencode_postdata,
 )

@@ -20,17 +25,20 @@ class ManyVidsIE(InfoExtractor):
             'id': '133957',
             'ext': 'mp4',
             'title': 'everthing about me (Preview)',
+            'uploader': 'ellyxxix',
             'view_count': int,
             'like_count': int,
         },
     }, {
         # full video
         'url': 'https://www.manyvids.com/Video/935718/MY-FACE-REVEAL/',
-        'md5': 'f3e8f7086409e9b470e2643edb96bdcc',
+        'md5': 'bb47bab0e0802c2a60c24ef079dfe60f',
         'info_dict': {
             'id': '935718',
             'ext': 'mp4',
             'title': 'MY FACE REVEAL',
+            'description': 'md5:ec5901d41808b3746fed90face161612',
+            'uploader': 'Sarah Calanthe',
             'view_count': int,
             'like_count': int,
         },
@@ -39,17 +47,50 @@ class ManyVidsIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)

-        webpage = self._download_webpage(url, video_id)
+        real_url = 'https://www.manyvids.com/video/%s/gtm.js' % (video_id, )
+        try:
+            webpage = self._download_webpage(real_url, video_id)
+        except Exception:
+            # probably useless fallback
+            webpage = self._download_webpage(url, video_id)
+
+        info = self._search_regex(
+            r'''(<div\b[^>]*\bid\s*=\s*(['"])pageMetaDetails\2[^>]*>)''',
+            webpage, 'meta details', default='')
+        info = extract_attributes(info)
+
+        player = self._search_regex(
+            r'''(<div\b[^>]*\bid\s*=\s*(['"])rmpPlayerStream\2[^>]*>)''',
+            webpage, 'player details', default='')
+        player = extract_attributes(player)
+
+        video_urls_and_ids = (
+            (info.get('data-meta-video'), 'video'),
+            (player.get('data-video-transcoded'), 'transcoded'),
+            (player.get('data-video-filepath'), 'filepath'),
+            (self._og_search_video_url(webpage, secure=False, default=None), 'og_video'),
+        )
+
+        def txt_or_none(s, default=None):
+            return (s.strip() or default) if isinstance(s, compat_str) else default
+
+        uploader = txt_or_none(info.get('data-meta-author'))

-        video_url = self._search_regex(
-            r'data-(?:video-filepath|meta-video)\s*=s*(["\'])(?P<url>(?:(?!\1).)+)\1',
-            webpage, 'video URL', group='url')
+        def mung_title(s):
+            if uploader:
+                s = re.sub(r'^\s*%s\s+[|-]' % (re.escape(uploader), ), '', s)
+            return txt_or_none(s)

-        title = self._html_search_regex(
-            (r'<span[^>]+class=["\']item-title[^>]+>([^<]+)',
-             r'<h2[^>]+class=["\']h2 m-0["\'][^>]*>([^<]+)'),
-            webpage, 'title', default=None) or self._html_search_meta(
-            'twitter:title', webpage, 'title', fatal=True)
+        title = (
+            mung_title(info.get('data-meta-title'))
+            or self._html_search_regex(
+                (r'<span[^>]+class=["\']item-title[^>]+>([^<]+)',
+                 r'<h2[^>]+class=["\']h2 m-0["\'][^>]*>([^<]+)'),
+                webpage, 'title', default=None)
+            or self._html_search_meta(
+                'twitter:title', webpage, 'title', fatal=True))
+
+        title = re.sub(r'\s*[|-]\s+ManyVids\s*$', '', title) or title

         if any(p in webpage for p in ('preview_videos', '_preview.mp4')):
             title += ' (Preview)'
@@ -62,7 +103,8 @@ class ManyVidsIE(InfoExtractor):
             # Sets some cookies
             self._download_webpage(
                 'https://www.manyvids.com/includes/ajax_repository/you_had_me_at_hello.php',
-                video_id, fatal=False, data=urlencode_postdata({
+                video_id, note='Setting format cookies', fatal=False,
+                data=urlencode_postdata({
                     'mvtoken': mv_token,
                     'vid': video_id,
                 }), headers={
@@ -70,23 +112,56 @@ class ManyVidsIE(InfoExtractor):
                     'X-Requested-With': 'XMLHttpRequest'
                 })

-        if determine_ext(video_url) == 'm3u8':
-            formats = self._extract_m3u8_formats(
-                video_url, video_id, 'mp4', entry_protocol='m3u8_native',
-                m3u8_id='hls')
-        else:
-            formats = [{'url': video_url}]
+        formats = []
+        for v_url, fmt in video_urls_and_ids:
+            v_url = url_or_none(v_url)
+            if not v_url:
+                continue
+            if determine_ext(v_url) == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    v_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    m3u8_id='hls'))
+            else:
+                formats.append({
+                    'url': v_url,
+                    'format_id': fmt,
+                })
+
+        self._remove_duplicate_formats(formats)
+
+        for f in formats:
+            if f.get('height') is None:
+                f['height'] = int_or_none(
+                    self._search_regex(r'_(\d{2,3}[02468])_', f['url'], 'video height', default=None))
+            if '/preview/' in f['url']:
+                f['format_id'] = '_'.join(filter(None, (f.get('format_id'), 'preview')))
+                f['preference'] = -10
+            if 'transcoded' in f['format_id']:
+                f['preference'] = f.get('preference', -1) - 1
+
+        self._sort_formats(formats)
+
+        def get_likes():
+            likes = self._search_regex(
+                r'''(<a\b[^>]*\bdata-id\s*=\s*(['"])%s\2[^>]*>)''' % (video_id, ),
+                webpage, 'likes', default='')
+            likes = extract_attributes(likes)
+            return int_or_none(likes.get('data-likes'))

-        like_count = int_or_none(self._search_regex(
-            r'data-likes=["\'](\d+)', webpage, 'like count', default=None))
-        view_count = str_to_int(self._html_search_regex(
-            r'(?s)<span[^>]+class="views-wrapper"[^>]*>(.+?)</span', webpage,
-            'view count', default=None))
+        def get_views():
+            return str_to_int(self._html_search_regex(
+                r'''(?s)<span\b[^>]*\bclass\s*=["']views-wrapper\b[^>]+>.+?<span\b[^>]+>\s*(\d[\d,.]*)\s*</span>''',
+                webpage, 'view count', default=None))

         return {
             'id': video_id,
             'title': title,
-            'view_count': view_count,
-            'like_count': like_count,
             'formats': formats,
+            'description': txt_or_none(info.get('data-meta-description')),
+            'uploader': txt_or_none(info.get('data-meta-author')),
+            'thumbnail': (
+                url_or_none(info.get('data-meta-image'))
+                or url_or_none(player.get('data-video-screenshot'))),
+            'view_count': get_views(),
+            'like_count': get_likes(),
         }
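Among the manyvids changes, missing format heights are inferred from the media URL: the pattern matches an even two-to-three digit number between underscores (e.g. ..._720_...). The same logic in isolation (the URLs below are made up):

import re

def height_from_url(url):
    m = re.search(r'_(\d{2,3}[02468])_', url)
    return int(m.group(1)) if m else None

print(height_from_url('https://cdn.example/video_720_preview.mp4'))  # 720
print(height_from_url('https://cdn.example/video.mp4'))              # None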

+ 5 - 1
youtube_dl/extractor/mediaset.py

@@ -24,7 +24,7 @@ class MediasetIE(ThePlatformBaseIE):
                             (?:(?:www|static3)\.)?mediasetplay\.mediaset\.it/
                             (?:
                                 (?:video|on-demand|movie)/(?:[^/]+/)+[^/]+_|
-                                player/index\.html\?.*?\bprogramGuid=
+                                player(?:/v\d+)?/index\.html\?.*?\bprogramGuid=
                             )
                     )(?P<id>[0-9A-Z]{16,})
                     '''
@@ -73,6 +73,10 @@ class MediasetIE(ThePlatformBaseIE):
         # iframe twitter (from http://www.wittytv.it/se-prima-mi-fidavo-zero/)
         'url': 'https://static3.mediasetplay.mediaset.it/player/index.html?appKey=5ad3966b1de1c4000d5cec48&programGuid=FAFU000000665104&id=665104',
         'only_matching': True,
+    }, {
+        # embedUrl (from https://www.wittytv.it/amici/est-ce-que-tu-maimes-gabriele-5-dicembre-copia/)
+        'url': 'https://static3.mediasetplay.mediaset.it/player/v2/index.html?partnerId=wittytv&configId=&programGuid=FD00000000153323&autoplay=true&purl=http://www.wittytv.it/amici/est-ce-que-tu-maimes-gabriele-5-dicembre-copia/',
+        'only_matching': True,
     }, {
     }, {
         'url': 'mediaset:FAFU000000665924',
         'url': 'mediaset:FAFU000000665924',
         'only_matching': True,
         'only_matching': True,

+ 1 - 1
youtube_dl/extractor/minds.py

@@ -78,7 +78,7 @@ class MindsIE(MindsBaseIE):
             else:
                 return self.url_result(entity['perma_url'])
         else:
-            assert(entity['subtype'] == 'video')
+            assert (entity['subtype'] == 'video')
             video_id = entity_id
         # 1080p and webm formats available only on the sources array
         video = self._call_api(

+ 24 - 6
youtube_dl/extractor/mixcloud.py

@@ -1,3 +1,4 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import itertools
@@ -10,7 +11,7 @@ from ..compat import (
     compat_ord,
     compat_str,
     compat_urllib_parse_unquote,
-    compat_zip
+    compat_zip as zip,
 )
 from ..utils import (
     int_or_none,
@@ -24,7 +25,7 @@ class MixcloudBaseIE(InfoExtractor):
     def _call_api(self, object_type, object_fields, display_id, username, slug=None):
         lookup_key = object_type + 'Lookup'
         return self._download_json(
-            'https://www.mixcloud.com/graphql', display_id, query={
+            'https://app.mixcloud.com/graphql', display_id, query={
                 'query': '''{
   %s(lookup: {username: "%s"%s}) {
     %s
@@ -44,7 +45,7 @@ class MixcloudIE(MixcloudBaseIE):
             'ext': 'm4a',
             'title': 'Cryptkeeper',
             'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
-            'uploader': 'Daniel Holbach',
+            'uploader': 'dholbach',  # was: 'Daniel Holbach',
             'uploader_id': 'dholbach',
             'thumbnail': r're:https?://.*\.jpg',
             'view_count': int,
@@ -57,7 +58,7 @@ class MixcloudIE(MixcloudBaseIE):
             'id': 'gillespeterson_caribou-7-inch-vinyl-mix-chat',
             'ext': 'mp3',
             'title': 'Caribou 7 inch Vinyl Mix & Chat',
-            'description': 'md5:2b8aec6adce69f9d41724647c65875e8',
+            'description': r're:Last week Dan Snaith aka Caribou swung by the Brownswood.{136}',
             'uploader': 'Gilles Peterson Worldwide',
             'uploader_id': 'gillespeterson',
             'thumbnail': 're:https?://.*',
@@ -65,6 +66,23 @@ class MixcloudIE(MixcloudBaseIE):
             'timestamp': 1422987057,
             'upload_date': '20150203',
         },
+        'params': {
+            'skip_download': '404 not found',
+        },
+    }, {
+        'url': 'https://www.mixcloud.com/gillespeterson/carnival-m%C3%BAsica-popular-brasileira-mix/',
+        'info_dict': {
+            'id': 'gillespeterson_carnival-música-popular-brasileira-mix',
+            'ext': 'm4a',
+            'title': 'Carnival Música Popular Brasileira Mix',
+            'description': r're:Gilles was recently in Brazil to play at Boiler Room.{208}',
+            'timestamp': 1454347174,
+            'upload_date': '20160201',
+            'uploader': 'Gilles Peterson Worldwide',
+            'uploader_id': 'gillespeterson',
+            'thumbnail': 're:https?://.*',
+            'view_count': int,
+        },
     }, {
         'url': 'https://beta.mixcloud.com/RedLightRadio/nosedrip-15-red-light-radio-01-18-2016/',
         'only_matching': True,
@@ -76,10 +94,10 @@ class MixcloudIE(MixcloudBaseIE):
         """Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
         return ''.join([
             compat_chr(compat_ord(ch) ^ compat_ord(k))
-            for ch, k in compat_zip(ciphertext, itertools.cycle(key))])
+            for ch, k in zip(ciphertext, itertools.cycle(key))])
 
     def _real_extract(self, url):
-        username, slug = re.match(self._VALID_URL, url).groups()
+        username, slug = self._match_valid_url(url).groups()
         username, slug = compat_urllib_parse_unquote(username), compat_urllib_parse_unquote(slug)
         track_id = '%s_%s' % (username, slug)
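
The XOR stream decoding in _decrypt_xor_cipher above is its own inverse, so the same helper both encrypts and decrypts. A minimal self-contained sketch; the key and URL are made up for illustration (the real key is taken from the page):

    import itertools

    def xor_cipher(key, data):
        # XOR each character of data with the key, repeating the key as needed;
        # applying the same key twice returns the original input
        return ''.join(
            chr(ord(ch) ^ ord(k))
            for ch, k in zip(data, itertools.cycle(key)))

    key = 'IFYOUWANTTHEARTISTSTOGETPAID'   # illustrative only
    plaintext = 'https://example.com/stream.m4a'
    ciphertext = xor_cipher(key, plaintext)
    assert xor_cipher(key, ciphertext) == plaintext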
 
 

+ 31 - 11
youtube_dl/extractor/motherless.py

@@ -1,3 +1,4 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import datetime
@@ -71,7 +72,7 @@ class MotherlessIE(InfoExtractor):
             'title': 'a/ Hot Teens',
             'categories': list,
             'upload_date': '20210104',
-            'uploader_id': 'yonbiw',
+            'uploader_id': 'anonymous',
             'thumbnail': r're:https?://.*\.jpg',
             'age_limit': 18,
         },
@@ -125,9 +126,10 @@ class MotherlessIE(InfoExtractor):
                 kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta}
                 upload_date = (datetime.datetime.utcnow() - datetime.timedelta(**kwargs)).strftime('%Y%m%d')
 
-        comment_count = webpage.count('class="media-comment-contents"')
+        comment_count = len(re.findall(r'''class\s*=\s*['"]media-comment-contents\b''', webpage))
         uploader_id = self._html_search_regex(
-            r'"thumb-member-username">\s+<a href="/m/([^"]+)"',
+            (r'''<span\b[^>]+\bclass\s*=\s*["']username\b[^>]*>([^<]+)</span>''',
+             r'''(?s)['"](?:media-meta-member|thumb-member-username)\b[^>]+>\s*<a\b[^>]+\bhref\s*=\s*['"]/m/([^"']+)'''),
            webpage, 'uploader_id')
 
         categories = self._html_search_meta('keywords', webpage, default=None)
@@ -169,7 +171,18 @@ class MotherlessGroupIE(InfoExtractor):
             'description': 'Sex can be funny. Wide smiles,laugh, games, fun of '
                            'any kind!'
         },
-        'playlist_mincount': 9,
+        'playlist_mincount': 0,
+        'expected_warnings': [
+            'This group has no videos.',
+        ]
+    }, {
+        'url': 'https://motherless.com/g/beautiful_cock',
+        'info_dict': {
+            'id': 'beautiful_cock',
+            'title': 'Beautiful Cock',
+            'description': 'Group for lovely cocks yours, mine, a friends anything human',
+        },
+        'playlist_mincount': 2500,
     }]
 
     @classmethod
@@ -208,16 +221,23 @@ class MotherlessGroupIE(InfoExtractor):
             r'<title>([\w\s]+\w)\s+-', webpage, 'title', fatal=False)
         description = self._html_search_meta(
             'description', webpage, fatal=False)
-        page_count = self._int(self._search_regex(
-            r'(\d+)</(?:a|span)><(?:a|span)[^>]+>\s*NEXT',
-            webpage, 'page_count'), 'page_count')
+        page_count = str_to_int(self._search_regex(
+            r'(\d+)\s*</(?:a|span)>\s*<(?:a|span)[^>]+(?:>\s*NEXT|\brel\s*=\s*["\']?next)\b',
+            webpage, 'page_count', default=0))
+        if not page_count:
+            message = self._search_regex(
+                r'''class\s*=\s*['"]error-page\b[^>]*>\s*<p[^>]*>\s*(?P<error_msg>[^<]+)(?<=\S)\s*''',
+                webpage, 'error_msg', default=None) or 'This group has no videos.'
+            self.report_warning(message, group_id)
+            page_count = 1
         PAGE_SIZE = 80
 
         def _get_page(idx):
-            webpage = self._download_webpage(
-                page_url, group_id, query={'page': idx + 1},
-                note='Downloading page %d/%d' % (idx + 1, page_count)
-            )
+            if idx > 0:
+                webpage = self._download_webpage(
+                    page_url, group_id, query={'page': idx + 1},
+                    note='Downloading page %d/%d' % (idx + 1, page_count)
+                )
             for entry in self._extract_entries(webpage, url):
                 yield entry
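
A minimal sketch of the relative-date conversion in the MotherlessIE hunk above; _AGO_UNITS is assumed to map the site's single-letter suffixes to datetime.timedelta keyword names, and the '20h' input is made up for illustration:

    import datetime
    import re

    # assumed mapping, matching the _AGO_UNITS lookup in the hunk above
    _AGO_UNITS = {'h': 'hours', 'd': 'days'}

    uploaded_ago = '20h'                      # e.g. scraped from "20h ago"
    delta = int(re.match(r'\d+', uploaded_ago).group(0))
    kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta}
    # subtract the relative offset from now to get an absolute YYYYMMDD date
    upload_date = (datetime.datetime.utcnow()
                   - datetime.timedelta(**kwargs)).strftime('%Y%m%d')
    print(upload_date)  # e.g. '20240115', depending on the current date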
 
 

+ 3 - 1
youtube_dl/extractor/myspass.py

@@ -35,7 +35,9 @@ class MySpassIE(InfoExtractor):
         title = xpath_text(metadata, 'title', fatal=True)
         video_url = xpath_text(metadata, 'url_flv', 'download url', True)
         video_id_int = int(video_id)
-        for group in re.search(r'/myspass2009/\d+/(\d+)/(\d+)/(\d+)/', video_url).groups():
+
+        grps = re.search(r'/myspass2009/\d+/(\d+)/(\d+)/(\d+)/', video_url)
+        for group in grps.groups() if grps else []:
             group_int = int(group)
             if group_int > video_id_int:
                 video_url = video_url.replace(

+ 87 - 0
youtube_dl/extractor/myvideoge.py

@@ -0,0 +1,87 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    clean_html,
+    get_element_by_id,
+    get_element_by_class,
+    int_or_none,
+    js_to_json,
+    MONTH_NAMES,
+    qualities,
+    unified_strdate,
+)
+
+
+class MyVideoGeIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?myvideo\.ge/v/(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'https://www.myvideo.ge/v/3941048',
+        'md5': '8c192a7d2b15454ba4f29dc9c9a52ea9',
+        'info_dict': {
+            'id': '3941048',
+            'ext': 'mp4',
+            'title': 'The best prikol',
+            'upload_date': '20200611',
+            'thumbnail': r're:^https?://.*\.jpg$',
+            'uploader': 'chixa33',
+            'description': 'md5:5b067801318e33c2e6eea4ab90b1fdd3',
+        },
+        # working from local dev system
+        'skip': 'site blocks CI servers',
+    }
+    _MONTH_NAMES_KA = ['იანვარი', 'თებერვალი', 'მარტი', 'აპრილი', 'მაისი', 'ივნისი', 'ივლისი', 'აგვისტო', 'სექტემბერი', 'ოქტომბერი', 'ნოემბერი', 'დეკემბერი']
+
+    _quality = staticmethod(qualities(('SD', 'HD')))
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = (
+            self._og_search_title(webpage, default=None)
+            or clean_html(get_element_by_class('my_video_title', webpage))
+            or self._html_search_regex(r'<title\b[^>]*>([^<]+)</title\b', webpage, 'title'))
+
+        jwplayer_sources = self._parse_json(
+            self._search_regex(
+                r'''(?s)jwplayer\s*\(\s*['"]mvplayer['"]\s*\)\s*\.\s*setup\s*\(.*?\bsources\s*:\s*(\[.*?])\s*[,});]''', webpage, 'jwplayer sources', fatal=False)
+            or '',
+            video_id, transform_source=js_to_json, fatal=False)
+
+        formats = self._parse_jwplayer_formats(jwplayer_sources or [], video_id)
+        for f in formats or []:
+            f['preference'] = self._quality(f['format_id'])
+        self._sort_formats(formats)
+
+        description = (
+            self._og_search_description(webpage)
+            or get_element_by_id('long_desc_holder', webpage)
+            or self._html_search_meta('description', webpage))
+
+        uploader = self._search_regex(r'<a[^>]+class="mv_user_name"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False)
+
+        upload_date = get_element_by_class('mv_vid_upl_date', webpage)
+        # the ka locale may not be present, so do the date conversion locally
+        upload_date = (unified_strdate(
+            # translate any ka month name to its en equivalent
+            re.sub('|'.join(self._MONTH_NAMES_KA),
+                   lambda m: MONTH_NAMES['en'][self._MONTH_NAMES_KA.index(m.group(0))],
+                   upload_date, flags=re.I))
+            if upload_date else None)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'uploader': uploader,
+            'formats': formats,
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'upload_date': upload_date,
+            'view_count': int_or_none(get_element_by_class('mv_vid_views', webpage)),
+            'like_count': int_or_none(get_element_by_id('likes_count', webpage)),
+            'dislike_count': int_or_none(get_element_by_id('dislikes_count', webpage)),
+        }
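
A self-contained sketch of the Georgian-to-English month mapping used for upload_date above; the lists mirror _MONTH_NAMES_KA and MONTH_NAMES['en'] from youtube_dl.utils, and the scraped date string is made up for illustration:

    import re

    MONTH_NAMES_KA = ['იანვარი', 'თებერვალი', 'მარტი', 'აპრილი', 'მაისი', 'ივნისი',
                      'ივლისი', 'აგვისტო', 'სექტემბერი', 'ოქტომბერი', 'ნოემბერი', 'დეკემბერი']
    MONTH_NAMES_EN = ['January', 'February', 'March', 'April', 'May', 'June',
                      'July', 'August', 'September', 'October', 'November', 'December']

    upload_date = '17 ნოემბერი 2020'
    # swap the Georgian month name for its English equivalent ...
    en_date = re.sub('|'.join(MONTH_NAMES_KA),
                     lambda m: MONTH_NAMES_EN[MONTH_NAMES_KA.index(m.group(0))],
                     upload_date)
    print(en_date)  # 17 November 2020
    # ... after which unified_strdate() can parse it to '20201117'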

+ 121 - 38
youtube_dl/extractor/neteasemusic.py

@@ -1,20 +1,32 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-from hashlib import md5
 from base64 import b64encode
+from binascii import hexlify
 from datetime import datetime
+from hashlib import md5
+from random import randint
+import json
 import re
+import time
 
 from .common import InfoExtractor
+from ..aes import aes_ecb_encrypt, pkcs7_padding
 from ..compat import (
     compat_urllib_parse_urlencode,
     compat_str,
     compat_itertools_count,
 )
 from ..utils import (
-    sanitized_Request,
+    ExtractorError,
+    bytes_to_intlist,
+    error_to_compat_str,
     float_or_none,
+    int_or_none,
+    intlist_to_bytes,
+    sanitized_Request,
+    std_headers,
+    try_get,
 )
 
 
@@ -35,32 +47,106 @@ class NetEaseMusicBaseIE(InfoExtractor):
         result = b64encode(m.digest()).decode('ascii')
         return result.replace('/', '_').replace('+', '-')
 
+    @classmethod
+    def make_player_api_request_data_and_headers(cls, song_id, bitrate):
+        KEY = b'e82ckenh8dichen8'
+        URL = '/api/song/enhance/player/url'
+        now = int(time.time() * 1000)
+        rand = randint(0, 1000)
+        cookie = {
+            'osver': None,
+            'deviceId': None,
+            'appver': '8.0.0',
+            'versioncode': '140',
+            'mobilename': None,
+            'buildver': '1623435496',
+            'resolution': '1920x1080',
+            '__csrf': '',
+            'os': 'pc',
+            'channel': None,
+            'requestId': '{0}_{1:04}'.format(now, rand),
+        }
+        request_text = json.dumps(
+            {'ids': '[{0}]'.format(song_id), 'br': bitrate, 'header': cookie},
+            separators=(',', ':'))
+        message = 'nobody{0}use{1}md5forencrypt'.format(
+            URL, request_text).encode('latin1')
+        msg_digest = md5(message).hexdigest()
+
+        data = '{0}-36cd479b6b5-{1}-36cd479b6b5-{2}'.format(
+            URL, request_text, msg_digest)
+        data = pkcs7_padding(bytes_to_intlist(data))
+        encrypted = intlist_to_bytes(aes_ecb_encrypt(data, bytes_to_intlist(KEY)))
+        encrypted_params = hexlify(encrypted).decode('ascii').upper()
+
+        cookie = '; '.join(
+            ['{0}={1}'.format(k, v if v is not None else 'undefined')
+             for [k, v] in cookie.items()])
+
+        headers = {
+            'User-Agent': std_headers['User-Agent'],
+            'Content-Type': 'application/x-www-form-urlencoded',
+            'Referer': 'https://music.163.com',
+            'Cookie': cookie,
+        }
+        return ('params={0}'.format(encrypted_params), headers)
+
+    def _call_player_api(self, song_id, bitrate):
+        url = 'https://interface3.music.163.com/eapi/song/enhance/player/url'
+        data, headers = self.make_player_api_request_data_and_headers(song_id, bitrate)
+        try:
+            msg = 'empty result'
+            result = self._download_json(
+                url, song_id, data=data.encode('ascii'), headers=headers)
+            if result:
+                return result
+        except ExtractorError as e:
+            if type(e.cause) in (ValueError, TypeError):
+                # JSON load failure
+                raise
+        except Exception as e:
+            msg = error_to_compat_str(e)
+            self.report_warning('%s API call (%s) failed: %s' % (
+                song_id, bitrate, msg))
+        return {}
+
     def extract_formats(self, info):
+        err = 0
         formats = []
+        song_id = info['id']
         for song_format in self._FORMATS:
             details = info.get(song_format)
             if not details:
                 continue
-            song_file_path = '/%s/%s.%s' % (
-                self._encrypt(details['dfsId']), details['dfsId'], details['extension'])
-
-            # 203.130.59.9, 124.40.233.182, 115.231.74.139, etc is a reverse proxy-like feature
-            # from NetEase's CDN provider that can be used if m5.music.126.net does not
-            # work, especially for users outside of Mainland China
-            # via: https://github.com/JixunMoe/unblock-163/issues/3#issuecomment-163115880
-            for host in ('http://m5.music.126.net', 'http://115.231.74.139/m1.music.126.net',
-                         'http://124.40.233.182/m1.music.126.net', 'http://203.130.59.9/m1.music.126.net'):
-                song_url = host + song_file_path
+
+            bitrate = int_or_none(details.get('bitrate')) or 999000
+            data = self._call_player_api(song_id, bitrate)
+            for song in try_get(data, lambda x: x['data'], list) or []:
+                song_url = try_get(song, lambda x: x['url'])
+                if not song_url:
+                    continue
                 if self._is_valid_url(song_url, info['id'], 'song'):
                     formats.append({
                         'url': song_url,
                         'ext': details.get('extension'),
-                        'abr': float_or_none(details.get('bitrate'), scale=1000),
+                        'abr': float_or_none(song.get('br'), scale=1000),
                         'format_id': song_format,
-                        'filesize': details.get('size'),
-                        'asr': details.get('sr')
+                        'filesize': int_or_none(song.get('size')),
+                        'asr': int_or_none(details.get('sr')),
                     })
-                    break
+                elif err == 0:
+                    err = try_get(song, lambda x: x['code'], int)
+
+        if not formats:
+            msg = 'No media links found'
+            if err != 0 and (err < 200 or err >= 400):
+                raise ExtractorError(
+                    '%s (site code %d)' % (msg, err, ), expected=True)
+            else:
+                self.raise_geo_restricted(
+                    msg + ': probably this video is not available from your location due to geo restriction.',
+                    countries=['CN'])
+
         return formats
 
     @classmethod
@@ -76,33 +162,19 @@ class NetEaseMusicBaseIE(InfoExtractor):
 class NetEaseMusicIE(NetEaseMusicBaseIE):
     IE_NAME = 'netease:song'
     IE_DESC = '网易云音乐'
-    _VALID_URL = r'https?://music\.163\.com/(#/)?song\?id=(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://(y\.)?music\.163\.com/(?:[#m]/)?song\?.*?\bid=(?P<id>[0-9]+)'
     _TESTS = [{
         'url': 'http://music.163.com/#/song?id=32102397',
-        'md5': 'f2e97280e6345c74ba9d5677dd5dcb45',
+        'md5': '3e909614ce09b1ccef4a3eb205441190',
         'info_dict': {
             'id': '32102397',
             'ext': 'mp3',
-            'title': 'Bad Blood (feat. Kendrick Lamar)',
+            'title': 'Bad Blood',
             'creator': 'Taylor Swift / Kendrick Lamar',
-            'upload_date': '20150517',
-            'timestamp': 1431878400,
-            'description': 'md5:a10a54589c2860300d02e1de821eb2ef',
-        },
-        'skip': 'Blocked outside Mainland China',
-    }, {
-        'note': 'No lyrics translation.',
-        'url': 'http://music.163.com/#/song?id=29822014',
-        'info_dict': {
-            'id': '29822014',
-            'ext': 'mp3',
-            'title': '听见下雨的声音',
-            'creator': '周杰伦',
-            'upload_date': '20141225',
-            'timestamp': 1419523200,
-            'description': 'md5:a4d8d89f44656af206b7b2555c0bce6c',
+            'upload_date': '20150516',
+            'timestamp': 1431792000,
+            'description': 'md5:25fc5f27e47aad975aa6d36382c7833c',
         },
-        'skip': 'Blocked outside Mainland China',
     }, {
         'note': 'No lyrics.',
         'url': 'http://music.163.com/song?id=17241424',
@@ -112,9 +184,9 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
             'title': 'Opus 28',
             'creator': 'Dustin O\'Halloran',
             'upload_date': '20080211',
+            'description': 'md5:f12945b0f6e0365e3b73c5032e1b0ff4',
             'timestamp': 1202745600,
         },
-        'skip': 'Blocked outside Mainland China',
     }, {
         'note': 'Has translated name.',
         'url': 'http://music.163.com/#/song?id=22735043',
@@ -128,7 +200,18 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
             'timestamp': 1264608000,
             'alt_title': '说出愿望吧(Genie)',
         },
-        'skip': 'Blocked outside Mainland China',
+    }, {
+        'url': 'https://y.music.163.com/m/song?app_version=8.8.45&id=95670&uct2=sKnvS4+0YStsWkqsPhFijw%3D%3D&dlt=0846',
+        'md5': '95826c73ea50b1c288b22180ec9e754d',
+        'info_dict': {
+            'id': '95670',
+            'ext': 'mp3',
+            'title': '国际歌',
+            'creator': '马备',
+            'upload_date': '19911130',
+            'timestamp': 691516800,
+            'description': 'md5:1ba2f911a2b0aa398479f595224f2141',
+        },
     }]
 
     def _process_lyrics(self, lyrics_info):
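
A condensed sketch of the eapi request signing added in make_player_api_request_data_and_headers above, reusing the helpers the diff itself imports; assumes youtube_dl is importable, and the song id and bitrate are illustrative:

    from binascii import hexlify
    from hashlib import md5
    import json

    from youtube_dl.aes import aes_ecb_encrypt, pkcs7_padding
    from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes

    KEY = b'e82ckenh8dichen8'                 # fixed key from the hunk above
    URL = '/api/song/enhance/player/url'

    request_text = json.dumps(
        {'ids': '[32102397]', 'br': 999000, 'header': {}},
        separators=(',', ':'))
    # sign the payload: md5 over a fixed sentence embedding the path and body
    message = 'nobody{0}use{1}md5forencrypt'.format(URL, request_text).encode('latin1')
    digest = md5(message).hexdigest()
    # join path, body and digest with the fixed separator, then AES-ECB encrypt
    data = '{0}-36cd479b6b5-{1}-36cd479b6b5-{2}'.format(URL, request_text, digest)
    encrypted = intlist_to_bytes(
        aes_ecb_encrypt(pkcs7_padding(bytes_to_intlist(data)), bytes_to_intlist(KEY)))
    params = hexlify(encrypted).decode('ascii').upper()
    print('params=' + params[:32] + '...')    # POST body for the eapi endpoint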

+ 18 - 3
youtube_dl/extractor/nhk.py

@@ -1,3 +1,4 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -7,7 +8,7 @@ from ..utils import urljoin
 
 
 class NhkBaseIE(InfoExtractor):
-    _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7a/%s/%s/%s/all%s.json'
+    _API_URL_TEMPLATE = 'https://nwapi.nhk.jp/nhkworld/%sod%slist/v7b/%s/%s/%s/all%s.json'
     _BASE_URL_REGEX = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand'
     _TYPE_REGEX = r'/(?P<type>video|audio)/'
 
@@ -23,7 +24,7 @@ class NhkBaseIE(InfoExtractor):
     def _extract_episode_info(self, url, episode=None):
         fetch_episode = episode is None
         lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()
-        if episode_id.isdigit():
+        if len(episode_id) == 7:
             episode_id = episode_id[:4] + '-' + episode_id[4:]
 
         is_video = m_type == 'video'
@@ -84,7 +85,8 @@ class NhkBaseIE(InfoExtractor):
 
 
 class NhkVodIE(NhkBaseIE):
-    _VALID_URL = r'%s%s(?P<id>\d{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
+    # the 7-character IDs can have alphabetic chars too: assume [a-z] rather than just [a-f], e.g. 9999a34 below
+    _VALID_URL = r'%s%s(?P<id>[0-9a-z]{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
     # Content available only for a limited period of time. Visit
     # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
     _TESTS = [{
@@ -124,6 +126,19 @@ class NhkVodIE(NhkBaseIE):
     }, {
         'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',
         'only_matching': True,
+    }, {
+        # video, alphabetic character in ID #29670
+        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/',
+        'only_matching': True,
+        'info_dict': {
+            'id': 'qfjay6cg',
+            'ext': 'mp4',
+            'title': 'DESIGN TALKS plus - Fishermen’s Finery',
+            'description': 'md5:8a8f958aaafb0d7cb59d38de53f1e448',
+            'thumbnail': r're:^https?:/(/[a-z0-9.-]+)+\.jpg\?w=1920&h=1080$',
+            'upload_date': '20210615',
+            'timestamp': 1623722008,
+        }
     }]
 
     def _real_extract(self, url):

+ 1 - 2
youtube_dl/extractor/nrk.py

@@ -60,8 +60,7 @@ class NRKBaseIE(InfoExtractor):
         return self._download_json(
             urljoin('https://psapi.nrk.no/', path),
             video_id, note or 'Downloading %s JSON' % item,
-            fatal=fatal, query=query,
-            headers={'Accept-Encoding': 'gzip, deflate, br'})
+            fatal=fatal, query=query)
 
 
 class NRKIE(NRKBaseIE):

Too many files changed in this diff; some files are not shown.