
blacken all the code

https://black.readthedocs.io/
Thomas Waldmann, 2 years ago
parent commit: 7957af562d
78 files changed, 6357 additions and 5430 deletions
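For context on what follows: Black's changes in this diff fall into a few mechanical categories. String literals are normalized to double quotes, short bracketed constructs are collapsed onto one line, long calls are exploded one element per line with a trailing comma, and spacing around arithmetic operators is regularized. A minimal before/after sketch (illustrative, not taken verbatim from the commit; the exact Black invocation is not recorded here, and the long lines left intact suggest a non-default line length was configured, which is an assumption):

import os

# before blackening: single quotes, manually wrapped comprehension
keys = [key for key in os.environ
        if key.startswith('BORG_')]

# after blackening: double quotes, one line because it fits the configured length
keys = [key for key in os.environ if key.startswith("BORG_")]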
 1. conftest.py (+11, -10)
 2. docs/conf.py (+67, -81)
 3. scripts/errorlist.py (+2, -2)
 4. scripts/glibc_check.py (+10, -11)
 5. scripts/hash_sizes.py (+11, -8)
 6. setup.py (+69, -75)
 7. setup_docs.py (+195, -185)
 8. src/borg/__init__.py (+4, -2)
 9. src/borg/__main__.py (+4, -3)
10. src/borg/archive.py (+246, -191)
11. src/borg/archiver.py (+370, -223)
12. src/borg/cache.py (+304, -213)
13. src/borg/constants.py (+19, -19)
14. src/borg/crypto/file_integrity.py (+19, -22)
15. src/borg/crypto/key.py (+140, -154)
16. src/borg/crypto/keymanager.py (+47, -45)
17. src/borg/crypto/nonces.py (+6, -2)
18. src/borg/fuse.py (+119, -94)
19. src/borg/fuse_impl.py (+5, -5)
20. src/borg/helpers/__init__.py (+1, -1)
21. src/borg/helpers/checks.py (+7, -6)
22. src/borg/helpers/datastruct.py (+3, -2)
23. src/borg/helpers/errors.py (+2, -0)
24. src/borg/helpers/fs.py (+51, -43)
25. src/borg/helpers/manifest.py (+53, -38)
26. src/borg/helpers/misc.py (+55, -48)
27. src/borg/helpers/msgpack.py (+63, -37)
28. src/borg/helpers/parseformat.py (+281, -267)
29. src/borg/helpers/passphrase.py (+36, -30)
30. src/borg/helpers/process.py (+49, -39)
31. src/borg/helpers/progress.py (+13, -16)
32. src/borg/helpers/time.py (+21, -17)
33. src/borg/helpers/yes.py (+33, -22)
34. src/borg/locking.py (+23, -16)
35. src/borg/logger.py (+30, -34)
36. src/borg/lrucache.py (+2, -2)
37. src/borg/nanorst.py (+65, -63)
38. src/borg/patterns.py (+36, -48)
39. src/borg/platform/__init__.py (+1, -0)
40. src/borg/platform/base.py (+22, -15)
41. src/borg/platform/xattr.py (+6, -5)
42. src/borg/platformflags.py (+4, -4)
43. src/borg/remote.py (+306, -247)
44. src/borg/repository.py (+236, -210)
45. src/borg/selftest.py (+14, -9)
46. src/borg/shellpattern.py (+1, -1)
47. src/borg/testsuite/__init__.py (+35, -32)
48. src/borg/testsuite/archive.py (+79, -74)
49. src/borg/testsuite/archiver.py (+1055, -913)
50. src/borg/testsuite/benchmark.py (+27, -26)
51. src/borg/testsuite/cache.py (+72, -124)
52. src/borg/testsuite/checksums.py (+17, -9)
53. src/borg/testsuite/chunker.py (+69, -53)
54. src/borg/testsuite/chunker_pytest.py (+40, -51)
55. src/borg/testsuite/chunker_slow.py (+3, -4)
56. src/borg/testsuite/compress.py (+50, -49)
57. src/borg/testsuite/crypto.py (+128, -113)
58. src/borg/testsuite/efficient_collection_queue.py (+12, -12)
59. src/borg/testsuite/file_integrity.py (+51, -58)
60. src/borg/testsuite/hashindex.py (+36, -32)
61. src/borg/testsuite/helpers.py (+395, -340)
62. src/borg/testsuite/item.py (+36, -39)
63. src/borg/testsuite/key.py (+116, -117)
64. src/borg/testsuite/locking.py (+65, -23)
65. src/borg/testsuite/logger.py (+8, -7)
66. src/borg/testsuite/lrucache.py (+16, -17)
67. src/borg/testsuite/nanorst.py (+9, -11)
68. src/borg/testsuite/nonces.py (+14, -15)
69. src/borg/testsuite/patterns.py (+394, -255)
70. src/borg/testsuite/platform.py (+84, -56)
71. src/borg/testsuite/remote.py (+22, -22)
72. src/borg/testsuite/repository.py (+229, -212)
73. src/borg/testsuite/shellpattern.py (+90, -97)
74. src/borg/testsuite/version.py (+41, -35)
75. src/borg/testsuite/xattr.py (+24, -28)
76. src/borg/upgrade.py (+55, -18)
77. src/borg/version.py (+7, -7)
78. src/borg/xattr.py (+16, -16)

+ 11 - 10
conftest.py

@@ -3,8 +3,8 @@ import os
 import pytest


 # needed to get pretty assertion failures in unit tests:
-if hasattr(pytest, 'register_assert_rewrite'):
-    pytest.register_assert_rewrite('borg.testsuite')
+if hasattr(pytest, "register_assert_rewrite"):
+    pytest.register_assert_rewrite("borg.testsuite")


 import borg.cache  # noqa: E402
@@ -21,11 +21,10 @@ from borg.testsuite.platform import fakeroot_detected  # noqa: E402
 @pytest.fixture(autouse=True)
 def clean_env(tmpdir_factory, monkeypatch):
     # avoid that we access / modify the user's normal .config / .cache directory:
-    monkeypatch.setenv('XDG_CONFIG_HOME', str(tmpdir_factory.mktemp('xdg-config-home')))
-    monkeypatch.setenv('XDG_CACHE_HOME', str(tmpdir_factory.mktemp('xdg-cache-home')))
+    monkeypatch.setenv("XDG_CONFIG_HOME", str(tmpdir_factory.mktemp("xdg-config-home")))
+    monkeypatch.setenv("XDG_CACHE_HOME", str(tmpdir_factory.mktemp("xdg-cache-home")))
     # also avoid to use anything from the outside environment:
-    keys = [key for key in os.environ
-            if key.startswith('BORG_') and key not in ('BORG_FUSE_IMPL', )]
+    keys = [key for key in os.environ if key.startswith("BORG_") and key not in ("BORG_FUSE_IMPL",)]
     for key in keys:
         monkeypatch.delenv(key, raising=False)
     # Speed up tests
@@ -41,7 +40,7 @@ def pytest_report_header(config, startdir):
         "symlinks": are_symlinks_supported(),
         "hardlinks": are_hardlinks_supported(),
         "atime/mtime": is_utime_fully_supported(),
-        "modes": "BORG_TESTS_IGNORE_MODES" not in os.environ
+        "modes": "BORG_TESTS_IGNORE_MODES" not in os.environ,
     }
     enabled = []
     disabled = []
@@ -60,9 +59,11 @@ class DefaultPatches:
         self.org_cache_wipe_cache = borg.cache.LocalCache.wipe_cache

         def wipe_should_not_be_called(*a, **kw):
-            raise AssertionError("Cache wipe was triggered, if this is part of the test add "
-                                 "@pytest.mark.allow_cache_wipe")
-        if 'allow_cache_wipe' not in request.keywords:
+            raise AssertionError(
+                "Cache wipe was triggered, if this is part of the test add " "@pytest.mark.allow_cache_wipe"
+            )
+
+        if "allow_cache_wipe" not in request.keywords:
             borg.cache.LocalCache.wipe_cache = wipe_should_not_be_called
         request.addfinalizer(self.undo)

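One detail in the last hunk above: Black wraps the raise AssertionError(...) call in parentheses but leaves the two adjacent string literals unjoined, because implicit string concatenation is semantics, not formatting, and Black never changes program behavior. A small sketch of what that means:

# adjacent string literals are concatenated at compile time, so the
# reformatted call raises exactly the same message as before
msg = "Cache wipe was triggered, if this is part of the test add " "@pytest.mark.allow_cache_wipe"
assert msg == "Cache wipe was triggered, if this is part of the test add @pytest.mark.allow_cache_wipe"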
+ 67 - 81
docs/conf.py

@@ -13,84 +13,85 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 import sys, os
-sys.path.insert(0, os.path.abspath('../src'))
+
+sys.path.insert(0, os.path.abspath("../src"))

 from borg import __version__ as sw_version

 # -- General configuration -----------------------------------------------------

 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'

 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = []

 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]

 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"

 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'

 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"

 # General information about the project.
-project = 'Borg - Deduplicating Archiver'
-copyright = '2010-2014 Jonas Borgström, 2015-2022 The Borg Collective (see AUTHORS file)'
+project = "Borg - Deduplicating Archiver"
+copyright = "2010-2014 Jonas Borgström, 2015-2022 The Borg Collective (see AUTHORS file)"

 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-split_char = '+' if '+' in sw_version else '-'
+split_char = "+" if "+" in sw_version else "-"
 version = sw_version.split(split_char)[0]
 # The full version, including alpha/beta/rc tags.
 release = version

-suppress_warnings = ['image.nonlocal_uri']
+suppress_warnings = ["image.nonlocal_uri"]

 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None

 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-today_fmt = '%Y-%m-%d'
+today_fmt = "%Y-%m-%d"

 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]

 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None

 # The Borg docs contain no or very little Python docs.
 # Thus, the primary domain is rst.
-primary_domain = 'rst'
+primary_domain = "rst"

 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True

 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True

 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False

 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"

 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []


 # -- Options for HTML output ---------------------------------------------------
@@ -100,79 +101,73 @@ pygments_style = 'sphinx'
 import guzzle_sphinx_theme

 html_theme_path = guzzle_sphinx_theme.html_theme_path()
-html_theme = 'guzzle_sphinx_theme'
+html_theme = "guzzle_sphinx_theme"


 def set_rst_settings(app):
-    app.env.settings.update({
-        'field_name_limit': 0,
-        'option_limit': 0,
-    })
+    app.env.settings.update({"field_name_limit": 0, "option_limit": 0})


 def setup(app):
-    app.add_css_file('css/borg.css')
-    app.connect('builder-inited', set_rst_settings)
+    app.add_css_file("css/borg.css")
+    app.connect("builder-inited", set_rst_settings)
+

 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-html_theme_options = {
-    'project_nav_name': 'Borg %s' % version,
-}
+html_theme_options = {"project_nav_name": "Borg %s" % version}

 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = ['_themes']
+# html_theme_path = ['_themes']

 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None

 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None

 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-html_logo = '_static/logo.svg'
+html_logo = "_static/logo.svg"

 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-html_favicon = '_static/favicon.ico'
+html_favicon = "_static/favicon.ico"

 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['borg_theme']
+html_static_path = ["borg_theme"]

-html_extra_path = ['../src/borg/paperkey.html']
+html_extra_path = ["../src/borg/paperkey.html"]

 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d'
+html_last_updated_fmt = "%Y-%m-%d"

 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
 html_use_smartypants = True
-smartquotes_action = 'qe'  # no D in there means "do not transform -- and ---"
+smartquotes_action = "qe"  # no D in there means "do not transform -- and ---"

 # Custom sidebar templates, maps document names to template names.
-html_sidebars = {
-    '**': ['logo-text.html', 'searchbox.html', 'globaltoc.html'],
-}
+html_sidebars = {"**": ["logo-text.html", "searchbox.html", "globaltoc.html"]}

 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}

 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True

 # If false, no index is generated.
 html_use_index = False

 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False

 # If true, links to the reST sources are added to the pages.
 html_show_sourcelink = False
@@ -186,57 +181,45 @@ html_show_copyright = False
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''

 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None

 # Output file base name for HTML help builder.
-htmlhelp_basename = 'borgdoc'
+htmlhelp_basename = "borgdoc"


 # -- Options for LaTeX output --------------------------------------------------

 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
-  ('book', 'Borg.tex', 'Borg Documentation',
-   'The Borg Collective', 'manual'),
-]
+latex_documents = [("book", "Borg.tex", "Borg Documentation", "The Borg Collective", "manual")]

 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-latex_logo = '_static/logo.pdf'
+latex_logo = "_static/logo.pdf"

-latex_elements = {
-    'papersize': 'a4paper',
-    'pointsize': '10pt',
-    'figure_align': 'H',
-}
+latex_elements = {"papersize": "a4paper", "pointsize": "10pt", "figure_align": "H"}

 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False

 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False

 # If true, show URL addresses after external links.
-latex_show_urls = 'footnote'
+latex_show_urls = "footnote"

 # Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
+# latex_preamble = ''

 # Documents to append as an appendix to all manuals.
-latex_appendices = [
-    'support',
-    'resources',
-    'changes',
-    'authors',
-]
+latex_appendices = ["support", "resources", "changes", "authors"]

 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True


 # -- Options for manual page output --------------------------------------------
@@ -244,21 +227,24 @@ latex_appendices = [
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-        ('usage', 'borg',
-         'BorgBackup is a deduplicating backup program with optional compression and authenticated encryption.',
-         ['The Borg Collective (see AUTHORS file)'],
-         1),
+    (
+        "usage",
+        "borg",
+        "BorgBackup is a deduplicating backup program with optional compression and authenticated encryption.",
+        ["The Borg Collective (see AUTHORS file)"],
+        1,
+    )
 ]

 extensions = [
-    'sphinx.ext.extlinks',
-    'sphinx.ext.autodoc',
-    'sphinx.ext.todo',
-    'sphinx.ext.coverage',
-    'sphinx.ext.viewcode',
+    "sphinx.ext.extlinks",
+    "sphinx.ext.autodoc",
+    "sphinx.ext.todo",
+    "sphinx.ext.coverage",
+    "sphinx.ext.viewcode",
 ]

 extlinks = {
-    'issue': ('https://github.com/borgbackup/borg/issues/%s', '#'),
-    'targz_url': ('https://pypi.python.org/packages/source/b/borgbackup/%%s-%s.tar.gz' % version, None),
+    "issue": ("https://github.com/borgbackup/borg/issues/%s", "#"),
+    "targz_url": ("https://pypi.python.org/packages/source/b/borgbackup/%%s-%s.tar.gz" % version, None),
 }

+ 2 - 2
scripts/errorlist.py

@@ -10,5 +10,5 @@ classes = Error.__subclasses__() + ErrorWithTraceback.__subclasses__()
 for cls in sorted(classes, key=lambda cls: (cls.__module__, cls.__qualname__)):
     if cls is ErrorWithTraceback:
         continue
-    print('   ', cls.__qualname__)
-    print(indent(cls.__doc__, ' ' * 8))
+    print("   ", cls.__qualname__)
+    print(indent(cls.__doc__, " " * 8))

+ 10 - 11
scripts/glibc_check.py

@@ -13,11 +13,11 @@ import sys

 verbose = True
 objdump = "objdump -T %s"
-glibc_re = re.compile(r'GLIBC_([0-9]\.[0-9]+)')
+glibc_re = re.compile(r"GLIBC_([0-9]\.[0-9]+)")


 def parse_version(v):
-    major, minor = v.split('.')
+    major, minor = v.split(".")
     return int(major), int(minor)


@@ -32,11 +32,9 @@ def main():
     overall_versions = set()
     for filename in filenames:
         try:
-            output = subprocess.check_output(objdump % filename, shell=True,
-                                             stderr=subprocess.STDOUT)
+            output = subprocess.check_output(objdump % filename, shell=True, stderr=subprocess.STDOUT)
             output = output.decode()
-            versions = {parse_version(match.group(1))
-                        for match in glibc_re.finditer(output)}
+            versions = {parse_version(match.group(1)) for match in glibc_re.finditer(output)}
             requires_glibc = max(versions)
             overall_versions.add(requires_glibc)
             if verbose:
@@ -50,14 +48,15 @@ def main():

     if verbose:
         if ok:
-            print("The binaries work with the given glibc %s." %
-                  format_version(given))
+            print("The binaries work with the given glibc %s." % format_version(given))
         else:
-            print("The binaries do not work with the given glibc %s. "
-                  "Minimum is: %s" % (format_version(given), format_version(wanted)))
+            print(
+                "The binaries do not work with the given glibc %s. "
+                "Minimum is: %s" % (format_version(given), format_version(wanted))
+            )
     return ok


-if __name__ == '__main__':
+if __name__ == "__main__":
     ok = main()
     sys.exit(0 if ok else 1)

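Aside from the quoting changes, the hunks above show the script's whole approach: run objdump -T on each binary and take the highest GLIBC_x.y symbol version it references. A condensed sketch of that core step (simplified; the real script also aggregates across files and compares the result against a given version):

import re
import subprocess

glibc_re = re.compile(r"GLIBC_([0-9]\.[0-9]+)")

def max_glibc_required(filename):
    # scan the dynamic symbol table for referenced glibc symbol versions
    output = subprocess.check_output("objdump -T %s" % filename, shell=True, stderr=subprocess.STDOUT).decode()
    versions = {tuple(map(int, m.group(1).split("."))) for m in glibc_re.finditer(output)}
    return max(versions)  # e.g. (2, 28)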
+ 11 - 8
scripts/hash_sizes.py

@@ -23,11 +23,11 @@ policies = [
     # which growth factor to use when growing a hashtable of size < upto
     # grow fast (*2.0) at the start so we do not have to resize too often (expensive).
     # grow slow (*1.1) for huge hash tables (do not jump too much in memory usage)
-    Policy(256*K, 2.0),
-    Policy(2*M, 1.7),
-    Policy(16*M, 1.4),
-    Policy(128*M, 1.2),
-    Policy(2*G-1, 1.1),
+    Policy(256 * K, 2.0),
+    Policy(2 * M, 1.7),
+    Policy(16 * M, 1.4),
+    Policy(128 * M, 1.2),
+    Policy(2 * G - 1, 1.1),
 ]


@@ -92,12 +92,15 @@ def main():
         sizes.append(p)
         i = int(i * grow_factor)

-    print("""\
+    print(
+        """\
 static int hash_sizes[] = {
     %s
 };
-""" % ', '.join(str(size) for size in sizes))
+"""
+        % ", ".join(str(size) for size in sizes)
+    )


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

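The policy table in the first hunk encodes the resize strategy its comments describe: small tables grow aggressively (factor 2.0) so resizes stay rare, while huge tables grow gently (factor 1.1) to avoid large jumps in memory usage. A sketch of how such a table is consulted (illustrative names, not the real script's code):

K, M, G = 2**10, 2**20, 2**30

# (upper size bound, growth factor), checked in order
POLICIES = [(256 * K, 2.0), (2 * M, 1.7), (16 * M, 1.4), (128 * M, 1.2), (2 * G - 1, 1.1)]

def growth_factor(size):
    # use the factor of the first policy whose bound the size is still below
    for upto, factor in POLICIES:
        if size < upto:
            return factor
    return POLICIES[-1][1]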
+ 69 - 75
setup.py

@@ -22,11 +22,11 @@ except ImportError:
 sys.path += [os.path.dirname(__file__)]
 import setup_docs

-is_win32 = sys.platform.startswith('win32')
-is_openbsd = sys.platform.startswith('openbsd')
+is_win32 = sys.platform.startswith("win32")
+is_openbsd = sys.platform.startswith("openbsd")

 # Number of threads to use for cythonize, not used on windows
-cpu_threads = multiprocessing.cpu_count() if multiprocessing and multiprocessing.get_start_method() != 'spawn' else None
+cpu_threads = multiprocessing.cpu_count() if multiprocessing and multiprocessing.get_start_method() != "spawn" else None

 # How the build process finds the system libs:
 #
@@ -38,27 +38,23 @@ cpu_threads = multiprocessing.cpu_count() if multiprocessing and multiprocessing
 # 3. otherwise raise a fatal error.

 # Are we building on ReadTheDocs?
-on_rtd = os.environ.get('READTHEDOCS')
+on_rtd = os.environ.get("READTHEDOCS")

 # Extra cflags for all extensions, usually just warnings we want to explicitly enable
-cflags = [
-    '-Wall',
-    '-Wextra',
-    '-Wpointer-arith',
-]
-
-compress_source = 'src/borg/compress.pyx'
-crypto_ll_source = 'src/borg/crypto/low_level.pyx'
-chunker_source = 'src/borg/chunker.pyx'
-hashindex_source = 'src/borg/hashindex.pyx'
-item_source = 'src/borg/item.pyx'
-checksums_source = 'src/borg/checksums.pyx'
-platform_posix_source = 'src/borg/platform/posix.pyx'
-platform_linux_source = 'src/borg/platform/linux.pyx'
-platform_syncfilerange_source = 'src/borg/platform/syncfilerange.pyx'
-platform_darwin_source = 'src/borg/platform/darwin.pyx'
-platform_freebsd_source = 'src/borg/platform/freebsd.pyx'
-platform_windows_source = 'src/borg/platform/windows.pyx'
+cflags = ["-Wall", "-Wextra", "-Wpointer-arith"]
+
+compress_source = "src/borg/compress.pyx"
+crypto_ll_source = "src/borg/crypto/low_level.pyx"
+chunker_source = "src/borg/chunker.pyx"
+hashindex_source = "src/borg/hashindex.pyx"
+item_source = "src/borg/item.pyx"
+checksums_source = "src/borg/checksums.pyx"
+platform_posix_source = "src/borg/platform/posix.pyx"
+platform_linux_source = "src/borg/platform/linux.pyx"
+platform_syncfilerange_source = "src/borg/platform/syncfilerange.pyx"
+platform_darwin_source = "src/borg/platform/darwin.pyx"
+platform_freebsd_source = "src/borg/platform/freebsd.pyx"
+platform_windows_source = "src/borg/platform/windows.pyx"

 cython_sources = [
     compress_source,
@@ -67,7 +63,6 @@ cython_sources = [
     hashindex_source,
     item_source,
     checksums_source,
-
     platform_posix_source,
     platform_linux_source,
     platform_syncfilerange_source,
@@ -79,19 +74,20 @@ cython_sources = [
 if cythonize:
     Sdist = sdist
 else:
+
     class Sdist(sdist):
         def __init__(self, *args, **kwargs):
-            raise Exception('Cython is required to run sdist')
+            raise Exception("Cython is required to run sdist")

-    cython_c_files = [fn.replace('.pyx', '.c') for fn in cython_sources]
+    cython_c_files = [fn.replace(".pyx", ".c") for fn in cython_sources]
     if not on_rtd and not all(os.path.exists(path) for path in cython_c_files):
-        raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.')
+        raise ImportError("The GIT version of Borg needs Cython. Install Cython or use a released version.")


 def rm(file):
     try:
         os.unlink(file)
-        print('rm', file)
+        print("rm", file)
     except FileNotFoundError:
         pass

@@ -107,19 +103,19 @@ class Clean(Command):

     def run(self):
         for source in cython_sources:
-            genc = source.replace('.pyx', '.c')
+            genc = source.replace(".pyx", ".c")
             rm(genc)
-            compiled_glob = source.replace('.pyx', '.cpython*')
+            compiled_glob = source.replace(".pyx", ".cpython*")
             for compiled in sorted(glob(compiled_glob)):
                 rm(compiled)


 cmdclass = {
-    'build_ext': build_ext,
-    'build_usage': setup_docs.build_usage,
-    'build_man': setup_docs.build_man,
-    'sdist': Sdist,
-    'clean2': Clean,
+    "build_ext": build_ext,
+    "build_usage": setup_docs.build_usage,
+    "build_man": setup_docs.build_man,
+    "sdist": Sdist,
+    "clean2": Clean,
 }


@@ -137,16 +133,18 @@ if not on_rtd:
     try:
         import pkgconfig as pc
     except ImportError:
-        print('Warning: can not import pkgconfig python package.')
+        print("Warning: can not import pkgconfig python package.")
         pc = None

-    def lib_ext_kwargs(pc, prefix_env_var, lib_name, lib_pkg_name, pc_version, lib_subdir='lib'):
+    def lib_ext_kwargs(pc, prefix_env_var, lib_name, lib_pkg_name, pc_version, lib_subdir="lib"):
         system_prefix = os.environ.get(prefix_env_var)
         if system_prefix:
             print(f"Detected and preferring {lib_pkg_name} [via {prefix_env_var}]")
-            return dict(include_dirs=[os.path.join(system_prefix, 'include')],
-                        library_dirs=[os.path.join(system_prefix, lib_subdir)],
-                        libraries=[lib_name])
+            return dict(
+                include_dirs=[os.path.join(system_prefix, "include")],
+                library_dirs=[os.path.join(system_prefix, lib_subdir)],
+                libraries=[lib_name],
+            )

         if pc and pc.installed(lib_pkg_name, pc_version):
             print(f"Detected and preferring {lib_pkg_name} [via pkg-config]")
@@ -158,16 +156,13 @@ if not on_rtd:

     crypto_ldflags = []
     if is_win32:
-        crypto_ext_lib = lib_ext_kwargs(
-            pc, 'BORG_OPENSSL_PREFIX', 'libcrypto', 'libcrypto', '>=1.1.1', lib_subdir='')
+        crypto_ext_lib = lib_ext_kwargs(pc, "BORG_OPENSSL_PREFIX", "libcrypto", "libcrypto", ">=1.1.1", lib_subdir="")
     elif is_openbsd:
         # use openssl (not libressl) because we need AES-OCB and CHACHA20-POLY1305 via EVP api
-        crypto_ext_lib = lib_ext_kwargs(
-            pc, 'BORG_OPENSSL_PREFIX', 'crypto', 'libecrypto11', '>=1.1.1')
-        crypto_ldflags += ['-Wl,-rpath=/usr/local/lib/eopenssl11']
+        crypto_ext_lib = lib_ext_kwargs(pc, "BORG_OPENSSL_PREFIX", "crypto", "libecrypto11", ">=1.1.1")
+        crypto_ldflags += ["-Wl,-rpath=/usr/local/lib/eopenssl11"]
     else:
-        crypto_ext_lib = lib_ext_kwargs(
-            pc, 'BORG_OPENSSL_PREFIX', 'crypto', 'libcrypto', '>=1.1.1')
+        crypto_ext_lib = lib_ext_kwargs(pc, "BORG_OPENSSL_PREFIX", "crypto", "libcrypto", ">=1.1.1")

     crypto_ext_kwargs = members_appended(
         dict(sources=[crypto_ll_source]),
@@ -178,57 +173,60 @@ if not on_rtd:

     compress_ext_kwargs = members_appended(
         dict(sources=[compress_source]),
-        lib_ext_kwargs(pc, 'BORG_LIBLZ4_PREFIX', 'lz4', 'liblz4', '>= 1.7.0'),
-        lib_ext_kwargs(pc, 'BORG_LIBZSTD_PREFIX', 'zstd', 'libzstd', '>= 1.3.0'),
+        lib_ext_kwargs(pc, "BORG_LIBLZ4_PREFIX", "lz4", "liblz4", ">= 1.7.0"),
+        lib_ext_kwargs(pc, "BORG_LIBZSTD_PREFIX", "zstd", "libzstd", ">= 1.3.0"),
         dict(extra_compile_args=cflags),
     )

     checksums_ext_kwargs = members_appended(
         dict(sources=[checksums_source]),
-        lib_ext_kwargs(pc, 'BORG_LIBXXHASH_PREFIX', 'xxhash', 'libxxhash', '>= 0.7.3'),
+        lib_ext_kwargs(pc, "BORG_LIBXXHASH_PREFIX", "xxhash", "libxxhash", ">= 0.7.3"),
         dict(extra_compile_args=cflags),
     )

     ext_modules += [
-        Extension('borg.crypto.low_level', **crypto_ext_kwargs),
-        Extension('borg.compress', **compress_ext_kwargs),
-        Extension('borg.hashindex', [hashindex_source], extra_compile_args=cflags),
-        Extension('borg.item', [item_source], extra_compile_args=cflags),
-        Extension('borg.chunker', [chunker_source], extra_compile_args=cflags),
-        Extension('borg.checksums', **checksums_ext_kwargs),
+        Extension("borg.crypto.low_level", **crypto_ext_kwargs),
+        Extension("borg.compress", **compress_ext_kwargs),
+        Extension("borg.hashindex", [hashindex_source], extra_compile_args=cflags),
+        Extension("borg.item", [item_source], extra_compile_args=cflags),
+        Extension("borg.chunker", [chunker_source], extra_compile_args=cflags),
+        Extension("borg.checksums", **checksums_ext_kwargs),
     ]

-    posix_ext = Extension('borg.platform.posix', [platform_posix_source], extra_compile_args=cflags)
-    linux_ext = Extension('borg.platform.linux', [platform_linux_source], libraries=['acl'], extra_compile_args=cflags)
-    syncfilerange_ext = Extension('borg.platform.syncfilerange', [platform_syncfilerange_source], extra_compile_args=cflags)
-    freebsd_ext = Extension('borg.platform.freebsd', [platform_freebsd_source], extra_compile_args=cflags)
-    darwin_ext = Extension('borg.platform.darwin', [platform_darwin_source], extra_compile_args=cflags)
-    windows_ext = Extension('borg.platform.windows', [platform_windows_source], extra_compile_args=cflags)
+    posix_ext = Extension("borg.platform.posix", [platform_posix_source], extra_compile_args=cflags)
+    linux_ext = Extension("borg.platform.linux", [platform_linux_source], libraries=["acl"], extra_compile_args=cflags)
+    syncfilerange_ext = Extension(
+        "borg.platform.syncfilerange", [platform_syncfilerange_source], extra_compile_args=cflags
+    )
+    freebsd_ext = Extension("borg.platform.freebsd", [platform_freebsd_source], extra_compile_args=cflags)
+    darwin_ext = Extension("borg.platform.darwin", [platform_darwin_source], extra_compile_args=cflags)
+    windows_ext = Extension("borg.platform.windows", [platform_windows_source], extra_compile_args=cflags)

     if not is_win32:
         ext_modules.append(posix_ext)
     else:
         ext_modules.append(windows_ext)
-    if sys.platform == 'linux':
+    if sys.platform == "linux":
         ext_modules.append(linux_ext)
         ext_modules.append(syncfilerange_ext)
-    elif sys.platform.startswith('freebsd'):
+    elif sys.platform.startswith("freebsd"):
         ext_modules.append(freebsd_ext)
-    elif sys.platform == 'darwin':
+    elif sys.platform == "darwin":
         ext_modules.append(darwin_ext)

     # sometimes there's no need to cythonize
     # this breaks chained commands like 'clean sdist'
-    cythonizing = len(sys.argv) > 1 and sys.argv[1] not in (
-        ('clean', 'clean2', 'egg_info', '--help-commands', '--version')) and '--help' not in sys.argv[1:]
+    cythonizing = (
+        len(sys.argv) > 1
+        and sys.argv[1] not in (("clean", "clean2", "egg_info", "--help-commands", "--version"))
+        and "--help" not in sys.argv[1:]
+    )

     if cythonize and cythonizing:
-        cython_opts = dict(
-            compiler_directives={'language_level': '3str'},
-        )
+        cython_opts = dict(compiler_directives={"language_level": "3str"})
         if not is_win32:
             # compile .pyx extensions to .c in parallel, does not work on windows
-            cython_opts['nthreads'] = cpu_threads
+            cython_opts["nthreads"] = cpu_threads

         # generate C code from Cython for ALL supported platforms, so we have them in the sdist.
         # the sdist does not require Cython at install time, so we need all as C.
@@ -237,8 +235,4 @@ if not on_rtd:
         ext_modules = cythonize(ext_modules, **cython_opts)


-setup(
-    cmdclass=cmdclass,
-    ext_modules=ext_modules,
-    long_description=setup_docs.long_desc_from_readme()
-)
+setup(cmdclass=cmdclass, ext_modules=ext_modules, long_description=setup_docs.long_desc_from_readme())

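The lib_ext_kwargs hunk above implements the three-step lookup that setup.py's comment block describes: an explicit BORG_*_PREFIX environment variable wins, then pkg-config is consulted, otherwise the build fails. A simplified sketch of that precedence (the pkg-config and failure branches are not shown in the diff; pc.installed/pc.parse are the pkgconfig package's API, used here on that assumption):

import os

def find_lib(pc, prefix_env_var, lib_name, lib_pkg_name, pc_version):
    # 1. if a BORG_LIBX_PREFIX environment variable is set, use it
    system_prefix = os.environ.get(prefix_env_var)
    if system_prefix:
        return dict(
            include_dirs=[os.path.join(system_prefix, "include")],
            library_dirs=[os.path.join(system_prefix, "lib")],
            libraries=[lib_name],
        )
    # 2. if pkg-config knows a suitable version, take the flags from it
    if pc and pc.installed(lib_pkg_name, pc_version):
        return pc.parse(lib_pkg_name)
    # 3. otherwise raise a fatal error
    raise RuntimeError("could not find %s %s" % (lib_pkg_name, pc_version))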
+ 195 - 185
setup_docs.py

@@ -12,36 +12,34 @@ from setuptools import Command
 
 
 
 
 def long_desc_from_readme():
 def long_desc_from_readme():
-    with open('README.rst') as fd:
+    with open("README.rst") as fd:
         long_description = fd.read()
         long_description = fd.read()
         # remove header, but have one \n before first headline
         # remove header, but have one \n before first headline
-        start = long_description.find('What is BorgBackup?')
+        start = long_description.find("What is BorgBackup?")
         assert start >= 0
         assert start >= 0
-        long_description = '\n' + long_description[start:]
+        long_description = "\n" + long_description[start:]
         # remove badges
         # remove badges
-        long_description = re.compile(r'^\.\. start-badges.*^\.\. end-badges', re.M | re.S).sub('', long_description)
+        long_description = re.compile(r"^\.\. start-badges.*^\.\. end-badges", re.M | re.S).sub("", long_description)
         # remove unknown directives
         # remove unknown directives
-        long_description = re.compile(r'^\.\. highlight:: \w+$', re.M).sub('', long_description)
+        long_description = re.compile(r"^\.\. highlight:: \w+$", re.M).sub("", long_description)
         return long_description
         return long_description
 
 
 
 
 def format_metavar(option):
 def format_metavar(option):
-    if option.nargs in ('*', '...'):
-        return '[%s...]' % option.metavar
-    elif option.nargs == '?':
-        return '[%s]' % option.metavar
+    if option.nargs in ("*", "..."):
+        return "[%s...]" % option.metavar
+    elif option.nargs == "?":
+        return "[%s]" % option.metavar
     elif option.nargs is None:
     elif option.nargs is None:
         return option.metavar
         return option.metavar
     else:
     else:
-        raise ValueError(f'Can\'t format metavar {option.metavar}, unknown nargs {option.nargs}!')
+        raise ValueError(f"Can't format metavar {option.metavar}, unknown nargs {option.nargs}!")
 
 
 
 
 class build_usage(Command):
 class build_usage(Command):
     description = "generate usage for each command"
     description = "generate usage for each command"
 
 
-    user_options = [
-        ('output=', 'O', 'output directory'),
-    ]
+    user_options = [("output=", "O", "output directory")]
 
 
     def initialize_options(self):
     def initialize_options(self):
         pass
         pass
@@ -50,17 +48,19 @@ class build_usage(Command):
         pass
         pass
 
 
     def run(self):
     def run(self):
-        print('generating usage docs')
+        print("generating usage docs")
         import borg
         import borg
-        borg.doc_mode = 'build_man'
-        if not os.path.exists('docs/usage'):
-            os.mkdir('docs/usage')
+
+        borg.doc_mode = "build_man"
+        if not os.path.exists("docs/usage"):
+            os.mkdir("docs/usage")
         # allows us to build docs without the C modules fully loaded during help generation
         # allows us to build docs without the C modules fully loaded during help generation
         from borg.archiver import Archiver
         from borg.archiver import Archiver
-        parser = Archiver(prog='borg').build_parser()
+
+        parser = Archiver(prog="borg").build_parser()
         # borgfs has a separate man page to satisfy debian's "every program from a package
         # borgfs has a separate man page to satisfy debian's "every program from a package
         # must have a man page" requirement, but it doesn't need a separate HTML docs page
         # must have a man page" requirement, but it doesn't need a separate HTML docs page
-        #borgfs_parser = Archiver(prog='borgfs').build_parser()
+        # borgfs_parser = Archiver(prog='borgfs').build_parser()
 
 
         self.generate_level("", parser, Archiver)
         self.generate_level("", parser, Archiver)
 
 
@@ -68,7 +68,7 @@ class build_usage(Command):
         is_subcommand = False
         is_subcommand = False
         choices = {}
         choices = {}
         for action in parser._actions:
         for action in parser._actions:
-            if action.choices is not None and 'SubParsersAction' in str(action.__class__):
+            if action.choices is not None and "SubParsersAction" in str(action.__class__):
                 is_subcommand = True
                 is_subcommand = True
                 for cmd, parser in action.choices.items():
                 for cmd, parser in action.choices.items():
                     choices[prefix + cmd] = parser
                     choices[prefix + cmd] = parser
@@ -76,32 +76,37 @@ class build_usage(Command):
             choices.update(extra_choices)
             choices.update(extra_choices)
         if prefix and not choices:
         if prefix and not choices:
             return
             return
-        print('found commands: %s' % list(choices.keys()))
+        print("found commands: %s" % list(choices.keys()))
 
 
         for command, parser in sorted(choices.items()):
         for command, parser in sorted(choices.items()):
-            if command.startswith('debug'):
-                print('skipping', command)
+            if command.startswith("debug"):
+                print("skipping", command)
                 continue
                 continue
-            print('generating help for %s' % command)
+            print("generating help for %s" % command)
 
 
             if self.generate_level(command + " ", parser, Archiver):
             if self.generate_level(command + " ", parser, Archiver):
                 continue
                 continue
 
 
-            with open('docs/usage/%s.rst.inc' % command.replace(" ", "_"), 'w') as doc:
+            with open("docs/usage/%s.rst.inc" % command.replace(" ", "_"), "w") as doc:
                 doc.write(".. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit!\n\n")
                 doc.write(".. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit!\n\n")
-                if command == 'help':
+                if command == "help":
                     for topic in Archiver.helptext:
                     for topic in Archiver.helptext:
-                        params = {"topic": topic,
-                                  "underline": '~' * len('borg help ' + topic)}
+                        params = {"topic": topic, "underline": "~" * len("borg help " + topic)}
                         doc.write(".. _borg_{topic}:\n\n".format(**params))
                         doc.write(".. _borg_{topic}:\n\n".format(**params))
                         doc.write("borg help {topic}\n{underline}\n\n".format(**params))
                         doc.write("borg help {topic}\n{underline}\n\n".format(**params))
                         doc.write(Archiver.helptext[topic])
                         doc.write(Archiver.helptext[topic])
                 else:
                 else:
-                    params = {"command": command,
-                              "command_": command.replace(' ', '_'),
-                              "underline": '-' * len('borg ' + command)}
+                    params = {
+                        "command": command,
+                        "command_": command.replace(" ", "_"),
+                        "underline": "-" * len("borg " + command),
+                    }
                     doc.write(".. _borg_{command_}:\n\n".format(**params))
                     doc.write(".. _borg_{command_}:\n\n".format(**params))
-                    doc.write("borg {command}\n{underline}\n.. code-block:: none\n\n    borg [common options] {command}".format(**params))
+                    doc.write(
+                        "borg {command}\n{underline}\n.. code-block:: none\n\n    borg [common options] {command}".format(
+                            **params
+                        )
+                    )
                     self.write_usage(parser, doc)
                     self.write_usage(parser, doc)
                     epilog = parser.epilog
                     epilog = parser.epilog
                     parser.epilog = None
                     parser.epilog = None
@@ -109,21 +114,21 @@ class build_usage(Command):
                     doc.write("\n\nDescription\n~~~~~~~~~~~\n")
                     doc.write("\n\nDescription\n~~~~~~~~~~~\n")
                     doc.write(epilog)
                     doc.write(epilog)
 
 
-        if 'create' in choices:
-            common_options = [group for group in choices['create']._action_groups if group.title == 'Common options'][0]
-            with open('docs/usage/common-options.rst.inc', 'w') as doc:
+        if "create" in choices:
+            common_options = [group for group in choices["create"]._action_groups if group.title == "Common options"][0]
+            with open("docs/usage/common-options.rst.inc", "w") as doc:
                 self.write_options_group(common_options, doc, False, base_indent=0)
                 self.write_options_group(common_options, doc, False, base_indent=0)
 
 
         return is_subcommand
         return is_subcommand
 
 
     def write_usage(self, parser, fp):
     def write_usage(self, parser, fp):
         if any(len(o.option_strings) for o in parser._actions):
         if any(len(o.option_strings) for o in parser._actions):
-            fp.write(' [options]')
+            fp.write(" [options]")
         for option in parser._actions:
         for option in parser._actions:
             if option.option_strings:
             if option.option_strings:
                 continue
                 continue
-            fp.write(' ' + format_metavar(option))
-        fp.write('\n\n')
+            fp.write(" " + format_metavar(option))
+        fp.write("\n\n")
 
 
     def write_options(self, parser, fp):
     def write_options(self, parser, fp):
         def is_positional_group(group):
         def is_positional_group(group):
@@ -134,58 +139,58 @@ class build_usage(Command):
 
 
         def html_write(s):
         def html_write(s):
             for line in s.splitlines():
             for line in s.splitlines():
-                fp.write('    ' + line + '\n')
+                fp.write("    " + line + "\n")
 
 
         rows = []
         rows = []
         for group in parser._action_groups:
         for group in parser._action_groups:
-            if group.title == 'Common options':
+            if group.title == "Common options":
                 # (no of columns used, columns, ...)
                 # (no of columns used, columns, ...)
-                rows.append((1, '.. class:: borg-common-opt-ref\n\n:ref:`common_options`'))
+                rows.append((1, ".. class:: borg-common-opt-ref\n\n:ref:`common_options`"))
             else:
             else:
                 if not group._group_actions:
                 if not group._group_actions:
                     continue
                     continue
-                group_header = '**%s**' % group.title
+                group_header = "**%s**" % group.title
                 if group.description:
                 if group.description:
-                    group_header += ' — ' + group.description
+                    group_header += " — " + group.description
                 rows.append((1, group_header))
                 rows.append((1, group_header))
                 if is_positional_group(group):
                 if is_positional_group(group):
                     for option in group._group_actions:
                     for option in group._group_actions:
-                        rows.append((3, '', '``%s``' % option.metavar, option.help or ''))
+                        rows.append((3, "", "``%s``" % option.metavar, option.help or ""))
                 else:
                 else:
                     for option in group._group_actions:
                     for option in group._group_actions:
                         if option.metavar:
                         if option.metavar:
-                            option_fmt = '``%s ' + option.metavar + '``'
+                            option_fmt = "``%s " + option.metavar + "``"
                         else:
                         else:
-                            option_fmt = '``%s``'
-                        option_str = ', '.join(option_fmt % s for s in option.option_strings)
-                        option_desc = textwrap.dedent((option.help or '') % option.__dict__)
-                        rows.append((3, '', option_str, option_desc))
+                            option_fmt = "``%s``"
+                        option_str = ", ".join(option_fmt % s for s in option.option_strings)
+                        option_desc = textwrap.dedent((option.help or "") % option.__dict__)
+                        rows.append((3, "", option_str, option_desc))
 
 
-        fp.write('.. only:: html\n\n')
+        fp.write(".. only:: html\n\n")
         table = io.StringIO()
         table = io.StringIO()
-        table.write('.. class:: borg-options-table\n\n')
+        table.write(".. class:: borg-options-table\n\n")
         self.rows_to_table(rows, table.write)
         self.rows_to_table(rows, table.write)
-        fp.write(textwrap.indent(table.getvalue(), ' ' * 4))
+        fp.write(textwrap.indent(table.getvalue(), " " * 4))
 
 
         # LaTeX output:
         # LaTeX output:
         # Regular rST option lists (irregular column widths)
         # Regular rST option lists (irregular column widths)
         latex_options = io.StringIO()
         latex_options = io.StringIO()
         for group in parser._action_groups:
         for group in parser._action_groups:
-            if group.title == 'Common options':
-                latex_options.write('\n\n:ref:`common_options`\n')
-                latex_options.write('    |')
+            if group.title == "Common options":
+                latex_options.write("\n\n:ref:`common_options`\n")
+                latex_options.write("    |")
             else:
             else:
                 self.write_options_group(group, latex_options)
                 self.write_options_group(group, latex_options)
-        fp.write('\n.. only:: latex\n\n')
-        fp.write(textwrap.indent(latex_options.getvalue(), ' ' * 4))
+        fp.write("\n.. only:: latex\n\n")
+        fp.write(textwrap.indent(latex_options.getvalue(), " " * 4))
 
 
     def rows_to_table(self, rows, write):
     def rows_to_table(self, rows, write):
         def write_row_separator():
         def write_row_separator():
-            write('+')
+            write("+")
             for column_width in column_widths:
             for column_width in column_widths:
-                write('-' * (column_width + 1))
-                write('+')
-            write('\n')
+                write("-" * (column_width + 1))
+                write("+")
+            write("\n")
 
 
         # Find column count and width
         # Find column count and width
         column_count = max(columns for columns, *_ in rows)
         column_count = max(columns for columns, *_ in rows)
@@ -201,22 +206,22 @@ class build_usage(Command):
             # where each cell contains no newline.
             rowspanning_cells = []
             original_cells = list(original_cells)
-            while any('\n' in cell for cell in original_cells):
+            while any("\n" in cell for cell in original_cells):
                 cell_bloc = []
                 for i, cell in enumerate(original_cells):
-                    pre, _, original_cells[i] = cell.partition('\n')
+                    pre, _, original_cells[i] = cell.partition("\n")
                     cell_bloc.append(pre)
                 rowspanning_cells.append(cell_bloc)
             rowspanning_cells.append(original_cells)
             for cells in rowspanning_cells:
                 for i, column_width in enumerate(column_widths):
                     if i < columns:
-                        write('| ')
+                        write("| ")
                         write(cells[i].ljust(column_width))
                     else:
-                        write('  ')
-                        write(''.ljust(column_width))
-                write('|\n')
+                        write("  ")
+                        write("".ljust(column_width))
+                write("|\n")
 
         write_row_separator()
         # This bit of JavaScript kills the <colgroup> that is invariably inserted by docutils,
@@ -224,7 +229,9 @@ class build_usage(Command):
         # with CSS alone.
         # Since this is HTML-only output, it would be possible to just generate a <table> directly,
         # but then we'd lose rST formatting.
-        write(textwrap.dedent("""
+        write(
+            textwrap.dedent(
+                """
         .. raw:: html
 
             <script type='text/javascript'>
@@ -232,88 +239,88 @@ class build_usage(Command):
                 $('.borg-options-table colgroup').remove();
             })
             </script>
-        """))
+        """
+            )
+        )
 
     def write_options_group(self, group, fp, with_title=True, base_indent=4):
         def is_positional_group(group):
             return any(not o.option_strings for o in group._group_actions)
 
-        indent = ' ' * base_indent
+        indent = " " * base_indent
 
         if is_positional_group(group):
             for option in group._group_actions:
-                fp.write(option.metavar + '\n')
-                fp.write(textwrap.indent(option.help or '', ' ' * base_indent) + '\n')
+                fp.write(option.metavar + "\n")
+                fp.write(textwrap.indent(option.help or "", " " * base_indent) + "\n")
             return
 
         if not group._group_actions:
             return
 
         if with_title:
-            fp.write('\n\n')
-            fp.write(group.title + '\n')
+            fp.write("\n\n")
+            fp.write(group.title + "\n")
 
         opts = OrderedDict()
 
         for option in group._group_actions:
             if option.metavar:
-                option_fmt = '%s ' + option.metavar
+                option_fmt = "%s " + option.metavar
             else:
-                option_fmt = '%s'
-            option_str = ', '.join(option_fmt % s for s in option.option_strings)
-            option_desc = textwrap.dedent((option.help or '') % option.__dict__)
-            opts[option_str] = textwrap.indent(option_desc, ' ' * 4)
+                option_fmt = "%s"
+            option_str = ", ".join(option_fmt % s for s in option.option_strings)
+            option_desc = textwrap.dedent((option.help or "") % option.__dict__)
+            opts[option_str] = textwrap.indent(option_desc, " " * 4)
 
         padding = len(max(opts)) + 1
 
         for option, desc in opts.items():
-            fp.write(indent + option.ljust(padding) + desc + '\n')
+            fp.write(indent + option.ljust(padding) + desc + "\n")
 
 
 class build_man(Command):
-    description = 'build man pages'
+    description = "build man pages"
 
     user_options = []
 
     see_also = {
-        'create': ('delete', 'prune', 'check', 'patterns', 'placeholders', 'compression'),
-        'recreate': ('patterns', 'placeholders', 'compression'),
-        'list': ('info', 'diff', 'prune', 'patterns'),
-        'info': ('list', 'diff'),
-        'rcreate': ('rdelete', 'rlist', 'check', 'key-import', 'key-export', 'key-change-passphrase'),
-        'key-import': ('key-export', ),
-        'key-export': ('key-import', ),
-        'mount': ('umount', 'extract'),  # Would be cooler if these two were on the same page
-        'umount': ('mount', ),
-        'extract': ('mount', ),
-        'delete': ('compact', ),
-        'prune': ('compact', ),
+        "create": ("delete", "prune", "check", "patterns", "placeholders", "compression"),
+        "recreate": ("patterns", "placeholders", "compression"),
+        "list": ("info", "diff", "prune", "patterns"),
+        "info": ("list", "diff"),
+        "rcreate": ("rdelete", "rlist", "check", "key-import", "key-export", "key-change-passphrase"),
+        "key-import": ("key-export",),
+        "key-export": ("key-import",),
+        "mount": ("umount", "extract"),  # Would be cooler if these two were on the same page
+        "umount": ("mount",),
+        "extract": ("mount",),
+        "delete": ("compact",),
+        "prune": ("compact",),
     }
 
-    rst_prelude = textwrap.dedent("""
+    rst_prelude = textwrap.dedent(
+        """
     .. role:: ref(title)
 
     .. |project_name| replace:: Borg
 
-    """)
+    """
+    )
 
     usage_group = {
-        'break-lock': 'lock',
-        'with-lock': 'lock',
-
-        'key_change-passphrase': 'key',
-        'key_change-location': 'key',
-        'key_export': 'key',
-        'key_import': 'key',
-        'key_migrate-to-repokey': 'key',
-
-        'export-tar': 'tar',
-        'import-tar': 'tar',
-
-        'benchmark_crud': 'benchmark',
-        'benchmark_cpu': 'benchmark',
-
-        'umount': 'mount',
+        "break-lock": "lock",
+        "with-lock": "lock",
+        "key_change-passphrase": "key",
+        "key_change-location": "key",
+        "key_export": "key",
+        "key_import": "key",
+        "key_migrate-to-repokey": "key",
+        "export-tar": "tar",
+        "import-tar": "tar",
+        "benchmark_crud": "benchmark",
+        "benchmark_cpu": "benchmark",
+        "umount": "mount",
     }
 
     def initialize_options(self):
@@ -323,16 +330,18 @@ class build_man(Command):
         pass
 
     def run(self):
-        print('building man pages (in docs/man)', file=sys.stderr)
+        print("building man pages (in docs/man)", file=sys.stderr)
         import borg
-        borg.doc_mode = 'build_man'
-        os.makedirs('docs/man', exist_ok=True)
+
+        borg.doc_mode = "build_man"
+        os.makedirs("docs/man", exist_ok=True)
         # allows us to build docs without the C modules fully loaded during help generation
         from borg.archiver import Archiver
-        parser = Archiver(prog='borg').build_parser()
-        borgfs_parser = Archiver(prog='borgfs').build_parser()
 
-        self.generate_level('', parser, Archiver, {'borgfs': borgfs_parser})
+        parser = Archiver(prog="borg").build_parser()
+        borgfs_parser = Archiver(prog="borgfs").build_parser()
+
+        self.generate_level("", parser, Archiver, {"borgfs": borgfs_parser})
         self.build_topic_pages(Archiver)
         self.build_intro_page()
 
@@ -340,7 +349,7 @@ class build_man(Command):
         is_subcommand = False
         choices = {}
         for action in parser._actions:
-            if action.choices is not None and 'SubParsersAction' in str(action.__class__):
+            if action.choices is not None and "SubParsersAction" in str(action.__class__):
                 is_subcommand = True
                 for cmd, parser in action.choices.items():
                     choices[prefix + cmd] = parser
@@ -350,50 +359,50 @@ class build_man(Command):
             return
 
         for command, parser in sorted(choices.items()):
-            if command.startswith('debug') or command == 'help':
+            if command.startswith("debug") or command == "help":
                 continue
 
             if command == "borgfs":
                 man_title = command
             else:
-                man_title = 'borg-' + command.replace(' ', '-')
-            print('building man page', man_title + '(1)', file=sys.stderr)
+                man_title = "borg-" + command.replace(" ", "-")
+            print("building man page", man_title + "(1)", file=sys.stderr)
 
-            is_intermediary = self.generate_level(command + ' ', parser, Archiver)
+            is_intermediary = self.generate_level(command + " ", parser, Archiver)
 
             doc, write = self.new_doc()
             self.write_man_header(write, man_title, parser.description)
 
-            self.write_heading(write, 'SYNOPSIS')
+            self.write_heading(write, "SYNOPSIS")
             if is_intermediary:
-                subparsers = [action for action in parser._actions if 'SubParsersAction' in str(action.__class__)][0]
+                subparsers = [action for action in parser._actions if "SubParsersAction" in str(action.__class__)][0]
                 for subcommand in subparsers.choices:
-                    write('| borg', '[common options]', command, subcommand, '...')
-                    self.see_also.setdefault(command, []).append(f'{command}-{subcommand}')
+                    write("| borg", "[common options]", command, subcommand, "...")
+                    self.see_also.setdefault(command, []).append(f"{command}-{subcommand}")
             else:
                 if command == "borgfs":
-                    write(command, end='')
+                    write(command, end="")
                 else:
-                    write('borg', '[common options]', command, end='')
+                    write("borg", "[common options]", command, end="")
                 self.write_usage(write, parser)
-            write('\n')
+            write("\n")
 
-            description, _, notes = parser.epilog.partition('\n.. man NOTES')
+            description, _, notes = parser.epilog.partition("\n.. man NOTES")
 
             if description:
-                self.write_heading(write, 'DESCRIPTION')
+                self.write_heading(write, "DESCRIPTION")
                 write(description)
 
             if not is_intermediary:
-                self.write_heading(write, 'OPTIONS')
-                write('See `borg-common(1)` for common options of Borg commands.')
+                self.write_heading(write, "OPTIONS")
+                write("See `borg-common(1)` for common options of Borg commands.")
                 write()
                 self.write_options(write, parser)
 
                 self.write_examples(write, command)
 
             if notes:
-                self.write_heading(write, 'NOTES')
+                self.write_heading(write, "NOTES")
                 write(notes)
 
             self.write_see_also(write, man_title)
@@ -401,14 +410,14 @@ class build_man(Command):
             self.gen_man_page(man_title, doc.getvalue())
 
         # Generate the borg-common(1) man page with the common options.
-        if 'create' in choices:
+        if "create" in choices:
             doc, write = self.new_doc()
-            man_title = 'borg-common'
-            self.write_man_header(write, man_title, 'Common options of Borg commands')
+            man_title = "borg-common"
+            self.write_man_header(write, man_title, "Common options of Borg commands")
 
-            common_options = [group for group in choices['create']._action_groups if group.title == 'Common options'][0]
+            common_options = [group for group in choices["create"]._action_groups if group.title == "Common options"][0]
 
-            self.write_heading(write, 'SYNOPSIS')
+            self.write_heading(write, "SYNOPSIS")
             self.write_options_group(write, common_options)
             self.write_see_also(write, man_title)
             self.gen_man_page(man_title, doc.getvalue())
@@ -418,20 +427,20 @@ class build_man(Command):
     def build_topic_pages(self, Archiver):
         for topic, text in Archiver.helptext.items():
             doc, write = self.new_doc()
-            man_title = 'borg-' + topic
-            print('building man page', man_title + '(1)', file=sys.stderr)
+            man_title = "borg-" + topic
+            print("building man page", man_title + "(1)", file=sys.stderr)
 
-            self.write_man_header(write, man_title, 'Details regarding ' + topic)
-            self.write_heading(write, 'DESCRIPTION')
+            self.write_man_header(write, man_title, "Details regarding " + topic)
+            self.write_heading(write, "DESCRIPTION")
             write(text)
             self.gen_man_page(man_title, doc.getvalue())
 
     def build_intro_page(self):
         doc, write = self.new_doc()
-        man_title = 'borg'
-        print('building man page borg(1)', file=sys.stderr)
+        man_title = "borg"
+        print("building man page borg(1)", file=sys.stderr)
 
-        with open('docs/man_intro.rst') as fd:
+        with open("docs/man_intro.rst") as fd:
             man_intro = fd.read()
 
         self.write_man_header(write, man_title, "deduplicating and encrypting backup tool")
@@ -446,9 +455,10 @@ class build_man(Command):
     def printer(self, fd):
         def write(*args, **kwargs):
             print(*args, file=fd, **kwargs)
+
         return write
 
-    def write_heading(self, write, header, char='-', double_sided=False):
+    def write_heading(self, write, header, char="-", double_sided=False):
         write()
         if double_sided:
             write(char * len(header))
@@ -457,43 +467,43 @@ class build_man(Command):
         write()
 
     def write_man_header(self, write, title, description):
-        self.write_heading(write, title, '=', double_sided=True)
+        self.write_heading(write, title, "=", double_sided=True)
         self.write_heading(write, description, double_sided=True)
         # man page metadata
-        write(':Author: The Borg Collective')
-        write(':Date:', datetime.utcnow().date().isoformat())
-        write(':Manual section: 1')
-        write(':Manual group: borg backup tool')
+        write(":Author: The Borg Collective")
+        write(":Date:", datetime.utcnow().date().isoformat())
+        write(":Manual section: 1")
+        write(":Manual group: borg backup tool")
         write()
 
     def write_examples(self, write, command):
-        command = command.replace(' ', '_')
-        with open('docs/usage/%s.rst' % self.usage_group.get(command, command)) as fd:
+        command = command.replace(" ", "_")
+        with open("docs/usage/%s.rst" % self.usage_group.get(command, command)) as fd:
             usage = fd.read()
-            usage_include = '.. include:: %s.rst.inc' % command
+            usage_include = ".. include:: %s.rst.inc" % command
             begin = usage.find(usage_include)
-            end = usage.find('.. include', begin + 1)
+            end = usage.find(".. include", begin + 1)
             # If a command has a dedicated anchor, it will occur before the command's include.
-            if 0 < usage.find('.. _', begin + 1) < end:
-                end = usage.find('.. _', begin + 1)
+            if 0 < usage.find(".. _", begin + 1) < end:
+                end = usage.find(".. _", begin + 1)
             examples = usage[begin:end]
-            examples = examples.replace(usage_include, '')
-            examples = examples.replace('Examples\n~~~~~~~~', '')
-            examples = examples.replace('Miscellaneous Help\n------------------', '')
-            examples = examples.replace('``docs/misc/prune-example.txt``:', '``docs/misc/prune-example.txt``.')
-            examples = examples.replace('.. highlight:: none\n', '')  # we don't support highlight
-            examples = re.sub('^(~+)$', lambda matches: '+' * len(matches.group(0)), examples, flags=re.MULTILINE)
+            examples = examples.replace(usage_include, "")
+            examples = examples.replace("Examples\n~~~~~~~~", "")
+            examples = examples.replace("Miscellaneous Help\n------------------", "")
+            examples = examples.replace("``docs/misc/prune-example.txt``:", "``docs/misc/prune-example.txt``.")
+            examples = examples.replace(".. highlight:: none\n", "")  # we don't support highlight
+            examples = re.sub("^(~+)$", lambda matches: "+" * len(matches.group(0)), examples, flags=re.MULTILINE)
             examples = examples.strip()
         if examples:
-            self.write_heading(write, 'EXAMPLES', '-')
+            self.write_heading(write, "EXAMPLES", "-")
             write(examples)
 
     def write_see_also(self, write, man_title):
-        see_also = self.see_also.get(man_title.replace('borg-', ''), ())
-        see_also = ['`borg-%s(1)`' % s for s in see_also]
-        see_also.insert(0, '`borg-common(1)`')
-        self.write_heading(write, 'SEE ALSO')
-        write(', '.join(see_also))
+        see_also = self.see_also.get(man_title.replace("borg-", ""), ())
+        see_also = ["`borg-%s(1)`" % s for s in see_also]
+        see_also.insert(0, "`borg-common(1)`")
+        self.write_heading(write, "SEE ALSO")
+        write(", ".join(see_also))
 
     def gen_man_page(self, name, rst):
         from docutils.writers import manpage
@@ -502,29 +512,29 @@ class build_man(Command):
         from docutils.parsers.rst import roles
 
         def issue(name, rawtext, text, lineno, inliner, options={}, content=[]):
-            return [inline(rawtext, '#' + text)], []
+            return [inline(rawtext, "#" + text)], []
 
-        roles.register_local_role('issue', issue)
+        roles.register_local_role("issue", issue)
         # We give the source_path so that docutils can find relative includes
         # as-if the document where located in the docs/ directory.
-        man_page = publish_string(source=rst, source_path='docs/%s.rst' % name, writer=manpage.Writer())
-        with open('docs/man/%s.1' % name, 'wb') as fd:
+        man_page = publish_string(source=rst, source_path="docs/%s.rst" % name, writer=manpage.Writer())
+        with open("docs/man/%s.1" % name, "wb") as fd:
             fd.write(man_page)
 
     def write_usage(self, write, parser):
         if any(len(o.option_strings) for o in parser._actions):
-            write(' [options] ', end='')
+            write(" [options] ", end="")
         for option in parser._actions:
             if option.option_strings:
                 continue
-            write(format_metavar(option), end=' ')
+            write(format_metavar(option), end=" ")
 
     def write_options(self, write, parser):
         for group in parser._action_groups:
-            if group.title == 'Common options' or not group._group_actions:
+            if group.title == "Common options" or not group._group_actions:
                 continue
-            title = 'arguments' if group.title == 'positional arguments' else group.title
-            self.write_heading(write, title, '+')
+            title = "arguments" if group.title == "positional arguments" else group.title
+            self.write_heading(write, title, "+")
             self.write_options_group(write, group)
 
     def write_options_group(self, write, group):
@@ -534,19 +544,19 @@ class build_man(Command):
         if is_positional_group(group):
             for option in group._group_actions:
                 write(option.metavar)
-                write(textwrap.indent(option.help or '', ' ' * 4))
+                write(textwrap.indent(option.help or "", " " * 4))
             return
 
         opts = OrderedDict()
 
         for option in group._group_actions:
             if option.metavar:
-                option_fmt = '%s ' + option.metavar
+                option_fmt = "%s " + option.metavar
             else:
-                option_fmt = '%s'
-            option_str = ', '.join(option_fmt % s for s in option.option_strings)
-            option_desc = textwrap.dedent((option.help or '') % option.__dict__)
-            opts[option_str] = textwrap.indent(option_desc, ' ' * 4)
+                option_fmt = "%s"
+            option_str = ", ".join(option_fmt % s for s in option.option_strings)
+            option_desc = textwrap.dedent((option.help or "") % option.__dict__)
+            opts[option_str] = textwrap.indent(option_desc, " " * 4)
 
         padding = len(max(opts)) + 1
 

+ 4 - 2
src/borg/__init__.py

@@ -9,11 +9,13 @@ __version_tuple__ = _v._version.release
 # assert that all semver components are integers
 # this is mainly to show errors when people repackage poorly
 # and setuptools_scm determines a 0.1.dev... version
-assert all(isinstance(v, int) for v in __version_tuple__), \
+assert all(isinstance(v, int) for v in __version_tuple__), (
     """\
 broken borgbackup version metadata: %r
 
 version metadata is obtained dynamically on installation via setuptools_scm,
 please ensure your git repo has the correct tags or you provide the version
 using SETUPTOOLS_SCM_PRETEND_VERSION in your build script.
-""" % __version__
+"""
+    % __version__
+)
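
For reference, the rewrites in these hunks are plain black output and can be reproduced for any snippet via black's Python API. A minimal sketch (it assumes the black package is installed; the line_length value below is illustrative, not necessarily this project's configured one):

    import black

    src = "x = {'a': 1, 'b': 2}\n"
    # format_str applies the same normalizations seen throughout this commit,
    # e.g. preferring double-quoted strings.
    print(black.format_str(src, mode=black.Mode(line_length=120)), end="")
    # -> x = {"a": 1, "b": 2}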

+ 4 - 3
src/borg/__main__.py

@@ -5,11 +5,12 @@ import os
 # containing the dll is not in the search path. The dll is shipped
 # with python in the "DLLs" folder, so let's add this folder
 # to the path. The folder is always in sys.path, get it from there.
-if sys.platform.startswith('win32'):
+if sys.platform.startswith("win32"):
     # Keep it an iterable to support multiple folder which contain "DLLs".
-    dll_path = (p for p in sys.path if 'DLLs' in os.path.normpath(p).split(os.path.sep))
-    os.environ['PATH'] = os.pathsep.join(dll_path) + os.pathsep + os.environ['PATH']
+    dll_path = (p for p in sys.path if "DLLs" in os.path.normpath(p).split(os.path.sep))
+    os.environ["PATH"] = os.pathsep.join(dll_path) + os.pathsep + os.environ["PATH"]
 
 
 from borg.archiver import main
+
 main()

File diff suppressed because it is too large
+ 246 - 191
src/borg/archive.py


File diff suppressed because it is too large
+ 370 - 223
src/borg/archiver.py


File diff suppressed because it is too large
+ 304 - 213
src/borg/cache.py


+ 19 - 19
src/borg/constants.py

@@ -7,7 +7,7 @@ ITEM_KEYS = frozenset(['path', 'source', 'rdev', 'chunks', 'chunks_healthy', 'ha
 # fmt: on
 
 # this is the set of keys that are always present in items:
-REQUIRED_ITEM_KEYS = frozenset(['path', 'mtime', ])
+REQUIRED_ITEM_KEYS = frozenset(["path", "mtime"])
 
 # this set must be kept complete, otherwise rebuild_manifest might malfunction:
 # fmt: off
@@ -19,7 +19,7 @@ ARCHIVE_KEYS = frozenset(['version', 'name', 'items', 'cmdline', 'hostname', 'us
 # fmt: on
 
 # this is the set of keys that are always present in archives:
-REQUIRED_ARCHIVE_KEYS = frozenset(['version', 'name', 'items', 'cmdline', 'time', ])
+REQUIRED_ARCHIVE_KEYS = frozenset(["version", "name", "items", "cmdline", "time"])
 
 # default umask, overridden by --umask, defaults to read/write only for owner
 UMASK_DEFAULT = 0o077
@@ -28,8 +28,8 @@ UMASK_DEFAULT = 0o077
 # forcing to 0o100XXX later
 STDIN_MODE_DEFAULT = 0o660
 
-CACHE_TAG_NAME = 'CACHEDIR.TAG'
-CACHE_TAG_CONTENTS = b'Signature: 8a477f597d28d172789f06886806bc55'
+CACHE_TAG_NAME = "CACHEDIR.TAG"
+CACHE_TAG_CONTENTS = b"Signature: 8a477f597d28d172789f06886806bc55"
 
 # A large, but not unreasonably large segment size. Always less than 2 GiB (for legacy file systems). We choose
 # 500 MiB which means that no indirection from the inode is needed for typical Linux file systems.
@@ -48,7 +48,7 @@ MAX_DATA_SIZE = 20971479
 MAX_OBJECT_SIZE = MAX_DATA_SIZE + 41 + 8  # see assertion at end of repository module
 
 # repo config max_segment_size value must be below this limit to stay within uint32 offsets:
-MAX_SEGMENT_SIZE_LIMIT = 2 ** 32 - MAX_OBJECT_SIZE
+MAX_SEGMENT_SIZE_LIMIT = 2**32 - MAX_OBJECT_SIZE
 
 # have one all-zero bytes object
 # we use it at all places where we need to detect or create all-zero buffers
@@ -71,12 +71,12 @@ FD_MAX_AGE = 4 * 60  # 4 minutes
 
 CHUNK_MIN_EXP = 19  # 2**19 == 512kiB
 CHUNK_MAX_EXP = 23  # 2**23 == 8MiB
-HASH_WINDOW_SIZE = 0xfff  # 4095B
+HASH_WINDOW_SIZE = 0xFFF  # 4095B
 HASH_MASK_BITS = 21  # results in ~2MiB chunks statistically
 
 # chunker algorithms
-CH_BUZHASH = 'buzhash'
-CH_FIXED = 'fixed'
+CH_BUZHASH = "buzhash"
+CH_FIXED = "fixed"
 
 # defaults, use --chunker-params to override
 CHUNKER_PARAMS = (CH_BUZHASH, CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE)
@@ -88,8 +88,8 @@ ITEMS_CHUNKER_PARAMS = (CH_BUZHASH, 15, 19, 17, HASH_WINDOW_SIZE)
 CH_DATA, CH_ALLOC, CH_HOLE = 0, 1, 2
 
 # operating mode of the files cache (for fast skipping of unchanged files)
-FILES_CACHE_MODE_UI_DEFAULT = 'ctime,size,inode'  # default for "borg create" command (CLI UI)
-FILES_CACHE_MODE_DISABLED = 'd'  # most borg commands do not use the files cache at all (disable)
+FILES_CACHE_MODE_UI_DEFAULT = "ctime,size,inode"  # default for "borg create" command (CLI UI)
+FILES_CACHE_MODE_DISABLED = "d"  # most borg commands do not use the files cache at all (disable)
 
 # return codes returned by borg command
 # when borg is killed by signal N, rc = 128 + N
@@ -101,30 +101,30 @@ EXIT_SIGNAL_BASE = 128  # terminated due to signal, rc = 128 + sig_no
 # never use datetime.isoformat(), it is evil. always use one of these:
 # datetime.strftime(ISO_FORMAT)  # output always includes .microseconds
 # datetime.strftime(ISO_FORMAT_NO_USECS)  # output never includes microseconds
-ISO_FORMAT_NO_USECS = '%Y-%m-%dT%H:%M:%S'
-ISO_FORMAT = ISO_FORMAT_NO_USECS + '.%f'
+ISO_FORMAT_NO_USECS = "%Y-%m-%dT%H:%M:%S"
+ISO_FORMAT = ISO_FORMAT_NO_USECS + ".%f"
 
-DASHES = '-' * 78
+DASHES = "-" * 78
 
 PBKDF2_ITERATIONS = 100000
 
 # https://www.rfc-editor.org/rfc/rfc9106.html#section-4-6.2
-ARGON2_ARGS = {'time_cost': 3, 'memory_cost': 2**16, 'parallelism': 4, 'type': 'id'}
+ARGON2_ARGS = {"time_cost": 3, "memory_cost": 2**16, "parallelism": 4, "type": "id"}
 ARGON2_SALT_BYTES = 16
 
 # Maps the CLI argument to our internal identifier for the format
 KEY_ALGORITHMS = {
     # encrypt-and-MAC, kdf: PBKDF2(HMAC−SHA256), encryption: AES256-CTR, authentication: HMAC-SHA256
-    'pbkdf2': 'sha256',
+    "pbkdf2": "sha256",
     # encrypt-then-MAC, kdf: argon2, encryption: chacha20, authentication: poly1305
-    'argon2': 'argon2 chacha20-poly1305',
+    "argon2": "argon2 chacha20-poly1305",
 }
 
 
 class KeyBlobStorage:
-    NO_STORAGE = 'no_storage'
-    KEYFILE = 'keyfile'
-    REPO = 'repository'
+    NO_STORAGE = "no_storage"
+    KEYFILE = "keyfile"
+    REPO = "repository"
 
 
 class KeyType:
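
One detail visible in the constants.py hunks above: regions fenced by "# fmt: off" and "# fmt: on" are left untouched by black, which is why ITEM_KEYS and ARCHIVE_KEYS keep their original single-quoted layout while the neighbouring assignments are rewritten. A small sketch of the mechanism (the key names here are shortened placeholders, not the real lists):

    # fmt: off
    ITEM_KEYS = frozenset(['path', 'source',
                           'rdev'])          # black leaves this fenced region as-is
    # fmt: on
    REQUIRED_ITEM_KEYS = frozenset(["path", "mtime"])  # normal formatting resumes here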

+ 19 - 22
src/borg/crypto/file_integrity.py

@@ -102,12 +102,12 @@ class FileHashingWrapper(FileLikeWrapper):
 
 
 class SHA512FileHashingWrapper(FileHashingWrapper):
-    ALGORITHM = 'SHA512'
+    ALGORITHM = "SHA512"
     FACTORY = hashlib.sha512
 
 
 class XXH64FileHashingWrapper(FileHashingWrapper):
-    ALGORITHM = 'XXH64'
+    ALGORITHM = "XXH64"
     FACTORY = StreamingXXH64
 
 
@@ -125,7 +125,7 @@ class IntegrityCheckedFile(FileLikeWrapper):
     def __init__(self, path, write, filename=None, override_fd=None, integrity_data=None):
         self.path = path
         self.writing = write
-        mode = 'wb' if write else 'rb'
+        mode = "wb" if write else "rb"
         self.file_fd = override_fd or open(path, mode)
         self.digests = {}
 
@@ -155,7 +155,7 @@ class IntegrityCheckedFile(FileLikeWrapper):
         # While Borg does not use anything except ASCII in these file names, it's important to use
         # the same encoding everywhere for portability. Using os.fsencode() would be wrong.
         filename = os.path.basename(filename or self.path)
-        self.hasher.update(('%10d' % len(filename)).encode())
+        self.hasher.update(("%10d" % len(filename)).encode())
         self.hasher.update(filename.encode())
 
     @classmethod
@@ -163,44 +163,41 @@ class IntegrityCheckedFile(FileLikeWrapper):
         try:
             integrity_data = json.loads(data)
             # Provisions for agility now, implementation later, but make sure the on-disk joint is oiled.
-            algorithm = integrity_data['algorithm']
+            algorithm = integrity_data["algorithm"]
             if algorithm not in SUPPORTED_ALGORITHMS:
-                logger.warning('Cannot verify integrity of %s: Unknown algorithm %r', path, algorithm)
+                logger.warning("Cannot verify integrity of %s: Unknown algorithm %r", path, algorithm)
                 return
-            digests = integrity_data['digests']
+            digests = integrity_data["digests"]
             # Require at least presence of the final digest
-            digests['final']
+            digests["final"]
             return algorithm, digests
         except (ValueError, TypeError, KeyError) as e:
-            logger.warning('Could not parse integrity data for %s: %s', path, e)
+            logger.warning("Could not parse integrity data for %s: %s", path, e)
             raise FileIntegrityError(path)
 
     def hash_part(self, partname, is_final=False):
         if not self.writing and not self.digests:
             return
-        self.hasher.update(('%10d' % len(partname)).encode())
+        self.hasher.update(("%10d" % len(partname)).encode())
         self.hasher.update(partname.encode())
         self.hasher.hash_length(seek_to_end=is_final)
         digest = self.hasher.hexdigest()
         if self.writing:
             self.digests[partname] = digest
-        elif self.digests and not compare_digest(self.digests.get(partname, ''), digest):
+        elif self.digests and not compare_digest(self.digests.get(partname, ""), digest):
             raise FileIntegrityError(self.path)
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         exception = exc_type is not None
         if not exception:
-            self.hash_part('final', is_final=True)
+            self.hash_part("final", is_final=True)
         self.hasher.__exit__(exc_type, exc_val, exc_tb)
         if exception:
             return
         if self.writing:
-            self.store_integrity_data(json.dumps({
-                'algorithm': self.hasher.ALGORITHM,
-                'digests': self.digests,
-            }))
+            self.store_integrity_data(json.dumps({"algorithm": self.hasher.ALGORITHM, "digests": self.digests}))
         elif self.digests:
-            logger.debug('Verified integrity of %s', self.path)
+            logger.debug("Verified integrity of %s", self.path)
 
     def store_integrity_data(self, data: str):
         self.integrity_data = data
@@ -214,12 +211,12 @@ class DetachedIntegrityCheckedFile(IntegrityCheckedFile):
         self.output_integrity_file = self.integrity_file_path(os.path.join(output_dir, filename))
 
     def load_integrity_data(self, path, integrity_data):
-        assert not integrity_data, 'Cannot pass explicit integrity_data to DetachedIntegrityCheckedFile'
+        assert not integrity_data, "Cannot pass explicit integrity_data to DetachedIntegrityCheckedFile"
         return self.read_integrity_file(self.path)
 
     @staticmethod
     def integrity_file_path(path):
-        return path + '.integrity'
+        return path + ".integrity"
 
     @classmethod
     def read_integrity_file(cls, path):
@@ -227,11 +224,11 @@ class DetachedIntegrityCheckedFile(IntegrityCheckedFile):
             with open(cls.integrity_file_path(path)) as fd:
                 return cls.parse_integrity_data(path, fd.read())
         except FileNotFoundError:
-            logger.info('No integrity file found for %s', path)
+            logger.info("No integrity file found for %s", path)
         except OSError as e:
-            logger.warning('Could not read integrity file for %s: %s', path, e)
+            logger.warning("Could not read integrity file for %s: %s", path, e)
             raise FileIntegrityError(path)
 
     def store_integrity_data(self, data: str):
-        with open(self.output_integrity_file, 'w') as fd:
+        with open(self.output_integrity_file, "w") as fd:
             fd.write(data)

+ 140 - 154
src/borg/crypto/key.py

@@ -59,9 +59,11 @@ class UnsupportedKeyFormatError(Error):
 
 
 class TAMRequiredError(IntegrityError):
-    __doc__ = textwrap.dedent("""
+    __doc__ = textwrap.dedent(
+        """
     Manifest is unauthenticated, but it is required for this repository. Is somebody attacking you?
-    """).strip()
+    """
+    ).strip()
     traceback = False
 
 
@@ -71,11 +73,12 @@ class TAMInvalid(IntegrityError):
 
     def __init__(self):
         # Error message becomes: "Data integrity error: Manifest authentication did not verify"
-        super().__init__('Manifest authentication did not verify')
+        super().__init__("Manifest authentication did not verify")
 
 
 class TAMUnsupportedSuiteError(IntegrityError):
     """Could not verify manifest: Unsupported suite {!r}; a newer version is needed."""
+
     traceback = False
 
 
@@ -110,7 +113,7 @@ def key_factory(repository, manifest_data):
 
 def tam_required_file(repository):
     security_dir = get_security_dir(bin_to_hex(repository.id))
-    return os.path.join(security_dir, 'tam_required')
+    return os.path.join(security_dir, "tam_required")
 
 
 def tam_required(repository):
@@ -126,9 +129,10 @@ def uses_same_id_hash(other_key, key):
     old_blake2_ids = (Blake2RepoKey, Blake2KeyfileKey)
     new_blake2_ids = (Blake2AESOCBRepoKey, Blake2AESOCBKeyfileKey, Blake2CHPORepoKey, Blake2CHPOKeyfileKey)
     same_ids = (
-        isinstance(other_key, old_hmac_sha256_ids + new_hmac_sha256_ids) and isinstance(key, new_hmac_sha256_ids)
-        or
-        isinstance(other_key, old_blake2_ids + new_blake2_ids) and isinstance(key, new_blake2_ids)
+        isinstance(other_key, old_hmac_sha256_ids + new_hmac_sha256_ids)
+        and isinstance(key, new_hmac_sha256_ids)
+        or isinstance(other_key, old_blake2_ids + new_blake2_ids)
+        and isinstance(key, new_blake2_ids)
     )
     return same_ids
 
@@ -140,10 +144,10 @@ class KeyBase:
     TYPES_ACCEPTABLE = None  # override in subclasses
 
     # Human-readable name
-    NAME = 'UNDEFINED'
+    NAME = "UNDEFINED"
 
     # Name used in command line / API (e.g. borg init --encryption=...)
-    ARG_NAME = 'UNDEFINED'
+    ARG_NAME = "UNDEFINED"
 
     # Storage type (no key blob storage / keyfile / repo)
     STORAGE = KeyBlobStorage.NO_STORAGE
@@ -167,13 +171,12 @@ class KeyBase:
         self.target = None  # key location file path / repo obj
         # Some commands write new chunks (e.g. rename) but don't take a --compression argument. This duplicates
         # the default used by those commands who do take a --compression argument.
-        self.compressor = Compressor('lz4')
+        self.compressor = Compressor("lz4")
         self.decompress = self.compressor.decompress
         self.tam_required = True
 
     def id_hash(self, data):
-        """Return HMAC hash using the "id" HMAC key
-        """
+        """Return HMAC hash using the "id" HMAC key"""
         raise NotImplementedError
 
     def encrypt(self, id, data, compress=True):
@@ -186,83 +189,79 @@ class KeyBase:
         if id and id != Manifest.MANIFEST_ID:
             id_computed = self.id_hash(data)
             if not hmac.compare_digest(id_computed, id):
-                raise IntegrityError('Chunk %s: id verification failed' % bin_to_hex(id))
+                raise IntegrityError("Chunk %s: id verification failed" % bin_to_hex(id))
 
     def assert_type(self, type_byte, id=None):
         if type_byte not in self.TYPES_ACCEPTABLE:
-            id_str = bin_to_hex(id) if id is not None else '(unknown)'
-            raise IntegrityError(f'Chunk {id_str}: Invalid encryption envelope')
+            id_str = bin_to_hex(id) if id is not None else "(unknown)"
+            raise IntegrityError(f"Chunk {id_str}: Invalid encryption envelope")
 
     def _tam_key(self, salt, context):
         return hkdf_hmac_sha512(
             ikm=self.id_key + self.enc_key + self.enc_hmac_key,
             salt=salt,
-            info=b'borg-metadata-authentication-' + context,
-            output_length=64
+            info=b"borg-metadata-authentication-" + context,
+            output_length=64,
         )
 
-    def pack_and_authenticate_metadata(self, metadata_dict, context=b'manifest'):
+    def pack_and_authenticate_metadata(self, metadata_dict, context=b"manifest"):
         metadata_dict = StableDict(metadata_dict)
-        tam = metadata_dict['tam'] = StableDict({
-            'type': 'HKDF_HMAC_SHA512',
-            'hmac': bytes(64),
-            'salt': os.urandom(64),
-        })
+        tam = metadata_dict["tam"] = StableDict({"type": "HKDF_HMAC_SHA512", "hmac": bytes(64), "salt": os.urandom(64)})
         packed = msgpack.packb(metadata_dict)
-        tam_key = self._tam_key(tam['salt'], context)
+        tam_key = self._tam_key(tam["salt"], context)
-        tam['hmac'] = hmac.digest(tam_key, packed, 'sha512')
+        tam["hmac"] = hmac.digest(tam_key, packed, "sha512")
         return msgpack.packb(metadata_dict)
 
     def unpack_and_verify_manifest(self, data, force_tam_not_required=False):
         """Unpack msgpacked *data* and return (object, did_verify)."""
-        if data.startswith(b'\xc1' * 4):
+        if data.startswith(b"\xc1" * 4):
             # This is a manifest from the future, we can't read it.
             raise UnsupportedManifestError()
         tam_required = self.tam_required
         if force_tam_not_required and tam_required:
-            logger.warning('Manifest authentication DISABLED.')
+            logger.warning("Manifest authentication DISABLED.")
             tam_required = False
         data = bytearray(data)
-        unpacker = get_limited_unpacker('manifest')
+        unpacker = get_limited_unpacker("manifest")
         unpacker.feed(data)
         unpacked = unpacker.unpack()
-        if 'tam' not in unpacked:
+        if "tam" not in unpacked:
             if tam_required:
                 raise TAMRequiredError(self.repository._location.canonical_path())
             else:
-                logger.debug('TAM not found and not required')
+                logger.debug("TAM not found and not required")
                 return unpacked, False
-        tam = unpacked.pop('tam', None)
+        tam = unpacked.pop("tam", None)
         if not isinstance(tam, dict):
             raise TAMInvalid()
-        tam_type = tam.get('type', '<none>')
-        if tam_type != 'HKDF_HMAC_SHA512':
+        tam_type = tam.get("type", "<none>")
+        if tam_type != "HKDF_HMAC_SHA512":
             if tam_required:
                 raise TAMUnsupportedSuiteError(repr(tam_type))
             else:
-                logger.debug('Ignoring TAM made with unsupported suite, since TAM is not required: %r', tam_type)
+                logger.debug("Ignoring TAM made with unsupported suite, since TAM is not required: %r", tam_type)
                 return unpacked, False
-        tam_hmac = tam.get('hmac')
-        tam_salt = tam.get('salt')
+        tam_hmac = tam.get("hmac")
+        tam_salt = tam.get("salt")
         if not isinstance(tam_salt, (bytes, str)) or not isinstance(tam_hmac, (bytes, str)):
             raise TAMInvalid()
         tam_hmac = want_bytes(tam_hmac)  # legacy
         tam_salt = want_bytes(tam_salt)  # legacy
         offset = data.index(tam_hmac)
-        data[offset:offset + 64] = bytes(64)
-        tam_key = self._tam_key(tam_salt, context=b'manifest')
-        calculated_hmac = hmac.digest(tam_key, data, 'sha512')
+        data[offset : offset + 64] = bytes(64)
+        tam_key = self._tam_key(tam_salt, context=b"manifest")
+        calculated_hmac = hmac.digest(tam_key, data, "sha512")
         if not hmac.compare_digest(calculated_hmac, tam_hmac):
             raise TAMInvalid()
-        logger.debug('TAM-verified manifest')
+        logger.debug("TAM-verified manifest")
         return unpacked, True
 
 
 class PlaintextKey(KeyBase):
     TYPE = KeyType.PLAINTEXT
     TYPES_ACCEPTABLE = {TYPE}
-    NAME = 'plaintext'
-    ARG_NAME = 'none'
+    NAME = "plaintext"
+    ARG_NAME = "none"
     STORAGE = KeyBlobStorage.NO_STORAGE
 
     chunk_seed = 0
@@ -287,7 +286,7 @@ class PlaintextKey(KeyBase):
     def encrypt(self, id, data, compress=True):
         if compress:
             data = self.compressor.compress(data)
-        return b''.join([self.TYPE_STR, data])
+        return b"".join([self.TYPE_STR, data])
 
     def decrypt(self, id, data, decompress=True):
         self.assert_type(data[0], id)
@@ -364,8 +363,7 @@ class AESKeyBase(KeyBase):
     def encrypt(self, id, data, compress=True):
         if compress:
             data = self.compressor.compress(data)
-        next_iv = self.nonce_manager.ensure_reservation(self.cipher.next_iv(),
-                                                        self.cipher.block_count(len(data)))
+        next_iv = self.nonce_manager.ensure_reservation(self.cipher.next_iv(), self.cipher.block_count(len(data)))
         return self.cipher.encrypt(data, header=self.TYPE_STR, iv=next_iv)
 
     def decrypt(self, id, data, decompress=True):
@@ -395,12 +393,10 @@ class AESKeyBase(KeyBase):
         chunk_seed = bytes_to_int(data[96:100])
         # Convert to signed int32
         if chunk_seed & 0x80000000:
-            chunk_seed = chunk_seed - 0xffffffff - 1
+            chunk_seed = chunk_seed - 0xFFFFFFFF - 1
         self.init_from_given_data(
-            enc_key=data[0:32],
-            enc_hmac_key=data[32:64],
-            id_key=data[64:96],
-            chunk_seed=chunk_seed)
+            enc_key=data[0:32], enc_hmac_key=data[32:64], id_key=data[64:96], chunk_seed=chunk_seed
+        )
 
     def init_ciphers(self, manifest_data=None):
         self.cipher = self.CIPHERSUITE(mac_key=self.enc_hmac_key, enc_key=self.enc_key, header_len=1, aad_offset=1)
@@ -418,13 +414,13 @@ class AESKeyBase(KeyBase):


 class FlexiKey:
-    FILE_ID = 'BORG_KEY'
+    FILE_ID = "BORG_KEY"

     @classmethod
     def detect(cls, repository, manifest_data):
         key = cls(repository)
         target = key.find_key()
-        prompt = 'Enter passphrase for key %s: ' % target
+        prompt = "Enter passphrase for key %s: " % target
         passphrase = Passphrase.env_passphrase()
         if passphrase is None:
             passphrase = Passphrase()
@@ -449,18 +445,18 @@ class FlexiKey:
             data = msgpack.unpackb(data)
             key = Key(internal_dict=data)
             if key.version != 1:
-                raise IntegrityError('Invalid key file header')
+                raise IntegrityError("Invalid key file header")
             self.repository_id = key.repository_id
             self.enc_key = key.enc_key
             self.enc_hmac_key = key.enc_hmac_key
             self.id_key = key.id_key
             self.chunk_seed = key.chunk_seed
-            self.tam_required = key.get('tam_required', tam_required(self.repository))
+            self.tam_required = key.get("tam_required", tam_required(self.repository))
             return True
         return False

     def decrypt_key_file(self, data, passphrase):
-        unpacker = get_limited_unpacker('key')
+        unpacker = get_limited_unpacker("key")
         unpacker.feed(data)
         data = unpacker.unpack()
         encrypted_key = EncryptedKey(internal_dict=data)
@@ -468,9 +464,9 @@ class FlexiKey:
             raise UnsupportedKeyFormatError()
         else:
             self._encrypted_key_algorithm = encrypted_key.algorithm
-            if encrypted_key.algorithm == 'sha256':
+            if encrypted_key.algorithm == "sha256":
                 return self.decrypt_key_file_pbkdf2(encrypted_key, passphrase)
-            elif encrypted_key.algorithm == 'argon2 chacha20-poly1305':
+            elif encrypted_key.algorithm == "argon2 chacha20-poly1305":
                 return self.decrypt_key_file_argon2(encrypted_key, passphrase)
             else:
                 raise UnsupportedKeyFormatError()
@@ -479,7 +475,7 @@ class FlexiKey:
     def pbkdf2(passphrase, salt, iterations, output_len_in_bytes):
         if os.environ.get("BORG_TESTONLY_WEAKEN_KDF") == "1":
             iterations = 1
-        return pbkdf2_hmac('sha256', passphrase.encode('utf-8'), salt, iterations, output_len_in_bytes)
+        return pbkdf2_hmac("sha256", passphrase.encode("utf-8"), salt, iterations, output_len_in_bytes)

     @staticmethod
     def argon2(
@@ -489,18 +485,14 @@ class FlexiKey:
         time_cost: int,
         memory_cost: int,
         parallelism: int,
-        type: Literal['i', 'd', 'id']
+        type: Literal["i", "d", "id"],
     ) -> bytes:
         if os.environ.get("BORG_TESTONLY_WEAKEN_KDF") == "1":
             time_cost = 1
             parallelism = 1
             # 8 is the smallest value that avoids the "Memory cost is too small" exception
             memory_cost = 8
-        type_map = {
-            'i': argon2.low_level.Type.I,
-            'd': argon2.low_level.Type.D,
-            'id': argon2.low_level.Type.ID,
-        }
+        type_map = {"i": argon2.low_level.Type.I, "d": argon2.low_level.Type.D, "id": argon2.low_level.Type.ID}
         key = argon2.low_level.hash_secret_raw(
             secret=passphrase.encode("utf-8"),
             hash_len=output_len_in_bytes,
@@ -514,7 +506,7 @@ class FlexiKey:

     def decrypt_key_file_pbkdf2(self, encrypted_key, passphrase):
         key = self.pbkdf2(passphrase, encrypted_key.salt, encrypted_key.iterations, 32)
-        data = AES(key, b'\0'*16).decrypt(encrypted_key.data)
+        data = AES(key, b"\0" * 16).decrypt(encrypted_key.data)
         if hmac.compare_digest(hmac_sha256(key, data), encrypted_key.hash):
             return data
         return None
@@ -536,44 +528,32 @@ class FlexiKey:
             return None

     def encrypt_key_file(self, data, passphrase, algorithm):
-        if algorithm == 'sha256':
+        if algorithm == "sha256":
             return self.encrypt_key_file_pbkdf2(data, passphrase)
-        elif algorithm == 'argon2 chacha20-poly1305':
+        elif algorithm == "argon2 chacha20-poly1305":
             return self.encrypt_key_file_argon2(data, passphrase)
         else:
-            raise ValueError(f'Unexpected algorithm: {algorithm}')
+            raise ValueError(f"Unexpected algorithm: {algorithm}")

     def encrypt_key_file_pbkdf2(self, data, passphrase):
         salt = os.urandom(32)
         iterations = PBKDF2_ITERATIONS
         key = self.pbkdf2(passphrase, salt, iterations, 32)
         hash = hmac_sha256(key, data)
-        cdata = AES(key, b'\0'*16).encrypt(data)
-        enc_key = EncryptedKey(
-            version=1,
-            salt=salt,
-            iterations=iterations,
-            algorithm='sha256',
-            hash=hash,
-            data=cdata,
-        )
+        cdata = AES(key, b"\0" * 16).encrypt(data)
+        enc_key = EncryptedKey(version=1, salt=salt, iterations=iterations, algorithm="sha256", hash=hash, data=cdata)
         return msgpack.packb(enc_key.as_dict())

     def encrypt_key_file_argon2(self, data, passphrase):
         salt = os.urandom(ARGON2_SALT_BYTES)
-        key = self.argon2(
-            passphrase,
-            output_len_in_bytes=32,
-            salt=salt,
-            **ARGON2_ARGS,
-        )
+        key = self.argon2(passphrase, output_len_in_bytes=32, salt=salt, **ARGON2_ARGS)
         ae_cipher = CHACHA20_POLY1305(key=key, iv=0, header_len=0, aad_offset=0)
         encrypted_key = EncryptedKey(
             version=1,
-            algorithm='argon2 chacha20-poly1305',
+            algorithm="argon2 chacha20-poly1305",
             salt=salt,
             data=ae_cipher.encrypt(data),
-            **{'argon2_' + k: v for k, v in ARGON2_ARGS.items()},
+            **{"argon2_" + k: v for k, v in ARGON2_ARGS.items()},
         )
         return msgpack.packb(encrypted_key.as_dict())

@@ -588,7 +568,7 @@ class FlexiKey:
             tam_required=self.tam_required,
         )
         data = self.encrypt_key_file(msgpack.packb(key.as_dict()), passphrase, algorithm)
-        key_data = '\n'.join(textwrap.wrap(b2a_base64(data).decode('ascii')))
+        key_data = "\n".join(textwrap.wrap(b2a_base64(data).decode("ascii")))
         return key_data

     def change_passphrase(self, passphrase=None):
@@ -612,22 +592,23 @@ class FlexiKey:
                 enc_key=other_key.enc_key,
                 enc_hmac_key=other_key.enc_hmac_key,
                 id_key=other_key.id_key,
-                chunk_seed=other_key.chunk_seed)
+                chunk_seed=other_key.chunk_seed,
+            )
             passphrase = other_key._passphrase
         else:
             key.init_from_random_data()
             passphrase = Passphrase.new(allow_empty=True)
         key.init_ciphers()
         target = key.get_new_target(args)
-        key.save(target, passphrase, create=True, algorithm=KEY_ALGORITHMS['argon2'])
+        key.save(target, passphrase, create=True, algorithm=KEY_ALGORITHMS["argon2"])
         logger.info('Key in "%s" created.' % target)
-        logger.info('Keep this key safe. Your data will be inaccessible without it.')
+        logger.info("Keep this key safe. Your data will be inaccessible without it.")
         return key

     def sanity_check(self, filename, id):
-        file_id = self.FILE_ID.encode() + b' '
+        file_id = self.FILE_ID.encode() + b" "
         repo_id = hexlify(id)
-        with open(filename, 'rb') as fd:
+        with open(filename, "rb") as fd:
             # we do the magic / id check in binary mode to avoid stumbling over
             # decoding errors if somebody has binary files in the keys dir for some reason.
             if fd.read(len(file_id)) != file_id:
@@ -653,7 +634,7 @@ class FlexiKey:
                 raise RepoKeyNotFoundError(loc) from None
             return loc
         else:
-            raise TypeError('Unsupported borg key storage type')
+            raise TypeError("Unsupported borg key storage type")

     def get_existing_or_new_target(self, args):
         keyfile = self._find_key_file_from_environment()
@@ -683,10 +664,10 @@ class FlexiKey:
         elif self.STORAGE == KeyBlobStorage.REPO:
             return self.repository
         else:
-            raise TypeError('Unsupported borg key storage type')
+            raise TypeError("Unsupported borg key storage type")

     def _find_key_file_from_environment(self):
-        keyfile = os.environ.get('BORG_KEY_FILE')
+        keyfile = os.environ.get("BORG_KEY_FILE")
         if keyfile:
             return os.path.abspath(keyfile)

@@ -696,17 +677,17 @@ class FlexiKey:
         i = 1
         while os.path.exists(path):
             i += 1
-            path = filename + '.%d' % i
+            path = filename + ".%d" % i
         return path

     def load(self, target, passphrase):
         if self.STORAGE == KeyBlobStorage.KEYFILE:
             with open(target) as fd:
-                key_data = ''.join(fd.readlines()[1:])
+                key_data = "".join(fd.readlines()[1:])
         elif self.STORAGE == KeyBlobStorage.REPO:
             # While the repository is encrypted, we consider a repokey repository with a blank
             # passphrase an unencrypted repository.
-            self.logically_encrypted = passphrase != ''
+            self.logically_encrypted = passphrase != ""

             # what we get in target is just a repo location, but we already have the repo obj:
             target = self.repository
@@ -715,9 +696,9 @@ class FlexiKey:
                 # if we got an empty key, it means there is no key.
                 loc = target._location.canonical_path()
                 raise RepoKeyNotFoundError(loc) from None
-            key_data = key_data.decode('utf-8')  # remote repo: msgpack issue #99, getting bytes
+            key_data = key_data.decode("utf-8")  # remote repo: msgpack issue #99, getting bytes
         else:
-            raise TypeError('Unsupported borg key storage type')
+            raise TypeError("Unsupported borg key storage type")
         success = self._load(key_data, passphrase)
         if success:
             self.target = target
@@ -732,31 +713,31 @@ class FlexiKey:
                 # see issue #6036
                 raise Error('Aborting because key in "%s" already exists.' % target)
             with SaveFile(target) as fd:
-                fd.write(f'{self.FILE_ID} {bin_to_hex(self.repository_id)}\n')
+                fd.write(f"{self.FILE_ID} {bin_to_hex(self.repository_id)}\n")
                 fd.write(key_data)
-                fd.write('\n')
+                fd.write("\n")
         elif self.STORAGE == KeyBlobStorage.REPO:
-            self.logically_encrypted = passphrase != ''
-            key_data = key_data.encode('utf-8')  # remote repo: msgpack issue #99, giving bytes
+            self.logically_encrypted = passphrase != ""
+            key_data = key_data.encode("utf-8")  # remote repo: msgpack issue #99, giving bytes
             target.save_key(key_data)
         else:
-            raise TypeError('Unsupported borg key storage type')
+            raise TypeError("Unsupported borg key storage type")
         self.target = target

     def remove(self, target):
         if self.STORAGE == KeyBlobStorage.KEYFILE:
             os.remove(target)
         elif self.STORAGE == KeyBlobStorage.REPO:
-            target.save_key(b'')  # save empty key (no new api at remote repo necessary)
+            target.save_key(b"")  # save empty key (no new api at remote repo necessary)
         else:
-            raise TypeError('Unsupported borg key storage type')
+            raise TypeError("Unsupported borg key storage type")


 class KeyfileKey(ID_HMAC_SHA_256, AESKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.KEYFILE, KeyType.REPO, KeyType.PASSPHRASE}
     TYPE = KeyType.KEYFILE
-    NAME = 'key file'
-    ARG_NAME = 'keyfile'
+    NAME = "key file"
+    ARG_NAME = "keyfile"
     STORAGE = KeyBlobStorage.KEYFILE
     CIPHERSUITE = AES256_CTR_HMAC_SHA256

@@ -764,8 +745,8 @@ class KeyfileKey(ID_HMAC_SHA_256, AESKeyBase, FlexiKey):
 class RepoKey(ID_HMAC_SHA_256, AESKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.KEYFILE, KeyType.REPO, KeyType.PASSPHRASE}
     TYPE = KeyType.REPO
-    NAME = 'repokey'
-    ARG_NAME = 'repokey'
+    NAME = "repokey"
+    ARG_NAME = "repokey"
     STORAGE = KeyBlobStorage.REPO
     CIPHERSUITE = AES256_CTR_HMAC_SHA256

@@ -773,8 +754,8 @@ class RepoKey(ID_HMAC_SHA_256, AESKeyBase, FlexiKey):
 class Blake2KeyfileKey(ID_BLAKE2b_256, AESKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.BLAKE2KEYFILE, KeyType.BLAKE2REPO}
     TYPE = KeyType.BLAKE2KEYFILE
-    NAME = 'key file BLAKE2b'
-    ARG_NAME = 'keyfile-blake2'
+    NAME = "key file BLAKE2b"
+    ARG_NAME = "keyfile-blake2"
     STORAGE = KeyBlobStorage.KEYFILE
     CIPHERSUITE = AES256_CTR_BLAKE2b

@@ -782,8 +763,8 @@ class Blake2KeyfileKey(ID_BLAKE2b_256, AESKeyBase, FlexiKey):
 class Blake2RepoKey(ID_BLAKE2b_256, AESKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.BLAKE2KEYFILE, KeyType.BLAKE2REPO}
     TYPE = KeyType.BLAKE2REPO
-    NAME = 'repokey BLAKE2b'
-    ARG_NAME = 'repokey-blake2'
+    NAME = "repokey BLAKE2b"
+    ARG_NAME = "repokey-blake2"
     STORAGE = KeyBlobStorage.REPO
     CIPHERSUITE = AES256_CTR_BLAKE2b

@@ -810,7 +791,7 @@ class AuthenticatedKeyBase(AESKeyBase, FlexiKey):
     def encrypt(self, id, data, compress=True):
         if compress:
             data = self.compressor.compress(data)
-        return b''.join([self.TYPE_STR, data])
+        return b"".join([self.TYPE_STR, data])

     def decrypt(self, id, data, decompress=True):
         self.assert_type(data[0], id)
@@ -825,15 +806,15 @@ class AuthenticatedKeyBase(AESKeyBase, FlexiKey):
 class AuthenticatedKey(ID_HMAC_SHA_256, AuthenticatedKeyBase):
     TYPE = KeyType.AUTHENTICATED
     TYPES_ACCEPTABLE = {TYPE}
-    NAME = 'authenticated'
-    ARG_NAME = 'authenticated'
+    NAME = "authenticated"
+    ARG_NAME = "authenticated"


 class Blake2AuthenticatedKey(ID_BLAKE2b_256, AuthenticatedKeyBase):
     TYPE = KeyType.BLAKE2AUTHENTICATED
     TYPES_ACCEPTABLE = {TYPE}
-    NAME = 'authenticated BLAKE2b'
-    ARG_NAME = 'authenticated-blake2'
+    NAME = "authenticated BLAKE2b"
+    ARG_NAME = "authenticated-blake2"


 # ------------ new crypto ------------
@@ -862,17 +843,17 @@ class AEADKeyBase(KeyBase):

     logically_encrypted = True

-    MAX_IV = 2 ** 48 - 1
+    MAX_IV = 2**48 - 1

     def encrypt(self, id, data, compress=True):
         # to encrypt new data in this session we use always self.cipher and self.sessionid
         if compress:
             data = self.compressor.compress(data)
-        reserved = b'\0'
+        reserved = b"\0"
         iv = self.cipher.next_iv()
         if iv > self.MAX_IV:  # see the data-structures docs about why the IV range is enough
             raise IntegrityError("IV overflow, should never happen.")
-        iv_48bit = iv.to_bytes(6, 'big')
+        iv_48bit = iv.to_bytes(6, "big")
         header = self.TYPE_STR + reserved + iv_48bit + self.sessionid
         return self.cipher.encrypt(data, header=header, iv=iv, aad=id)

@@ -881,7 +862,7 @@ class AEADKeyBase(KeyBase):
         self.assert_type(data[0], id)
         iv_48bit = data[2:8]
         sessionid = data[8:32]
-        iv = int.from_bytes(iv_48bit, 'big')
+        iv = int.from_bytes(iv_48bit, "big")
         cipher = self._get_cipher(sessionid, iv)
         try:
             payload = cipher.decrypt(data, aad=id)
@@ -908,27 +889,25 @@ class AEADKeyBase(KeyBase):
         chunk_seed = bytes_to_int(data[96:100])
         # Convert to signed int32
         if chunk_seed & 0x80000000:
-            chunk_seed = chunk_seed - 0xffffffff - 1
+            chunk_seed = chunk_seed - 0xFFFFFFFF - 1
         self.init_from_given_data(
-            enc_key=data[0:32],
-            enc_hmac_key=data[32:64],
-            id_key=data[64:96],
-            chunk_seed=chunk_seed)
+            enc_key=data[0:32], enc_hmac_key=data[32:64], id_key=data[64:96], chunk_seed=chunk_seed
+        )

     def _get_session_key(self, sessionid):
         assert len(sessionid) == 24  # 192bit
         key = hkdf_hmac_sha512(
             ikm=self.enc_key + self.enc_hmac_key,
             salt=sessionid,
-            info=b'borg-session-key-' + self.CIPHERSUITE.__name__.encode(),
-            output_length=32
+            info=b"borg-session-key-" + self.CIPHERSUITE.__name__.encode(),
+            output_length=32,
         )
         return key

     def _get_cipher(self, sessionid, iv):
         assert isinstance(iv, int)
         key = self._get_session_key(sessionid)
-        cipher = self.CIPHERSUITE(key=key, iv=iv, header_len=1+1+6+24, aad_offset=0)
+        cipher = self.CIPHERSUITE(key=key, iv=iv, header_len=1 + 1 + 6 + 24, aad_offset=0)
         return cipher

     def init_ciphers(self, manifest_data=None, iv=0):
@@ -940,8 +919,8 @@ class AEADKeyBase(KeyBase):
 class AESOCBKeyfileKey(ID_HMAC_SHA_256, AEADKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.AESOCBKEYFILE, KeyType.AESOCBREPO}
     TYPE = KeyType.AESOCBKEYFILE
-    NAME = 'key file AES-OCB'
-    ARG_NAME = 'keyfile-aes-ocb'
+    NAME = "key file AES-OCB"
+    ARG_NAME = "keyfile-aes-ocb"
     STORAGE = KeyBlobStorage.KEYFILE
     CIPHERSUITE = AES256_OCB

@@ -949,8 +928,8 @@ class AESOCBKeyfileKey(ID_HMAC_SHA_256, AEADKeyBase, FlexiKey):
 class AESOCBRepoKey(ID_HMAC_SHA_256, AEADKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.AESOCBKEYFILE, KeyType.AESOCBREPO}
     TYPE = KeyType.AESOCBREPO
-    NAME = 'repokey AES-OCB'
-    ARG_NAME = 'repokey-aes-ocb'
+    NAME = "repokey AES-OCB"
+    ARG_NAME = "repokey-aes-ocb"
     STORAGE = KeyBlobStorage.REPO
     CIPHERSUITE = AES256_OCB

@@ -958,8 +937,8 @@ class AESOCBRepoKey(ID_HMAC_SHA_256, AEADKeyBase, FlexiKey):
 class CHPOKeyfileKey(ID_HMAC_SHA_256, AEADKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.CHPOKEYFILE, KeyType.CHPOREPO}
     TYPE = KeyType.CHPOKEYFILE
-    NAME = 'key file ChaCha20-Poly1305'
-    ARG_NAME = 'keyfile-chacha20-poly1305'
+    NAME = "key file ChaCha20-Poly1305"
+    ARG_NAME = "keyfile-chacha20-poly1305"
     STORAGE = KeyBlobStorage.KEYFILE
     CIPHERSUITE = CHACHA20_POLY1305

@@ -967,8 +946,8 @@ class CHPOKeyfileKey(ID_HMAC_SHA_256, AEADKeyBase, FlexiKey):
 class CHPORepoKey(ID_HMAC_SHA_256, AEADKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.CHPOKEYFILE, KeyType.CHPOREPO}
     TYPE = KeyType.CHPOREPO
-    NAME = 'repokey ChaCha20-Poly1305'
-    ARG_NAME = 'repokey-chacha20-poly1305'
+    NAME = "repokey ChaCha20-Poly1305"
+    ARG_NAME = "repokey-chacha20-poly1305"
     STORAGE = KeyBlobStorage.REPO
     CIPHERSUITE = CHACHA20_POLY1305

@@ -976,8 +955,8 @@ class CHPORepoKey(ID_HMAC_SHA_256, AEADKeyBase, FlexiKey):
 class Blake2AESOCBKeyfileKey(ID_BLAKE2b_256, AEADKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.BLAKE2AESOCBKEYFILE, KeyType.BLAKE2AESOCBREPO}
     TYPE = KeyType.BLAKE2AESOCBKEYFILE
-    NAME = 'key file BLAKE2b AES-OCB'
-    ARG_NAME = 'keyfile-blake2-aes-ocb'
+    NAME = "key file BLAKE2b AES-OCB"
+    ARG_NAME = "keyfile-blake2-aes-ocb"
     STORAGE = KeyBlobStorage.KEYFILE
     CIPHERSUITE = AES256_OCB

@@ -985,8 +964,8 @@ class Blake2AESOCBKeyfileKey(ID_BLAKE2b_256, AEADKeyBase, FlexiKey):
 class Blake2AESOCBRepoKey(ID_BLAKE2b_256, AEADKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.BLAKE2AESOCBKEYFILE, KeyType.BLAKE2AESOCBREPO}
     TYPE = KeyType.BLAKE2AESOCBREPO
-    NAME = 'repokey BLAKE2b AES-OCB'
-    ARG_NAME = 'repokey-blake2-aes-ocb'
+    NAME = "repokey BLAKE2b AES-OCB"
+    ARG_NAME = "repokey-blake2-aes-ocb"
     STORAGE = KeyBlobStorage.REPO
     CIPHERSUITE = AES256_OCB

@@ -994,8 +973,8 @@ class Blake2AESOCBRepoKey(ID_BLAKE2b_256, AEADKeyBase, FlexiKey):
 class Blake2CHPOKeyfileKey(ID_BLAKE2b_256, AEADKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.BLAKE2CHPOKEYFILE, KeyType.BLAKE2CHPOREPO}
     TYPE = KeyType.BLAKE2CHPOKEYFILE
-    NAME = 'key file BLAKE2b ChaCha20-Poly1305'
-    ARG_NAME = 'keyfile-blake2-chacha20-poly1305'
+    NAME = "key file BLAKE2b ChaCha20-Poly1305"
+    ARG_NAME = "keyfile-blake2-chacha20-poly1305"
     STORAGE = KeyBlobStorage.KEYFILE
     CIPHERSUITE = CHACHA20_POLY1305

@@ -1003,26 +982,33 @@ class Blake2CHPOKeyfileKey(ID_BLAKE2b_256, AEADKeyBase, FlexiKey):
 class Blake2CHPORepoKey(ID_BLAKE2b_256, AEADKeyBase, FlexiKey):
     TYPES_ACCEPTABLE = {KeyType.BLAKE2CHPOKEYFILE, KeyType.BLAKE2CHPOREPO}
     TYPE = KeyType.BLAKE2CHPOREPO
-    NAME = 'repokey BLAKE2b ChaCha20-Poly1305'
-    ARG_NAME = 'repokey-blake2-chacha20-poly1305'
+    NAME = "repokey BLAKE2b ChaCha20-Poly1305"
+    ARG_NAME = "repokey-blake2-chacha20-poly1305"
     STORAGE = KeyBlobStorage.REPO
     CIPHERSUITE = CHACHA20_POLY1305


 LEGACY_KEY_TYPES = (
     # legacy (AES-CTR based) crypto
-    KeyfileKey, RepoKey,
-    Blake2KeyfileKey, Blake2RepoKey,
+    KeyfileKey,
+    RepoKey,
+    Blake2KeyfileKey,
+    Blake2RepoKey,
 )

 AVAILABLE_KEY_TYPES = (
     # these are available encryption modes for new repositories
     # not encrypted modes
     PlaintextKey,
-    AuthenticatedKey, Blake2AuthenticatedKey,
+    AuthenticatedKey,
+    Blake2AuthenticatedKey,
     # new crypto
-    AESOCBKeyfileKey, AESOCBRepoKey,
-    CHPOKeyfileKey, CHPORepoKey,
-    Blake2AESOCBKeyfileKey, Blake2AESOCBRepoKey,
-    Blake2CHPOKeyfileKey, Blake2CHPORepoKey,
+    AESOCBKeyfileKey,
+    AESOCBRepoKey,
+    CHPOKeyfileKey,
+    CHPORepoKey,
+    Blake2AESOCBKeyfileKey,
+    Blake2AESOCBRepoKey,
+    Blake2CHPOKeyfileKey,
+    Blake2CHPORepoKey,
 )
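The hunks above are representative of what blackening does throughout the key classes: single quotes become double quotes, hex digits are uppercased, the power operator loses its padding, slices with non-trivial bounds gain spaces around the colon, and short wrapped calls are collapsed while long tuples are exploded to one element per line. A minimal before/after sketch (made-up code, not from the repository, just the same transformations applied once more):

    # before blackening (hypothetical function, for illustration only)
    def check(data, offset):
        if data[offset:offset + 2] == b'OK':
            return 2 ** 16 - 0xffff

    # after blackening: double quotes, spaced slice, tight power operator, uppercase hex
    def check(data, offset):
        if data[offset : offset + 2] == b"OK":
            return 2**16 - 0xFFFF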

+ 47 - 45
src/borg/crypto/keymanager.py

@@ -53,7 +53,7 @@ class KeyManager:
             k = CHPOKeyfileKey(self.repository)
             target = k.find_key()
             with open(target) as fd:
-                self.keyblob = ''.join(fd.readlines()[1:])
+                self.keyblob = "".join(fd.readlines()[1:])

         elif self.keyblob_storage == KeyBlobStorage.REPO:
             key_data = self.repository.load_key().decode()
@@ -70,75 +70,77 @@ class KeyManager:

             self.store_keyfile(target)
         elif self.keyblob_storage == KeyBlobStorage.REPO:
-            self.repository.save_key(self.keyblob.encode('utf-8'))
+            self.repository.save_key(self.keyblob.encode("utf-8"))

     def get_keyfile_data(self):
-        data = f'{CHPOKeyfileKey.FILE_ID} {bin_to_hex(self.repository.id)}\n'
+        data = f"{CHPOKeyfileKey.FILE_ID} {bin_to_hex(self.repository.id)}\n"
         data += self.keyblob
-        if not self.keyblob.endswith('\n'):
-            data += '\n'
+        if not self.keyblob.endswith("\n"):
+            data += "\n"
         return data

     def store_keyfile(self, target):
-        with dash_open(target, 'w') as fd:
+        with dash_open(target, "w") as fd:
             fd.write(self.get_keyfile_data())

     def export(self, path):
         if path is None:
-            path = '-'
+            path = "-"

         self.store_keyfile(path)

     def export_qr(self, path):
         if path is None:
-            path = '-'
+            path = "-"

-        with dash_open(path, 'wb') as fd:
+        with dash_open(path, "wb") as fd:
             key_data = self.get_keyfile_data()
-            html = pkgutil.get_data('borg', 'paperkey.html')
-            html = html.replace(b'</textarea>', key_data.encode() + b'</textarea>')
+            html = pkgutil.get_data("borg", "paperkey.html")
+            html = html.replace(b"</textarea>", key_data.encode() + b"</textarea>")
             fd.write(html)

     def export_paperkey(self, path):
         if path is None:
-            path = '-'
+            path = "-"

         def grouped(s):
-            ret = ''
+            ret = ""
             i = 0
             for ch in s:
                 if i and i % 6 == 0:
-                    ret += ' '
+                    ret += " "
                 ret += ch
                 i += 1
             return ret

-        export = 'To restore key use borg key import --paper /path/to/repo\n\n'
+        export = "To restore key use borg key import --paper /path/to/repo\n\n"

         binary = a2b_base64(self.keyblob)
-        export += 'BORG PAPER KEY v1\n'
+        export += "BORG PAPER KEY v1\n"
         lines = (len(binary) + 17) // 18
         repoid = bin_to_hex(self.repository.id)[:18]
         complete_checksum = sha256_truncated(binary, 12)
-        export += 'id: {:d} / {} / {} - {}\n'.format(lines,
-                                       grouped(repoid),
-                                       grouped(complete_checksum),
-                                       sha256_truncated((str(lines) + '/' + repoid + '/' + complete_checksum).encode('ascii'), 2))
+        export += "id: {:d} / {} / {} - {}\n".format(
+            lines,
+            grouped(repoid),
+            grouped(complete_checksum),
+            sha256_truncated((str(lines) + "/" + repoid + "/" + complete_checksum).encode("ascii"), 2),
+        )
         idx = 0
         while len(binary):
             idx += 1
             binline = binary[:18]
-            checksum = sha256_truncated(idx.to_bytes(2, byteorder='big') + binline, 2)
-            export += f'{idx:2d}: {grouped(bin_to_hex(binline))} - {checksum}\n'
+            checksum = sha256_truncated(idx.to_bytes(2, byteorder="big") + binline, 2)
+            export += f"{idx:2d}: {grouped(bin_to_hex(binline))} - {checksum}\n"
             binary = binary[18:]

-        with dash_open(path, 'w') as fd:
+        with dash_open(path, "w") as fd:
             fd.write(export)

     def import_keyfile(self, args):
         file_id = CHPOKeyfileKey.FILE_ID
-        first_line = file_id + ' ' + bin_to_hex(self.repository.id) + '\n'
-        with dash_open(args.path, 'r') as fd:
+        first_line = file_id + " " + bin_to_hex(self.repository.id) + "\n"
+        with dash_open(args.path, "r") as fd:
             file_first_line = fd.read(len(first_line))
             if file_first_line != first_line:
                 if not file_first_line.startswith(file_id):
@@ -154,52 +156,52 @@ class KeyManager:
             # imported here because it has global side effects
             import readline
         except ImportError:
-            print('Note: No line editing available due to missing readline support')
+            print("Note: No line editing available due to missing readline support")

         repoid = bin_to_hex(self.repository.id)[:18]
         try:
             while True:  # used for repeating on overall checksum mismatch
                 # id line input
                 while True:
-                    idline = input('id: ').replace(' ', '')
-                    if idline == '':
-                        if yes('Abort import? [yN]:'):
+                    idline = input("id: ").replace(" ", "")
+                    if idline == "":
+                        if yes("Abort import? [yN]:"):
                             raise EOFError()

                     try:
-                        (data, checksum) = idline.split('-')
+                        (data, checksum) = idline.split("-")
                     except ValueError:
                         print("each line must contain exactly one '-', try again")
                         continue
                     try:
-                        (id_lines, id_repoid, id_complete_checksum) = data.split('/')
+                        (id_lines, id_repoid, id_complete_checksum) = data.split("/")
                     except ValueError:
                         print("the id line must contain exactly three '/', try again")
                         continue
-                    if sha256_truncated(data.lower().encode('ascii'), 2) != checksum:
-                        print('line checksum did not match, try same line again')
+                    if sha256_truncated(data.lower().encode("ascii"), 2) != checksum:
+                        print("line checksum did not match, try same line again")
                         continue
                     try:
                         lines = int(id_lines)
                     except ValueError:
-                        print('internal error while parsing length')
+                        print("internal error while parsing length")

                     break

                 if repoid != id_repoid:
                     raise RepoIdMismatch()

-                result = b''
+                result = b""
                 idx = 1
                 # body line input
                 while True:
-                    inline = input(f'{idx:2d}: ')
-                    inline = inline.replace(' ', '')
-                    if inline == '':
-                        if yes('Abort import? [yN]:'):
+                    inline = input(f"{idx:2d}: ")
+                    inline = inline.replace(" ", "")
+                    if inline == "":
+                        if yes("Abort import? [yN]:"):
                             raise EOFError()
                     try:
-                        (data, checksum) = inline.split('-')
+                        (data, checksum) = inline.split("-")
                     except ValueError:
                         print("each line must contain exactly one '-', try again")
                         continue
@@ -208,8 +210,8 @@ class KeyManager:
                     except binascii.Error:
                         print("only characters 0-9 and a-f and '-' are valid, try again")
                         continue
-                    if sha256_truncated(idx.to_bytes(2, byteorder='big') + part, 2) != checksum:
-                        print(f'line checksum did not match, try line {idx} again')
+                    if sha256_truncated(idx.to_bytes(2, byteorder="big") + part, 2) != checksum:
+                        print(f"line checksum did not match, try line {idx} again")
                         continue
                     result += part
                     if idx == lines:
@@ -217,13 +219,13 @@ class KeyManager:
                     idx += 1

                 if sha256_truncated(result, 12) != id_complete_checksum:
-                    print('The overall checksum did not match, retry or enter a blank line to abort.')
+                    print("The overall checksum did not match, retry or enter a blank line to abort.")
                     continue

-                self.keyblob = '\n'.join(textwrap.wrap(b2a_base64(result).decode('ascii'))) + '\n'
+                self.keyblob = "\n".join(textwrap.wrap(b2a_base64(result).decode("ascii"))) + "\n"
                 self.store_keyblob(args)
                 break

         except EOFError:
-            print('\n - aborted')
+            print("\n - aborted")
             return
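Most of the churn in keymanager.py comes from black's line-splitting rule: a call that fits within the configured line length is kept (or joined back) on one line, while one that does not fit is broken at its brackets, one argument per line, and gains a trailing comma, which also keeps it in the exploded form on future runs. A rough sketch of both outcomes, using invented names (derive_key and short_checksum are hypothetical; grouped, repoid, lines and complete_checksum mirror the diff above):

    # fits the line length: black keeps it on a single line
    key = derive_key(passphrase, output_len_in_bytes=32, salt=salt)

    # too long: black explodes it, one argument per line, trailing comma added
    export_header = "id: {:d} / {} / {} - {}".format(
        lines,
        grouped(repoid),
        grouped(complete_checksum),
        short_checksum,
    )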

+ 6 - 2
src/borg/crypto/nonces.py

@@ -18,7 +18,7 @@ class NonceManager:
         self.repository = repository
         self.end_of_nonce_reservation = None
         self.manifest_nonce = manifest_nonce
-        self.nonce_file = os.path.join(get_security_dir(self.repository.id_str), 'nonce')
+        self.nonce_file = os.path.join(get_security_dir(self.repository.id_str), "nonce")

     def get_local_free_nonce(self):
         try:
@@ -78,7 +78,11 @@

         repo_free_nonce = self.get_repo_free_nonce()
         local_free_nonce = self.get_local_free_nonce()
-        free_nonce_space = max(x for x in (repo_free_nonce, local_free_nonce, self.manifest_nonce, self.end_of_nonce_reservation) if x is not None)
+        free_nonce_space = max(
+            x
+            for x in (repo_free_nonce, local_free_nonce, self.manifest_nonce, self.end_of_nonce_reservation)
+            if x is not None
+        )
         reservation_end = free_nonce_space + nonce_space_needed + NONCE_SPACE_RESERVATION
         assert reservation_end < MAX_REPRESENTABLE_NONCE
         self.commit_repo_nonce_reservation(reservation_end, repo_free_nonce)
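The nonces.py hunk shows the same rule applied to a single overlong expression: black never introduces backslash continuations, it splits inside the existing brackets, so the max() over a generator expression becomes a multi-line call. The same shape on a toy example (candidate_a through candidate_d are hypothetical names):

    # was: biggest = max(x for x in (a, b, c, d) if x is not None)
    # once it exceeds the configured line length, black reflows it as:
    biggest = max(
        x
        for x in (candidate_a, candidate_b, candidate_c, candidate_d)
        if x is not None
    )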

+ 119 - 94
src/borg/fuse.py

@@ -20,7 +20,9 @@ if has_pyfuse3:
         @functools.wraps(fn)
         async def wrapper(*args, **kwargs):
             return fn(*args, **kwargs)
+
         return wrapper
+
 else:
     trio = None

@@ -29,6 +31,7 @@ else:


 from .logger import create_logger
+
 logger = create_logger()

 from .crypto.low_level import blake2b_128
@@ -79,7 +82,7 @@ class ItemCache:
     # to resize it in the first place; that's free).
     GROW_META_BY = 2 * 1024 * 1024

-    indirect_entry_struct = struct.Struct('=cII')
+    indirect_entry_struct = struct.Struct("=cII")
     assert indirect_entry_struct.size == 9

     def __init__(self, decrypted_repository):
@@ -105,7 +108,7 @@ class ItemCache:
         # These are items that span more than one chunk and thus cannot be efficiently cached
         # by the object cache (self.decrypted_repository), which would require variable-length structures;
         # possible but not worth the effort, see iter_archive_items.
-        self.fd = tempfile.TemporaryFile(prefix='borg-tmp')
+        self.fd = tempfile.TemporaryFile(prefix="borg-tmp")

         # A small LRU cache for chunks requested by ItemCache.get() from the object cache,
         # this significantly speeds up directory traversal and similar operations which
@@ -123,12 +126,12 @@ class ItemCache:
     def get(self, inode):
         offset = inode - self.offset
         if offset < 0:
-            raise ValueError('ItemCache.get() called with an invalid inode number')
-        if self.meta[offset] == ord(b'I'):
+            raise ValueError("ItemCache.get() called with an invalid inode number")
+        if self.meta[offset] == ord(b"I"):
             _, chunk_id_relative_offset, chunk_offset = self.indirect_entry_struct.unpack_from(self.meta, offset)
             chunk_id_offset = offset - chunk_id_relative_offset
             # bytearray slices are bytearrays as well, explicitly convert to bytes()
-            chunk_id = bytes(self.meta[chunk_id_offset:chunk_id_offset + 32])
+            chunk_id = bytes(self.meta[chunk_id_offset : chunk_id_offset + 32])
             chunk = self.chunks.get(chunk_id)
             if not chunk:
                 csize, chunk = next(self.decrypted_repository.get_many([chunk_id]))
@@ -137,12 +140,12 @@ class ItemCache:
             unpacker = msgpack.Unpacker()
             unpacker.feed(data)
             return Item(internal_dict=next(unpacker))
-        elif self.meta[offset] == ord(b'S'):
-            fd_offset = int.from_bytes(self.meta[offset + 1:offset + 9], 'little')
+        elif self.meta[offset] == ord(b"S"):
+            fd_offset = int.from_bytes(self.meta[offset + 1 : offset + 9], "little")
             self.fd.seek(fd_offset, io.SEEK_SET)
             return Item(internal_dict=next(msgpack.Unpacker(self.fd, read_size=1024)))
         else:
-            raise ValueError('Invalid entry type in self.meta')
+            raise ValueError("Invalid entry type in self.meta")

     def iter_archive_items(self, archive_item_ids, filter=None, consider_part_files=False):
         unpacker = msgpack.Unpacker()
@@ -153,7 +156,7 @@ class ItemCache:
         chunk_begin = 0
         # Length of the chunk preceding the current chunk
         last_chunk_length = 0
-        msgpacked_bytes = b''
+        msgpacked_bytes = b""

         write_offset = self.write_offset
         meta = self.meta
@@ -163,7 +166,7 @@ class ItemCache:
             # Store the chunk ID in the meta-array
             if write_offset + 32 >= len(meta):
                 self.meta = meta = meta + bytes(self.GROW_META_BY)
-            meta[write_offset:write_offset + 32] = key
+            meta[write_offset : write_offset + 32] = key
             current_id_offset = write_offset
             write_offset += 32

@@ -182,7 +185,7 @@ class ItemCache:
                 # tell() is not helpful for the need_more_data case, but we know it is the remainder
                 # of the data in that case. in the other case, tell() works as expected.
                 length = (len(data) - start) if need_more_data else (unpacker.tell() - stream_offset)
-                msgpacked_bytes += data[start:start+length]
+                msgpacked_bytes += data[start : start + length]
                 stream_offset += length

                 if need_more_data:
@@ -190,14 +193,14 @@ class ItemCache:
                     break

                 item = Item(internal_dict=item)
-                if filter and not filter(item) or not consider_part_files and 'part' in item:
-                    msgpacked_bytes = b''
+                if filter and not filter(item) or not consider_part_files and "part" in item:
+                    msgpacked_bytes = b""
                     continue

                 current_item = msgpacked_bytes
                 current_item_length = len(current_item)
                 current_spans_chunks = stream_offset - current_item_length < chunk_begin
-                msgpacked_bytes = b''
+                msgpacked_bytes = b""

                 if write_offset + 9 >= len(meta):
                     self.meta = meta = meta + bytes(self.GROW_META_BY)
@@ -221,11 +224,11 @@ class ItemCache:
                 if current_spans_chunks:
                     pos = self.fd.seek(0, io.SEEK_END)
                     self.fd.write(current_item)
-                    meta[write_offset:write_offset + 9] = b'S' + pos.to_bytes(8, 'little')
+                    meta[write_offset : write_offset + 9] = b"S" + pos.to_bytes(8, "little")
                     self.direct_items += 1
                 else:
                     item_offset = stream_offset - current_item_length - chunk_begin
-                    pack_indirect_into(meta, write_offset, b'I', write_offset - current_id_offset, item_offset)
+                    pack_indirect_into(meta, write_offset, b"I", write_offset - current_id_offset, item_offset)
                     self.indirect_items += 1
                 inode = write_offset + self.offset
                 write_offset += 9
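
A quick illustration of the entry format written above (a sketch, not borg's own helper): each meta entry is 9 bytes, a tag byte plus 8 payload bytes. For "direct" (b"S") entries the payload is the item's position in the cache file as a 64-bit little-endian integer; the indirect (b"I") layout is omitted here because pack_indirect_into() is defined elsewhere in this file.

    # hypothetical standalone decoder for the b"S" (direct) entries shown above
    def decode_direct_entry(meta, offset):
        assert meta[offset : offset + 1] == b"S"
        return int.from_bytes(meta[offset + 1 : offset + 9], "little")

    meta = bytearray(32)
    meta[0:9] = b"S" + (4711).to_bytes(8, "little")
    print(decode_direct_entry(meta, 0))  # 4711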
@@ -236,8 +239,7 @@ class ItemCache:


 class FuseBackend:
-    """Virtual filesystem based on archive(s) to provide information to fuse
-    """
+    """Virtual filesystem based on archive(s) to provide information to fuse"""

     def __init__(self, key, manifest, repository, args, decrypted_repository):
         self.repository_uncached = repository
@@ -307,8 +309,7 @@ class FuseBackend:
         return self.inode_count

     def _create_dir(self, parent, mtime=None):
-        """Create directory
-        """
+        """Create directory"""
         ino = self._allocate_inode()
         if mtime is not None:
             self._items[ino] = Item(internal_dict=self.default_dir.as_dict())
@@ -319,26 +320,31 @@ class FuseBackend:
         return ino

     def find_inode(self, path, prefix=[]):
-        segments = prefix + path.split(b'/')
+        segments = prefix + path.split(b"/")
         inode = 1
         for segment in segments:
             inode = self.contents[inode][segment]
         return inode

     def _process_archive(self, archive_name, prefix=[]):
-        """Build FUSE inode hierarchy from archive metadata
-        """
+        """Build FUSE inode hierarchy from archive metadata"""
         self.file_versions = {}  # for versions mode: original path -> version
         t0 = time.perf_counter()
-        archive = Archive(self.repository_uncached, self.key, self._manifest, archive_name,
-                          consider_part_files=self._args.consider_part_files)
+        archive = Archive(
+            self.repository_uncached,
+            self.key,
+            self._manifest,
+            archive_name,
+            consider_part_files=self._args.consider_part_files,
+        )
         strip_components = self._args.strip_components
         matcher = Archiver.build_matcher(self._args.patterns, self._args.paths)
         hlm = HardLinkManager(id_type=bytes, info_type=str)  # hlid -> path

         filter = Archiver.build_filter(matcher, strip_components)
-        for item_inode, item in self.cache.iter_archive_items(archive.metadata.items, filter=filter,
-                                                              consider_part_files=self._args.consider_part_files):
+        for item_inode, item in self.cache.iter_archive_items(
+            archive.metadata.items, filter=filter, consider_part_files=self._args.consider_part_files
+        ):
             if strip_components:
                 item.path = os.sep.join(item.path.split(os.sep)[strip_components:])
             path = os.fsencode(item.path)
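
For orientation, find_inode() above is just a dict walk over the synthetic directory tree (inode -> {name: child_inode}, rooted at inode 1). A self-contained sketch with a hypothetical two-level tree:

    contents = {1: {b"home": 2}, 2: {b"file.txt": 3}}  # assumed example data

    def find_inode(path, prefix=[]):
        segments = prefix + path.split(b"/")
        inode = 1
        for segment in segments:
            inode = contents[inode][segment]
        return inode

    print(find_inode(b"home/file.txt"))  # 3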
@@ -354,24 +360,24 @@ class FuseBackend:
                 else:
                     self._items[inode] = item
                     continue
-            segments = prefix + path.split(b'/')
+            segments = prefix + path.split(b"/")
             parent = 1
             for segment in segments[:-1]:
                 parent = self._process_inner(segment, parent)
             self._process_leaf(segments[-1], item, parent, prefix, is_dir, item_inode, hlm)
         duration = time.perf_counter() - t0
-        logger.debug('fuse: _process_archive completed in %.1f s for archive %s', duration, archive.name)
+        logger.debug("fuse: _process_archive completed in %.1f s for archive %s", duration, archive.name)

     def _process_leaf(self, name, item, parent, prefix, is_dir, item_inode, hlm):
         path = item.path
         del item.path  # save some space

         def file_version(item, path):
-            if 'chunks' in item:
+            if "chunks" in item:
                 file_id = blake2b_128(path)
                 current_version, previous_id = self.versions_index.get(file_id, (0, None))

-                contents_id = blake2b_128(b''.join(chunk_id for chunk_id, _ in item.chunks))
+                contents_id = blake2b_128(b"".join(chunk_id for chunk_id, _ in item.chunks))

                 if contents_id != previous_id:
                     current_version += 1
@@ -382,14 +388,14 @@ class FuseBackend:
         def make_versioned_name(name, version, add_dir=False):
             if add_dir:
                 # add intermediate directory with same name as filename
-                path_fname = name.rsplit(b'/', 1)
-                name += b'/' + path_fname[-1]
+                path_fname = name.rsplit(b"/", 1)
+                name += b"/" + path_fname[-1]
             # keep original extension at end to avoid confusing tools
             name, ext = os.path.splitext(name)
-            version_enc = os.fsencode('.%05d' % version)
+            version_enc = os.fsencode(".%05d" % version)
             return name + version_enc + ext

-        if 'hlid' in item:
+        if "hlid" in item:
             link_target = hlm.retrieve(id=item.hlid, default=None)
             if link_target is not None:
                 # Hard link was extracted previously, just link
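
make_versioned_name() above can be exercised standalone; note how the version is spliced in before the extension so suffix-keyed tools keep working, and how add_dir=True first inserts an intermediate directory named like the file (used by the versions view):

    import os

    def make_versioned_name(name, version, add_dir=False):
        if add_dir:
            path_fname = name.rsplit(b"/", 1)
            name += b"/" + path_fname[-1]
        name, ext = os.path.splitext(name)
        return name + os.fsencode(".%05d" % version) + ext

    print(make_versioned_name(b"docs/report.txt", 3))
    # b'docs/report.00003.txt'
    print(make_versioned_name(b"docs/report.txt", 3, add_dir=True))
    # b'docs/report.txt/report.00003.txt'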
@@ -401,10 +407,10 @@ class FuseBackend:
                 try:
                     inode = self.find_inode(link_target, prefix)
                 except KeyError:
-                    logger.warning('Skipping broken hard link: %s -> %s', path, link_target)
+                    logger.warning("Skipping broken hard link: %s -> %s", path, link_target)
                     return
                 item = self.get_item(inode)
-                item.nlink = item.get('nlink', 1) + 1
+                item.nlink = item.get("nlink", 1) + 1
                 self._items[inode] = item
             else:
                 inode = item_inode
@@ -439,31 +445,41 @@ class FuseBackend:


 class FuseOperations(llfuse.Operations, FuseBackend):
-    """Export archive as a FUSE filesystem
-    """
+    """Export archive as a FUSE filesystem"""

     def __init__(self, key, repository, manifest, args, decrypted_repository):
         llfuse.Operations.__init__(self)
         FuseBackend.__init__(self, key, manifest, repository, args, decrypted_repository)
         self.decrypted_repository = decrypted_repository
-        data_cache_capacity = int(os.environ.get('BORG_MOUNT_DATA_CACHE_ENTRIES', os.cpu_count() or 1))
-        logger.debug('mount data cache capacity: %d chunks', data_cache_capacity)
+        data_cache_capacity = int(os.environ.get("BORG_MOUNT_DATA_CACHE_ENTRIES", os.cpu_count() or 1))
+        logger.debug("mount data cache capacity: %d chunks", data_cache_capacity)
         self.data_cache = LRUCache(capacity=data_cache_capacity, dispose=lambda _: None)
         self._last_pos = LRUCache(capacity=FILES, dispose=lambda _: None)

     def sig_info_handler(self, sig_no, stack):
-        logger.debug('fuse: %d synth inodes, %d edges (%s)',
-                     self.inode_count, len(self.parent),
-                     # getsizeof is the size of the dict itself; key and value are two small-ish integers,
-                     # which are shared due to code structure (this has been verified).
-                     format_file_size(sys.getsizeof(self.parent) + len(self.parent) * sys.getsizeof(self.inode_count)))
-        logger.debug('fuse: %d pending archives', len(self.pending_archives))
-        logger.debug('fuse: ItemCache %d entries (%d direct, %d indirect), meta-array size %s, direct items size %s',
-                     self.cache.direct_items + self.cache.indirect_items, self.cache.direct_items, self.cache.indirect_items,
-                     format_file_size(sys.getsizeof(self.cache.meta)),
-                     format_file_size(os.stat(self.cache.fd.fileno()).st_size))
-        logger.debug('fuse: data cache: %d/%d entries, %s', len(self.data_cache.items()), self.data_cache._capacity,
-                     format_file_size(sum(len(chunk) for key, chunk in self.data_cache.items())))
+        logger.debug(
+            "fuse: %d synth inodes, %d edges (%s)",
+            self.inode_count,
+            len(self.parent),
+            # getsizeof is the size of the dict itself; key and value are two small-ish integers,
+            # which are shared due to code structure (this has been verified).
+            format_file_size(sys.getsizeof(self.parent) + len(self.parent) * sys.getsizeof(self.inode_count)),
+        )
+        logger.debug("fuse: %d pending archives", len(self.pending_archives))
+        logger.debug(
+            "fuse: ItemCache %d entries (%d direct, %d indirect), meta-array size %s, direct items size %s",
+            self.cache.direct_items + self.cache.indirect_items,
+            self.cache.direct_items,
+            self.cache.indirect_items,
+            format_file_size(sys.getsizeof(self.cache.meta)),
+            format_file_size(os.stat(self.cache.fd.fileno()).st_size),
+        )
+        logger.debug(
+            "fuse: data cache: %d/%d entries, %s",
+            len(self.data_cache.items()),
+            self.data_cache._capacity,
+            format_file_size(sum(len(chunk) for key, chunk in self.data_cache.items())),
+        )
         self.decrypted_repository.log_instrumentation()

     def mount(self, mountpoint, mount_options, foreground=False):
@@ -475,25 +491,25 @@ class FuseOperations(llfuse.Operations, FuseBackend):
                 if option == key:
                     options.pop(idx)
                     return present
-                if option.startswith(key + '='):
+                if option.startswith(key + "="):
                     options.pop(idx)
-                    value = option.split('=', 1)[1]
+                    value = option.split("=", 1)[1]
                     if wanted_type is bool:
                         v = value.lower()
-                        if v in ('y', 'yes', 'true', '1'):
+                        if v in ("y", "yes", "true", "1"):
                             return True
-                        if v in ('n', 'no', 'false', '0'):
+                        if v in ("n", "no", "false", "0"):
                             return False
-                        raise ValueError('unsupported value in option: %s' % option)
+                        raise ValueError("unsupported value in option: %s" % option)
                     if wanted_type is int:
                         try:
                             return int(value, base=int_base)
                         except ValueError:
-                            raise ValueError('unsupported value in option: %s' % option) from None
+                            raise ValueError("unsupported value in option: %s" % option) from None
                     try:
                         return wanted_type(value)
                     except ValueError:
-                        raise ValueError('unsupported value in option: %s' % option) from None
+                        raise ValueError("unsupported value in option: %s" % option) from None
             else:
                 return not_present

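The pop_option() closure above consumes recognized options from the list, so only unrecognized ones reach the FUSE layer. A trimmed standalone variant (integer "key=value" case only, assumed names) shows that behaviour:

    def pop_int_option(options, key, default, int_base=10):
        for idx, option in enumerate(options):
            if option.startswith(key + "="):
                options.pop(idx)  # consume the option
                return int(option.split("=", 1)[1], base=int_base)
        return default

    options = ["fsname=borgfs", "ro", "uid=1000", "umask=0022"]
    print(pop_int_option(options, "uid", None))             # 1000
    print(pop_int_option(options, "umask", 0, int_base=8))  # 18 == 0o22
    print(options)  # ['fsname=borgfs', 'ro'] is what FUSE will see
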
@@ -502,20 +518,20 @@ class FuseOperations(llfuse.Operations, FuseBackend):
         # cause security issues if used with allow_other mount option.
         # When not using allow_other or allow_root, access is limited to the
         # mounting user anyway.
-        options = ['fsname=borgfs', 'ro', 'default_permissions']
+        options = ["fsname=borgfs", "ro", "default_permissions"]
         if mount_options:
-            options.extend(mount_options.split(','))
-        ignore_permissions = pop_option(options, 'ignore_permissions', True, False, bool)
+            options.extend(mount_options.split(","))
+        ignore_permissions = pop_option(options, "ignore_permissions", True, False, bool)
         if ignore_permissions:
             # in case users have a use-case that requires NOT giving "default_permissions",
             # this is enabled by the custom "ignore_permissions" mount option which just
             # removes "default_permissions" again:
-            pop_option(options, 'default_permissions', True, False, bool)
-        self.allow_damaged_files = pop_option(options, 'allow_damaged_files', True, False, bool)
-        self.versions = pop_option(options, 'versions', True, False, bool)
-        self.uid_forced = pop_option(options, 'uid', None, None, int)
-        self.gid_forced = pop_option(options, 'gid', None, None, int)
-        self.umask = pop_option(options, 'umask', 0, 0, int, int_base=8)  # umask is octal, e.g. 222 or 0222
+            pop_option(options, "default_permissions", True, False, bool)
+        self.allow_damaged_files = pop_option(options, "allow_damaged_files", True, False, bool)
+        self.versions = pop_option(options, "versions", True, False, bool)
+        self.uid_forced = pop_option(options, "uid", None, None, int)
+        self.gid_forced = pop_option(options, "gid", None, None, int)
+        self.umask = pop_option(options, "umask", 0, 0, int, int_base=8)  # umask is octal, e.g. 222 or 0222
         dir_uid = self.uid_forced if self.uid_forced is not None else self.default_uid
         dir_gid = self.gid_forced if self.gid_forced is not None else self.default_gid
         dir_user = uid2user(dir_uid)
@@ -523,8 +539,9 @@ class FuseOperations(llfuse.Operations, FuseBackend):
         assert isinstance(dir_user, str)
         assert isinstance(dir_group, str)
         dir_mode = 0o40755 & ~self.umask
-        self.default_dir = Item(mode=dir_mode, mtime=int(time.time() * 1e9),
-                                user=dir_user, group=dir_group, uid=dir_uid, gid=dir_gid)
+        self.default_dir = Item(
+            mode=dir_mode, mtime=int(time.time() * 1e9), user=dir_user, group=dir_group, uid=dir_uid, gid=dir_gid
+        )
         self._create_filesystem()
         llfuse.init(self, mountpoint, options)
         if not foreground:
@@ -533,7 +550,7 @@ class FuseOperations(llfuse.Operations, FuseBackend):
             else:
                 with daemonizing() as (old_id, new_id):
                     # local repo: the locking process' PID is changing, migrate it:
-                    logger.debug('fuse: mount local repo, going to background: migrating lock.')
+                    logger.debug("fuse: mount local repo, going to background: migrating lock.")
                     self.repository_uncached.migrate_lock(old_id, new_id)

         # If the file system crashes, we do not want to umount because in that
@@ -543,11 +560,10 @@ class FuseOperations(llfuse.Operations, FuseBackend):
         # mirror.
         umount = False
         try:
-            with signal_handler('SIGUSR1', self.sig_info_handler), \
-                 signal_handler('SIGINFO', self.sig_info_handler):
+            with signal_handler("SIGUSR1", self.sig_info_handler), signal_handler("SIGINFO", self.sig_info_handler):
                 signal = fuse_main()
             # no crash and no signal (or it's ^C and we're in the foreground) -> umount request
-            umount = (signal is None or (signal == SIGINT and foreground))
+            umount = signal is None or (signal == SIGINT and foreground)
         finally:
             llfuse.close(umount)

@@ -573,19 +589,24 @@ class FuseOperations(llfuse.Operations, FuseBackend):
         entry.entry_timeout = 300
         entry.attr_timeout = 300
         entry.st_mode = item.mode & ~self.umask
-        entry.st_nlink = item.get('nlink', 1)
-        entry.st_uid, entry.st_gid = get_item_uid_gid(item, numeric=self.numeric_ids,
-                                                      uid_default=self.default_uid, gid_default=self.default_gid,
-                                                      uid_forced=self.uid_forced, gid_forced=self.gid_forced)
-        entry.st_rdev = item.get('rdev', 0)
+        entry.st_nlink = item.get("nlink", 1)
+        entry.st_uid, entry.st_gid = get_item_uid_gid(
+            item,
+            numeric=self.numeric_ids,
+            uid_default=self.default_uid,
+            gid_default=self.default_gid,
+            uid_forced=self.uid_forced,
+            gid_forced=self.gid_forced,
+        )
+        entry.st_rdev = item.get("rdev", 0)
         entry.st_size = item.get_size()
         entry.st_blksize = 512
         entry.st_blocks = (entry.st_size + entry.st_blksize - 1) // entry.st_blksize
         # note: older archives only have mtime (not atime nor ctime)
         entry.st_mtime_ns = mtime_ns = item.mtime
-        entry.st_atime_ns = item.get('atime', mtime_ns)
-        entry.st_ctime_ns = item.get('ctime', mtime_ns)
-        entry.st_birthtime_ns = item.get('birthtime', mtime_ns)
+        entry.st_atime_ns = item.get("atime", mtime_ns)
+        entry.st_ctime_ns = item.get("ctime", mtime_ns)
+        entry.st_birthtime_ns = item.get("birthtime", mtime_ns)
         return entry

     @async_wrapper
@@ -595,22 +616,22 @@ class FuseOperations(llfuse.Operations, FuseBackend):
     @async_wrapper
     def listxattr(self, inode, ctx=None):
         item = self.get_item(inode)
-        return item.get('xattrs', {}).keys()
+        return item.get("xattrs", {}).keys()

     @async_wrapper
     def getxattr(self, inode, name, ctx=None):
         item = self.get_item(inode)
         try:
-            return item.get('xattrs', {})[name] or b''
+            return item.get("xattrs", {})[name] or b""
         except KeyError:
             raise llfuse.FUSEError(llfuse.ENOATTR) from None

     @async_wrapper
     def lookup(self, parent_inode, name, ctx=None):
         self.check_pending_archive(parent_inode)
-        if name == b'.':
+        if name == b".":
             inode = parent_inode
-        elif name == b'..':
+        elif name == b"..":
             inode = self.parent[parent_inode]
         else:
             inode = self.contents[parent_inode].get(name)
@@ -622,12 +643,14 @@ class FuseOperations(llfuse.Operations, FuseBackend):
     def open(self, inode, flags, ctx=None):
         if not self.allow_damaged_files:
             item = self.get_item(inode)
-            if 'chunks_healthy' in item:
+            if "chunks_healthy" in item:
                 # Processed archive items don't carry the path anymore; for converting the inode
                 # to the path we'd either have to store the inverse of the current structure,
                 # or search the entire archive. So we just don't print it. It's easy to correlate anyway.
-                logger.warning('File has damaged (all-zero) chunks. Try running borg check --repair. '
-                               'Mount with allow_damaged_files to read damaged files.')
+                logger.warning(
+                    "File has damaged (all-zero) chunks. Try running borg check --repair. "
+                    "Mount with allow_damaged_files to read damaged files."
+                )
                 raise llfuse.FUSEError(errno.EIO)
         return llfuse.FileInfo(fh=inode) if has_pyfuse3 else inode

@@ -669,7 +692,7 @@ class FuseOperations(llfuse.Operations, FuseBackend):
                 if offset + n < len(data):
                     # chunk was only partially read, cache it
                     self.data_cache[id] = data
-            parts.append(data[offset:offset + n])
+            parts.append(data[offset : offset + n])
             offset = 0
             size -= n
             if not size:
@@ -678,12 +701,13 @@ class FuseOperations(llfuse.Operations, FuseBackend):
                 else:
                     self._last_pos[fh] = (chunk_no, chunk_offset)
                 break
-        return b''.join(parts)
+        return b"".join(parts)

     # note: we can't have a generator (with yield) and not a generator (async) in the same method
     if has_pyfuse3:
+
         async def readdir(self, fh, off, token):
-            entries = [(b'.', fh), (b'..', self.parent[fh])]
+            entries = [(b".", fh), (b"..", self.parent[fh])]
             entries.extend(self.contents[fh].items())
             for i, (name, inode) in enumerate(entries[off:], off):
                 attrs = self._getattr(inode)
@@ -691,8 +715,9 @@ class FuseOperations(llfuse.Operations, FuseBackend):
                     break

     else:
+
         def readdir(self, fh, off):
-            entries = [(b'.', fh), (b'..', self.parent[fh])]
+            entries = [(b".", fh), (b"..", self.parent[fh])]
             entries.extend(self.contents[fh].items())
             for i, (name, inode) in enumerate(entries[off:], off):
                 attrs = self._getattr(inode)

+ 5 - 5
src/borg/fuse_impl.py

@@ -4,11 +4,11 @@ load library for lowlevel FUSE implementation

 import os

-BORG_FUSE_IMPL = os.environ.get('BORG_FUSE_IMPL', 'pyfuse3,llfuse')
+BORG_FUSE_IMPL = os.environ.get("BORG_FUSE_IMPL", "pyfuse3,llfuse")

-for FUSE_IMPL in BORG_FUSE_IMPL.split(','):
+for FUSE_IMPL in BORG_FUSE_IMPL.split(","):
     FUSE_IMPL = FUSE_IMPL.strip()
-    if FUSE_IMPL == 'pyfuse3':
+    if FUSE_IMPL == "pyfuse3":
         try:
             import pyfuse3 as llfuse
         except ImportError:
@@ -17,7 +17,7 @@ for FUSE_IMPL in BORG_FUSE_IMPL.split(','):
             has_llfuse = False
             has_pyfuse3 = True
             break
-    elif FUSE_IMPL == 'llfuse':
+    elif FUSE_IMPL == "llfuse":
         try:
             import llfuse
         except ImportError:
@@ -26,7 +26,7 @@ for FUSE_IMPL in BORG_FUSE_IMPL.split(','):
             has_llfuse = True
             has_pyfuse3 = False
             break
-    elif FUSE_IMPL == 'none':
+    elif FUSE_IMPL == "none":
         pass
     else:
         raise RuntimeError("unknown fuse implementation in BORG_FUSE_IMPL: '%s'" % BORG_FUSE_IMPL)
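
Usage note (an assumption about deployment, not shown in this diff): the comma-separated BORG_FUSE_IMPL value is tried left to right, so the default "pyfuse3,llfuse" prefers pyfuse3 and silently falls back to llfuse when the import fails. To force one backend, set the variable before the module is imported:

    import os
    os.environ["BORG_FUSE_IMPL"] = "llfuse"  # or "pyfuse3", or "none" to disable FUSE
    import borg.fuse_impl  # reads the variable at import time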

+ 1 - 1
src/borg/helpers/__init__.py

@@ -24,7 +24,7 @@ from . import msgpack
 # generic mechanism to enable users to invoke workarounds by setting the
 # BORG_WORKAROUNDS environment variable to a list of comma-separated strings.
 # see the docs for a list of known workaround strings.
-workarounds = tuple(os.environ.get('BORG_WORKAROUNDS', '').split(','))
+workarounds = tuple(os.environ.get("BORG_WORKAROUNDS", "").split(","))

 """
 The global exit_code variable is used so that modules other than archiver can increase the program exit code if a
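
Since workarounds is parsed once at import time, enabling one is just a matter of the environment; "retry_erofs" (consumed in helpers/fs.py later in this commit) serves as an example:

    import os
    os.environ["BORG_WORKAROUNDS"] = "retry_erofs"  # set before borg.helpers is imported
    workarounds = tuple(os.environ.get("BORG_WORKAROUNDS", "").split(","))
    if "retry_erofs" in workarounds:
        print("will retry open() without O_NOATIME on EROFS")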

+ 7 - 6
src/borg/helpers/checks.py

@@ -23,15 +23,16 @@ class ExtensionModuleError(Error):

 def check_extension_modules():
     from .. import platform, compress, crypto, item, chunker, hashindex
-    if hashindex.API_VERSION != '1.2_01':
+
+    if hashindex.API_VERSION != "1.2_01":
         raise ExtensionModuleError
-    if chunker.API_VERSION != '1.2_01':
+    if chunker.API_VERSION != "1.2_01":
         raise ExtensionModuleError
-    if compress.API_VERSION != '1.2_02':
+    if compress.API_VERSION != "1.2_02":
         raise ExtensionModuleError
-    if crypto.low_level.API_VERSION != '1.3_01':
+    if crypto.low_level.API_VERSION != "1.3_01":
         raise ExtensionModuleError
-    if item.API_VERSION != '1.2_01':
+    if item.API_VERSION != "1.2_01":
         raise ExtensionModuleError
-    if platform.API_VERSION != platform.OS_API_VERSION or platform.API_VERSION != '1.2_05':
+    if platform.API_VERSION != platform.OS_API_VERSION or platform.API_VERSION != "1.2_05":
         raise ExtensionModuleError

+ 3 - 2
src/borg/helpers/datastruct.py

@@ -3,6 +3,7 @@ from .errors import Error

 class StableDict(dict):
     """A dict subclass with stable items() ordering"""
+
     def items(self):
         return sorted(super().items())

@@ -20,8 +21,8 @@ class Buffer:
         Initialize the buffer: use allocator(size) call to allocate a buffer.
         Optionally, set the upper <limit> for the buffer size.
         """
-        assert callable(allocator), 'must give alloc(size) function as first param'
-        assert limit is None or size <= limit, 'initial size must be <= limit'
+        assert callable(allocator), "must give alloc(size) function as first param"
+        assert limit is None or size <= limit, "initial size must be <= limit"
         self.allocator = allocator
         self.limit = limit
         self.resize(size, init=True)

+ 2 - 0
src/borg/helpers/errors.py

@@ -5,6 +5,7 @@ from ..crypto.low_level import IntegrityError as IntegrityErrorBase

 class Error(Exception):
     """Error: {}"""
+
     # Error base class

     # if we raise such an Error and it is only caught by the uppermost
@@ -26,6 +27,7 @@ class Error(Exception):

 class ErrorWithTraceback(Error):
     """Error: {}"""
+
     # like Error, but show a traceback also
     traceback = True


+ 51 - 43
src/borg/helpers/fs.py

@@ -16,6 +16,7 @@ from ..platformflags import is_win32
 from ..constants import *  # NOQA

 from ..logger import create_logger
+
 logger = create_logger()


@@ -50,32 +51,32 @@ def get_base_dir():
     - ~$USER, if USER is set
     - ~
     """
-    base_dir = os.environ.get('BORG_BASE_DIR') or os.environ.get('HOME')
+    base_dir = os.environ.get("BORG_BASE_DIR") or os.environ.get("HOME")
     # os.path.expanduser() behaves differently for '~' and '~someuser' as
     # parameters: when called with an explicit username, the possibly set
     # environment variable HOME is no longer respected. So we have to check if
     # it is set and only expand the user's home directory if HOME is unset.
     if not base_dir:
-        base_dir = os.path.expanduser('~%s' % os.environ.get('USER', ''))
+        base_dir = os.path.expanduser("~%s" % os.environ.get("USER", ""))
     return base_dir


 def get_keys_dir():
     """Determine where to repository keys and cache"""
-    keys_dir = os.environ.get('BORG_KEYS_DIR')
+    keys_dir = os.environ.get("BORG_KEYS_DIR")
     if keys_dir is None:
         # note: do not just give this as default to the environment.get(), see issue #5979.
-        keys_dir = os.path.join(get_config_dir(), 'keys')
+        keys_dir = os.path.join(get_config_dir(), "keys")
     ensure_dir(keys_dir)
     return keys_dir


 def get_security_dir(repository_id=None):
     """Determine where to store local security information."""
-    security_dir = os.environ.get('BORG_SECURITY_DIR')
+    security_dir = os.environ.get("BORG_SECURITY_DIR")
     if security_dir is None:
         # note: do not just give this as default to the environment.get(), see issue #5979.
-        security_dir = os.path.join(get_config_dir(), 'security')
+        security_dir = os.path.join(get_config_dir(), "security")
     if repository_id:
         security_dir = os.path.join(security_dir, repository_id)
     ensure_dir(security_dir)
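
The directory resolution above boils down to a simple precedence chain: BORG_BASE_DIR wins over HOME, which wins over the home directory of $USER. A sketch (the path is hypothetical):

    import os
    os.environ["BORG_BASE_DIR"] = "/srv/borg-base"
    base_dir = os.environ.get("BORG_BASE_DIR") or os.environ.get("HOME")
    if not base_dir:
        base_dir = os.path.expanduser("~%s" % os.environ.get("USER", ""))
    print(base_dir)  # /srv/borg-base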
@@ -85,22 +86,28 @@ def get_security_dir(repository_id=None):
 def get_cache_dir():
     """Determine where to repository keys and cache"""
     # Get cache home path
-    cache_home = os.path.join(get_base_dir(), '.cache')
+    cache_home = os.path.join(get_base_dir(), ".cache")
     # Try to use XDG_CACHE_HOME instead if BORG_BASE_DIR isn't explicitly set
-    if not os.environ.get('BORG_BASE_DIR'):
-        cache_home = os.environ.get('XDG_CACHE_HOME', cache_home)
+    if not os.environ.get("BORG_BASE_DIR"):
+        cache_home = os.environ.get("XDG_CACHE_HOME", cache_home)
     # Use BORG_CACHE_DIR if set, otherwise assemble final path from cache home path
-    cache_dir = os.environ.get('BORG_CACHE_DIR', os.path.join(cache_home, 'borg'))
+    cache_dir = os.environ.get("BORG_CACHE_DIR", os.path.join(cache_home, "borg"))
     # Create path if it doesn't exist yet
     ensure_dir(cache_dir)
     cache_tag_fn = os.path.join(cache_dir, CACHE_TAG_NAME)
     if not os.path.exists(cache_tag_fn):
-        cache_tag_contents = CACHE_TAG_CONTENTS + textwrap.dedent("""
+        cache_tag_contents = (
+            CACHE_TAG_CONTENTS
+            + textwrap.dedent(
+                """
         # This file is a cache directory tag created by Borg.
         # For information about cache directory tags, see:
         #       http://www.bford.info/cachedir/spec.html
-        """).encode('ascii')
+        """
+            ).encode("ascii")
+        )
         from ..platform import SaveFile
+
         with SaveFile(cache_tag_fn, binary=True) as fd:
             fd.write(cache_tag_contents)
     return cache_dir
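
For context, the tag written above follows the CACHEDIR.TAG convention (http://www.bford.info/cachedir/spec.html): a directory counts as a cache if its tag file starts with a fixed signature. A minimal standalone check, with the constants inlined here for illustration:

    import os

    CACHE_TAG_NAME = "CACHEDIR.TAG"
    CACHE_TAG_CONTENTS = b"Signature: 8a477f597d28d172789f06886806bc55"

    def dir_is_cachedir(path):
        tag_path = os.path.join(path, CACHE_TAG_NAME)
        try:
            with open(tag_path, "rb") as tag_file:
                return tag_file.read(len(CACHE_TAG_CONTENTS)) == CACHE_TAG_CONTENTS
        except OSError:
            return False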
@@ -109,12 +116,12 @@ def get_cache_dir():
 def get_config_dir():
     """Determine where to store whole config"""
     # Get config home path
-    config_home = os.path.join(get_base_dir(), '.config')
+    config_home = os.path.join(get_base_dir(), ".config")
     # Try to use XDG_CONFIG_HOME instead if BORG_BASE_DIR isn't explicitly set
-    if not os.environ.get('BORG_BASE_DIR'):
-        config_home = os.environ.get('XDG_CONFIG_HOME', config_home)
+    if not os.environ.get("BORG_BASE_DIR"):
+        config_home = os.environ.get("XDG_CONFIG_HOME", config_home)
     # Use BORG_CONFIG_DIR if set, otherwise assemble final path from config home path
-    config_dir = os.environ.get('BORG_CONFIG_DIR', os.path.join(config_home, 'borg'))
+    config_dir = os.environ.get("BORG_CONFIG_DIR", os.path.join(config_home, "borg"))
     # Create path if it doesn't exist yet
     ensure_dir(config_dir)
     return config_dir
@@ -130,7 +137,7 @@ def dir_is_cachedir(path):
     tag_path = os.path.join(path, CACHE_TAG_NAME)
     try:
         if os.path.exists(tag_path):
-            with open(tag_path, 'rb') as tag_file:
+            with open(tag_path, "rb") as tag_file:
                 tag_data = tag_file.read(len(CACHE_TAG_CONTENTS))
                 if tag_data == CACHE_TAG_CONTENTS:
                     return True
@@ -157,13 +164,12 @@ def dir_is_tagged(path, exclude_caches, exclude_if_present):
     return tag_names


-_safe_re = re.compile(r'^((\.\.)?/+)+')
+_safe_re = re.compile(r"^((\.\.)?/+)+")


 def make_path_safe(path):
-    """Make path safe by making it relative and local
-    """
-    return _safe_re.sub('', path) or '.'
+    """Make path safe by making it relative and local"""
+    return _safe_re.sub("", path) or "."


 class HardLinkManager:
@@ -189,6 +195,7 @@ class HardLinkManager:
        For better hardlink support (including the very first hardlink item for each group of same-target hardlinks),
        we would need a 2-pass processing, which is not yet implemented.
     """
+
     def __init__(self, *, id_type, info_type):
         self._map = {}
         self.id_type = id_type
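
A usage sketch for HardLinkManager (assumed data; remember()/retrieve() and the id helpers appear in the next hunk): fuse.py in this commit instantiates it with id_type=bytes, info_type=str, maps each hardlink id to the first path seen, and links later items with the same id to that path.

    import hashlib

    def hardlink_id_from_inode(*, ino, dev):
        return hashlib.sha256(f"{ino}/{dev}".encode()).digest()

    seen = {}  # plain dict playing the role of the manager's internal map
    hlid = hardlink_id_from_inode(ino=1234, dev=42)
    seen.setdefault(hlid, "home/user/file")  # remember(id=hlid, info=path)
    print(seen[hlid])                        # retrieve(id=hlid) -> 'home/user/file'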
@@ -198,21 +205,21 @@ class HardLinkManager:
         return stat.S_ISREG(mode) or stat.S_ISBLK(mode) or stat.S_ISCHR(mode) or stat.S_ISFIFO(mode)

     def borg1_hardlink_master(self, item):  # legacy
-        return item.get('hardlink_master', True) and 'source' not in item and self.borg1_hardlinkable(item.mode)
+        return item.get("hardlink_master", True) and "source" not in item and self.borg1_hardlinkable(item.mode)

     def borg1_hardlink_slave(self, item):  # legacy
-        return 'source' in item and self.borg1_hardlinkable(item.mode)
+        return "source" in item and self.borg1_hardlinkable(item.mode)

     def hardlink_id_from_path(self, path):
         """compute a hardlink id from a path"""
         assert isinstance(path, str)
-        return hashlib.sha256(path.encode('utf-8', errors='surrogateescape')).digest()
+        return hashlib.sha256(path.encode("utf-8", errors="surrogateescape")).digest()

     def hardlink_id_from_inode(self, *, ino, dev):
         """compute a hardlink id from an inode"""
         assert isinstance(ino, int)
         assert isinstance(dev, int)
-        return hashlib.sha256(f'{ino}/{dev}'.encode()).digest()
+        return hashlib.sha256(f"{ino}/{dev}".encode()).digest()

     def remember(self, *, id, info):
         """
@@ -243,7 +250,7 @@ def scandir_keyfunc(dirent):
         return (0, dirent.inode())
     except OSError as e:
         # maybe a permission denied error while doing a stat() on the dirent
-        logger.debug('scandir_inorder: Unable to stat %s: %s', dirent.path, e)
+        logger.debug("scandir_inorder: Unable to stat %s: %s", dirent.path, e)
         # order this dirent after all the others lexically by file name
         # we may not break the whole scandir just because of an exception in one dirent
         # ignore the exception for now, since another stat will be done later anyways
@@ -268,7 +275,7 @@ def secure_erase(path, *, avoid_collateral_damage):
     If avoid_collateral_damage is False, we always secure erase.
     If there are hardlinks pointing to the same inode as <path>, they will contain random garbage afterwards.
     """
-    with open(path, 'r+b') as fd:
+    with open(path, "r+b") as fd:
         st = os.stat(fd.fileno())
         if not (st.st_nlink > 1 and avoid_collateral_damage):
             fd.write(os.urandom(st.st_size))
@@ -303,7 +310,7 @@ def safe_unlink(path):
         # no other hardlink! try to recover free space by truncating this file.
         try:
             # Do not create *path* if it does not exist, open for truncation in r+b mode (=O_RDWR|O_BINARY).
-            with open(path, 'r+b') as fd:
+            with open(path, "r+b") as fd:
                 fd.truncate()
         except OSError:
             # truncate didn't work, so we still have the original unlink issue - give up:
@@ -314,10 +321,10 @@ def safe_unlink(path):


 def dash_open(path, mode):
-    assert '+' not in mode  # the streams are either r or w, but never both
-    if path == '-':
-        stream = sys.stdin if 'r' in mode else sys.stdout
-        return stream.buffer if 'b' in mode else stream
+    assert "+" not in mode  # the streams are either r or w, but never both
+    if path == "-":
+        stream = sys.stdin if "r" in mode else sys.stdout
+        return stream.buffer if "b" in mode else stream
     else:
         return open(path, mode)

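dash_open() follows the usual CLI convention that "-" stands for stdin/stdout, returning the underlying binary buffer for b-modes. Exercising the stream branch:

    import sys

    def dash_open(path, mode):
        assert "+" not in mode  # the streams are either r or w, but never both
        if path == "-":
            stream = sys.stdin if "r" in mode else sys.stdout
            return stream.buffer if "b" in mode else stream
        else:
            return open(path, mode)

    out = dash_open("-", "wb")  # -> sys.stdout.buffer, nothing opened on disk
    out.write(b"hello\n")
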
@@ -325,17 +332,17 @@ def dash_open(path, mode):
 def O_(*flags):
     result = 0
     for flag in flags:
-        result |= getattr(os, 'O_' + flag, 0)
+        result |= getattr(os, "O_" + flag, 0)
     return result


-flags_base = O_('BINARY', 'NOCTTY', 'RDONLY')
-flags_special = flags_base | O_('NOFOLLOW')  # BLOCK == wait when reading devices or fifos
+flags_base = O_("BINARY", "NOCTTY", "RDONLY")
+flags_special = flags_base | O_("NOFOLLOW")  # BLOCK == wait when reading devices or fifos
 flags_special_follow = flags_base  # BLOCK == wait when reading symlinked devices or fifos
-flags_normal = flags_base | O_('NONBLOCK', 'NOFOLLOW')
-flags_noatime = flags_normal | O_('NOATIME')
-flags_root = O_('RDONLY')
-flags_dir = O_('DIRECTORY', 'RDONLY', 'NOFOLLOW')
+flags_normal = flags_base | O_("NONBLOCK", "NOFOLLOW")
+flags_noatime = flags_normal | O_("NOATIME")
+flags_root = O_("RDONLY")
+flags_dir = O_("DIRECTORY", "RDONLY", "NOFOLLOW")


 def os_open(*, flags, path=None, parent_fd=None, name=None, noatime=False):
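
The O_() helper above is what makes these flag constants portable: getattr(os, ..., 0) turns flags the platform lacks (e.g. O_NOATIME on macOS, O_BINARY outside Windows) into a harmless zero bit instead of an AttributeError:

    import os

    def O_(*flags):
        result = 0
        for flag in flags:
            result |= getattr(os, "O_" + flag, 0)  # missing flag -> contributes nothing
        return result

    print(O_("RDONLY", "NOFOLLOW", "NOATIME"))  # value differs per platform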
@@ -362,7 +369,7 @@ def os_open(*, flags, path=None, parent_fd=None, name=None, noatime=False):
         return None
     _flags_normal = flags
     if noatime:
-        _flags_noatime = _flags_normal | O_('NOATIME')
+        _flags_noatime = _flags_normal | O_("NOATIME")
         try:
             # if we have O_NOATIME, this likely will succeed if we are root or owner of file:
             fd = os.open(fname, _flags_noatime, dir_fd=parent_fd)
@@ -375,7 +382,8 @@ def os_open(*, flags, path=None, parent_fd=None, name=None, noatime=False):
         except OSError as exc:
             # O_NOATIME causes EROFS when accessing a volume shadow copy in WSL1
             from . import workarounds
-            if 'retry_erofs' in workarounds and exc.errno == errno.EROFS and _flags_noatime != _flags_normal:
+
+            if "retry_erofs" in workarounds and exc.errno == errno.EROFS and _flags_noatime != _flags_normal:
                 fd = os.open(fname, _flags_normal, dir_fd=parent_fd)
             else:
                 raise
@@ -407,6 +415,6 @@ def os_stat(*, path=None, parent_fd=None, name=None, follow_symlinks=False):
 def umount(mountpoint):
     env = prepare_subprocess_env(system=True)
     try:
-        return subprocess.call(['fusermount', '-u', mountpoint], env=env)
+        return subprocess.call(["fusermount", "-u", mountpoint], env=env)
     except FileNotFoundError:
-        return subprocess.call(['umount', mountpoint], env=env)
+        return subprocess.call(["umount", mountpoint], env=env)

+ 53 - 38
src/borg/helpers/manifest.py

@@ -9,6 +9,7 @@ from operator import attrgetter
 from .errors import Error

 from ..logger import create_logger
+
 logger = create_logger()

 from .datastruct import StableDict
@@ -26,10 +27,10 @@ class MandatoryFeatureUnsupported(Error):
     """Unsupported repository feature(s) {}. A newer version of borg is required to access this repository."""


-ArchiveInfo = namedtuple('ArchiveInfo', 'name id ts')
+ArchiveInfo = namedtuple("ArchiveInfo", "name id ts")

-AI_HUMAN_SORT_KEYS = ['timestamp'] + list(ArchiveInfo._fields)
-AI_HUMAN_SORT_KEYS.remove('ts')
+AI_HUMAN_SORT_KEYS = ["timestamp"] + list(ArchiveInfo._fields)
+AI_HUMAN_SORT_KEYS.remove("ts")


 class Archives(abc.MutableMapping):
@@ -38,6 +39,7 @@ class Archives(abc.MutableMapping):
     and we can deal with str keys (and it internally encodes to byte keys) and either
     str timestamps or datetime timestamps.
     """
+
     def __init__(self):
         # key: str archive name, value: dict('id': bytes_id, 'time': str_iso_ts)
         self._archives = {}
@@ -53,8 +55,8 @@ class Archives(abc.MutableMapping):
         values = self._archives.get(name)
         if values is None:
             raise KeyError
-        ts = parse_timestamp(values['time'])
-        return ArchiveInfo(name=name, id=values['id'], ts=ts)
+        ts = parse_timestamp(values["time"])
+        return ArchiveInfo(name=name, id=values["id"], ts=ts)

     def __setitem__(self, name, info):
         assert isinstance(name, str)
@@ -64,13 +66,15 @@ class Archives(abc.MutableMapping):
         if isinstance(ts, datetime):
             ts = ts.replace(tzinfo=None).strftime(ISO_FORMAT)
         assert isinstance(ts, str)
-        self._archives[name] = {'id': id, 'time': ts}
+        self._archives[name] = {"id": id, "time": ts}

     def __delitem__(self, name):
         assert isinstance(name, str)
         del self._archives[name]

-    def list(self, *, glob=None, match_end=r'\Z', sort_by=(), consider_checkpoints=True, first=None, last=None, reverse=False):
+    def list(
+        self, *, glob=None, match_end=r"\Z", sort_by=(), consider_checkpoints=True, first=None, last=None, reverse=False
+    ):
         """
         Return list of ArchiveInfo instances according to the parameters.

@@ -84,17 +88,17 @@ class Archives(abc.MutableMapping):
               some callers EXPECT to iterate over all archives in a repo for correct operation.
         """
         if isinstance(sort_by, (str, bytes)):
-            raise TypeError('sort_by must be a sequence of str')
-        regex = re.compile(shellpattern.translate(glob or '*', match_end=match_end))
+            raise TypeError("sort_by must be a sequence of str")
+        regex = re.compile(shellpattern.translate(glob or "*", match_end=match_end))
         archives = [x for x in self.values() if regex.match(x.name) is not None]
         if not consider_checkpoints:
-            archives = [x for x in archives if '.checkpoint' not in x.name]
+            archives = [x for x in archives if ".checkpoint" not in x.name]
         for sortkey in reversed(sort_by):
             archives.sort(key=attrgetter(sortkey))
         if first:
             archives = archives[:first]
         elif last:
-            archives = archives[max(len(archives) - last, 0):]
+            archives = archives[max(len(archives) - last, 0) :]
         if reverse:
             archives.reverse()
         return archives
@@ -103,17 +107,25 @@ class Archives(abc.MutableMapping):
         """
         get a list of archives, considering --first/last/prefix/glob-archives/sort/consider-checkpoints cmdline args
         """
-        name = getattr(args, 'name', None)
-        consider_checkpoints = getattr(args, 'consider_checkpoints', None)
+        name = getattr(args, "name", None)
+        consider_checkpoints = getattr(args, "consider_checkpoints", None)
         if name is not None:
-            raise Error('Giving a specific name is incompatible with options --first, --last, -a / --glob-archives, and --consider-checkpoints.')
-        return self.list(sort_by=args.sort_by.split(','), consider_checkpoints=consider_checkpoints, glob=args.glob_archives, first=args.first, last=args.last)
+            raise Error(
+                "Giving a specific name is incompatible with options --first, --last, -a / --glob-archives, and --consider-checkpoints."
+            )
+        return self.list(
+            sort_by=args.sort_by.split(","),
+            consider_checkpoints=consider_checkpoints,
+            glob=args.glob_archives,
+            first=args.first,
+            last=args.last,
+        )

     def set_raw_dict(self, d):
         """set the dict we get from the msgpack unpacker"""
         for k, v in d.items():
             assert isinstance(k, str)
-            assert isinstance(v, dict) and 'id' in v and 'time' in v
+            assert isinstance(v, dict) and "id" in v and "time" in v
             self._archives[k] = v

     def get_raw_dict(self):
@@ -122,7 +134,6 @@ class Archives(abc.MutableMapping):


 class Manifest:
-
     @enum.unique
     class Operation(enum.Enum):
         # The comments here only roughly describe the scope of each feature. In the end, additions need to be
@@ -133,25 +144,25 @@ class Manifest:
         # The READ operation describes which features are needed to safely list and extract the archives in the
         # repository.
-        READ = 'read'
+        READ = "read"
         # The CHECK operation is for all operations that need either to understand every detail
         # of the repository (for consistency checks and repairs) or are seldom used functions that just
         # should use the most restrictive feature set because more fine grained compatibility tracking is
         # not needed.
-        CHECK = 'check'
+        CHECK = "check"
         # The WRITE operation is for adding archives. Features here ensure that older clients don't add archives
         # in an old format, or is used to lock out clients that for other reasons can no longer safely add new
         # archives.
-        WRITE = 'write'
+        WRITE = "write"
         # The DELETE operation is for all operations (like archive deletion) that need a 100% correct reference
         # count and the need to be able to find all (directly and indirectly) referenced chunks of a given archive.
-        DELETE = 'delete'
+        DELETE = "delete"

     NO_OPERATION_CHECK = tuple()

     SUPPORTED_REPO_FEATURES = frozenset([])

-    MANIFEST_ID = b'\0' * 32
+    MANIFEST_ID = b"\0" * 32

     def __init__(self, key, repository, item_keys=None):
         self.archives = Archives()
@@ -175,6 +186,7 @@ class Manifest:
         from ..item import ManifestItem
         from ..crypto.key import key_factory, tam_required_file, tam_required
         from ..repository import Repository
+
         try:
             cdata = repository.get(cls.MANIFEST_ID)
         except Repository.ObjectNotFound:
@@ -183,26 +195,28 @@ class Manifest:
             key = key_factory(repository, cdata)
         manifest = cls(key, repository)
         data = key.decrypt(cls.MANIFEST_ID, cdata)
-        manifest_dict, manifest.tam_verified = key.unpack_and_verify_manifest(data, force_tam_not_required=force_tam_not_required)
+        manifest_dict, manifest.tam_verified = key.unpack_and_verify_manifest(
+            data, force_tam_not_required=force_tam_not_required
+        )
         m = ManifestItem(internal_dict=manifest_dict)
         manifest.id = key.id_hash(data)
-        if m.get('version') not in (1, 2):
-            raise ValueError('Invalid manifest version')
+        if m.get("version") not in (1, 2):
+            raise ValueError("Invalid manifest version")
         manifest.archives.set_raw_dict(m.archives)
-        manifest.timestamp = m.get('timestamp')
+        manifest.timestamp = m.get("timestamp")
         manifest.config = m.config
         # valid item keys are whatever is known in the repo or every key we know
-        manifest.item_keys = ITEM_KEYS | frozenset(m.get('item_keys', []))
+        manifest.item_keys = ITEM_KEYS | frozenset(m.get("item_keys", []))

         if manifest.tam_verified:
-            manifest_required = manifest.config.get('tam_required', False)
+            manifest_required = manifest.config.get("tam_required", False)
             security_required = tam_required(repository)
             if manifest_required and not security_required:
-                logger.debug('Manifest is TAM verified and says TAM is required, updating security database...')
+                logger.debug("Manifest is TAM verified and says TAM is required, updating security database...")
                 file = tam_required_file(repository)
-                open(file, 'w').close()
+                open(file, "w").close()
             if not manifest_required and security_required:
-                logger.debug('Manifest is TAM verified and says TAM is *not* required, updating security database...')
+                logger.debug("Manifest is TAM verified and says TAM is *not* required, updating security database...")
                 os.unlink(tam_required_file(repository))
         manifest.check_repository_compatibility(operations)
         return manifest, key
@@ -210,32 +224,33 @@ class Manifest:
     def check_repository_compatibility(self, operations):
         for operation in operations:
             assert isinstance(operation, self.Operation)
-            feature_flags = self.config.get('feature_flags', None)
+            feature_flags = self.config.get("feature_flags", None)
             if feature_flags is None:
                 return
             if operation.value not in feature_flags:
                 continue
             requirements = feature_flags[operation.value]
-            if 'mandatory' in requirements:
-                unsupported = set(requirements['mandatory']) - self.SUPPORTED_REPO_FEATURES
+            if "mandatory" in requirements:
+                unsupported = set(requirements["mandatory"]) - self.SUPPORTED_REPO_FEATURES
                 if unsupported:
                     raise MandatoryFeatureUnsupported(list(unsupported))

     def get_all_mandatory_features(self):
         result = {}
-        feature_flags = self.config.get('feature_flags', None)
+        feature_flags = self.config.get("feature_flags", None)
         if feature_flags is None:
             return result

         for operation, requirements in feature_flags.items():
-            if 'mandatory' in requirements:
-                result[operation] = set(requirements['mandatory'])
+            if "mandatory" in requirements:
+                result[operation] = set(requirements["mandatory"])
         return result

     def write(self):
         from ..item import ManifestItem
+
         if self.key.tam_required:
-            self.config['tam_required'] = True
+            self.config["tam_required"] = True
         # self.timestamp needs to be strictly monotonically increasing. Clocks often are not set correctly
         if self.timestamp is None:
             self.timestamp = datetime.utcnow().strftime(ISO_FORMAT)
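
Black only rewraps the `list()` signature and call sites above; the sort-then-slice semantics are untouched. A minimal standalone sketch of that logic (`ArchiveInfo` here is a hypothetical stand-in namedtuple, not borg's real class):

    from collections import namedtuple
    from operator import attrgetter

    ArchiveInfo = namedtuple("ArchiveInfo", "name id ts")  # hypothetical stand-in
    archives = [ArchiveInfo("c", b"\x03", 3), ArchiveInfo("a", b"\x01", 1), ArchiveInfo("b", b"\x02", 2)]
    for sortkey in reversed(("ts",)):  # later sort keys win, as in Archives.list()
        archives.sort(key=attrgetter(sortkey))
    print([a.name for a in archives[:2]])                          # first=2 -> ['a', 'b']
    print([a.name for a in archives[max(len(archives) - 2, 0):]])  # last=2  -> ['b', 'c']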

+ 55 - 48
src/borg/helpers/misc.py

@@ -10,6 +10,7 @@ from itertools import islice
 from operator import attrgetter

 from ..logger import create_logger
+
 logger = create_logger()

 from .time import to_localtime
@@ -30,15 +31,17 @@ def prune_within(archives, hours, kept_because):
     return result


-PRUNING_PATTERNS = OrderedDict([
-    ("secondly", '%Y-%m-%d %H:%M:%S'),
-    ("minutely", '%Y-%m-%d %H:%M'),
-    ("hourly", '%Y-%m-%d %H'),
-    ("daily", '%Y-%m-%d'),
-    ("weekly", '%G-%V'),
-    ("monthly", '%Y-%m'),
-    ("yearly", '%Y'),
-])
+PRUNING_PATTERNS = OrderedDict(
+    [
+        ("secondly", "%Y-%m-%d %H:%M:%S"),
+        ("minutely", "%Y-%m-%d %H:%M"),
+        ("hourly", "%Y-%m-%d %H"),
+        ("daily", "%Y-%m-%d"),
+        ("weekly", "%G-%V"),
+        ("monthly", "%Y-%m"),
+        ("yearly", "%Y"),
+    ]
+)


 def prune_split(archives, rule, n, kept_because=None):
@@ -51,7 +54,7 @@ def prune_split(archives, rule, n, kept_because=None):
         return keep

     a = None
-    for a in sorted(archives, key=attrgetter('ts'), reverse=True):
+    for a in sorted(archives, key=attrgetter("ts"), reverse=True):
         period = to_localtime(a.ts).strftime(pattern)
         if period != last:
             last = period
@@ -63,14 +66,14 @@ def prune_split(archives, rule, n, kept_because=None):
     # Keep oldest archive if we didn't reach the target retention count
     if a is not None and len(keep) < n and a.id not in kept_because:
         keep.append(a)
-        kept_because[a.id] = (rule+"[oldest]", len(keep))
+        kept_because[a.id] = (rule + "[oldest]", len(keep))
     return keep


 def sysinfo():
-    show_sysinfo = os.environ.get('BORG_SHOW_SYSINFO', 'yes').lower()
-    if show_sysinfo == 'no':
-        return ''
+    show_sysinfo = os.environ.get("BORG_SHOW_SYSINFO", "yes").lower()
+    if show_sysinfo == "no":
+        return ""

     python_implementation = platform.python_implementation()
     python_version = platform.python_version()
@@ -80,30 +83,34 @@ def sysinfo():
         uname = os.uname()
     except AttributeError:
         uname = None
-    if sys.platform.startswith('linux'):
-        linux_distribution = ('Unknown Linux', '', '')
+    if sys.platform.startswith("linux"):
+        linux_distribution = ("Unknown Linux", "", "")
     else:
         linux_distribution = None
     try:
-        msgpack_version = '.'.join(str(v) for v in msgpack.version)
+        msgpack_version = ".".join(str(v) for v in msgpack.version)
     except:
-        msgpack_version = 'unknown'
+        msgpack_version = "unknown"
     from ..fuse_impl import llfuse, BORG_FUSE_IMPL
-    llfuse_name = llfuse.__name__ if llfuse else 'None'
-    llfuse_version = (' %s' % llfuse.__version__) if llfuse else ''
-    llfuse_info = f'{llfuse_name}{llfuse_version} [{BORG_FUSE_IMPL}]'
+
+    llfuse_name = llfuse.__name__ if llfuse else "None"
+    llfuse_version = (" %s" % llfuse.__version__) if llfuse else ""
+    llfuse_info = f"{llfuse_name}{llfuse_version} [{BORG_FUSE_IMPL}]"
     info = []
     if uname is not None:
-        info.append('Platform: {}'.format(' '.join(uname)))
+        info.append("Platform: {}".format(" ".join(uname)))
     if linux_distribution is not None:
-        info.append('Linux: %s %s %s' % linux_distribution)
-    info.append('Borg: {}  Python: {} {} msgpack: {} fuse: {}'.format(
-                borg_version, python_implementation, python_version, msgpack_version, llfuse_info))
-    info.append('PID: %d  CWD: %s' % (os.getpid(), os.getcwd()))
-    info.append('sys.argv: %r' % sys.argv)
-    info.append('SSH_ORIGINAL_COMMAND: %r' % os.environ.get('SSH_ORIGINAL_COMMAND'))
-    info.append('')
-    return '\n'.join(info)
+        info.append("Linux: %s %s %s" % linux_distribution)
+    info.append(
+        "Borg: {}  Python: {} {} msgpack: {} fuse: {}".format(
+            borg_version, python_implementation, python_version, msgpack_version, llfuse_info
+        )
+    )
+    info.append("PID: %d  CWD: %s" % (os.getpid(), os.getcwd()))
+    info.append("sys.argv: %r" % sys.argv)
+    info.append("SSH_ORIGINAL_COMMAND: %r" % os.environ.get("SSH_ORIGINAL_COMMAND"))
+    info.append("")
+    return "\n".join(info)


 def log_multi(*msgs, level=logging.INFO, logger=logger):
@@ -133,7 +140,7 @@ class ChunkIteratorFileWrapper:
         """
         self.chunk_iterator = chunk_iterator
         self.chunk_offset = 0
-        self.chunk = b''
+        self.chunk = b""
         self.exhausted = False
         self.read_callback = read_callback

@@ -152,11 +159,11 @@ class ChunkIteratorFileWrapper:
     def _read(self, nbytes):
         if not nbytes:
-            return b''
+            return b""
         remaining = self._refill()
         will_read = min(remaining, nbytes)
         self.chunk_offset += will_read
-        return self.chunk[self.chunk_offset - will_read:self.chunk_offset]
+        return self.chunk[self.chunk_offset - will_read : self.chunk_offset]

     def read(self, nbytes):
         parts = []
@@ -166,7 +173,7 @@ class ChunkIteratorFileWrapper:
             parts.append(read_data)
             if self.read_callback:
                 self.read_callback(read_data)
-        return b''.join(parts)
+        return b"".join(parts)


 def open_item(archive, item):
@@ -207,7 +214,7 @@ class ErrorIgnoringTextIOWrapper(io.TextIOWrapper):
                     super().close()
                 except OSError:
                     pass
-        return ''
+        return ""

     def write(self, s):
         if not self.closed:
@@ -225,8 +232,8 @@ def iter_separated(fd, sep=None, read_size=4096):
     """Iter over chunks of open file ``fd`` delimited by ``sep``. Doesn't trim."""
     buf = fd.read(read_size)
     is_str = isinstance(buf, str)
-    part = '' if is_str else b''
-    sep = sep or ('\n' if is_str else b'\n')
+    part = "" if is_str else b""
+    sep = sep or ("\n" if is_str else b"\n")
     while len(buf) > 0:
         part2, *items = buf.split(sep)
         *full, part = (part + part2, *items)
@@ -240,17 +247,17 @@ def iter_separated(fd, sep=None, read_size=4096):
 def get_tar_filter(fname, decompress):
     # Note that filter is None if fname is '-'.
-    if fname.endswith(('.tar.gz', '.tgz')):
-        filter = 'gzip -d' if decompress else 'gzip'
-    elif fname.endswith(('.tar.bz2', '.tbz')):
-        filter = 'bzip2 -d' if decompress else 'bzip2'
-    elif fname.endswith(('.tar.xz', '.txz')):
-        filter = 'xz -d' if decompress else 'xz'
-    elif fname.endswith(('.tar.lz4', )):
-        filter = 'lz4 -d' if decompress else 'lz4'
-    elif fname.endswith(('.tar.zstd', )):
-        filter = 'zstd -d' if decompress else 'zstd'
+    if fname.endswith((".tar.gz", ".tgz")):
+        filter = "gzip -d" if decompress else "gzip"
+    elif fname.endswith((".tar.bz2", ".tbz")):
+        filter = "bzip2 -d" if decompress else "bzip2"
+    elif fname.endswith((".tar.xz", ".txz")):
+        filter = "xz -d" if decompress else "xz"
+    elif fname.endswith((".tar.lz4",)):
+        filter = "lz4 -d" if decompress else "lz4"
+    elif fname.endswith((".tar.zstd",)):
+        filter = "zstd -d" if decompress else "zstd"
     else:
         filter = None
-    logger.debug('Automatically determined tar filter: %s', filter)
+    logger.debug("Automatically determined tar filter: %s", filter)
     return filter
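
The PRUNING_PATTERNS hunk above only changes indentation and quoting; the strftime patterns are what prune_split() uses to bucket archives into retention periods. A stdlib-only sketch of the weekly bucketing (the dates are arbitrary examples):

    from datetime import datetime

    weekly = "%G-%V"  # ISO year + ISO week, as in PRUNING_PATTERNS["weekly"]
    a = datetime(2022, 1, 3)   # Monday of ISO week 2022-01
    b = datetime(2022, 1, 9)   # Sunday of the same ISO week
    c = datetime(2022, 1, 10)  # Monday of ISO week 2022-02
    print(a.strftime(weekly), b.strftime(weekly), c.strftime(weekly))  # 2022-01 2022-01 2022-02
    # prune_split() keeps only the newest archive per distinct period string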

+ 63 - 37
src/borg/helpers/msgpack.py

@@ -64,7 +64,7 @@ version = mp_version
 USE_BIN_TYPE = True
 RAW = False
-UNICODE_ERRORS = 'surrogateescape'
+UNICODE_ERRORS = "surrogateescape"


 class PackException(Exception):
@@ -76,13 +76,25 @@ class UnpackException(Exception):


 class Packer(mp_Packer):
-    def __init__(self, *, default=None, unicode_errors=UNICODE_ERRORS,
-                 use_single_float=False, autoreset=True, use_bin_type=USE_BIN_TYPE,
-                 strict_types=False):
+    def __init__(
+        self,
+        *,
+        default=None,
+        unicode_errors=UNICODE_ERRORS,
+        use_single_float=False,
+        autoreset=True,
+        use_bin_type=USE_BIN_TYPE,
+        strict_types=False
+    ):
         assert unicode_errors == UNICODE_ERRORS
-        super().__init__(default=default, unicode_errors=unicode_errors,
-                         use_single_float=use_single_float, autoreset=autoreset, use_bin_type=use_bin_type,
-                         strict_types=strict_types)
+        super().__init__(
+            default=default,
+            unicode_errors=unicode_errors,
+            use_single_float=use_single_float,
+            autoreset=autoreset,
+            use_bin_type=use_bin_type,
+            strict_types=strict_types,
+        )

     def pack(self, obj):
         try:
@@ -108,18 +120,36 @@ def pack(o, stream, *, use_bin_type=USE_BIN_TYPE, unicode_errors=UNICODE_ERRORS,


 class Unpacker(mp_Unpacker):
-    def __init__(self, file_like=None, *, read_size=0, use_list=True, raw=RAW,
-                 object_hook=None, object_pairs_hook=None, list_hook=None,
-                 unicode_errors=UNICODE_ERRORS, max_buffer_size=0,
-                 ext_hook=ExtType,
-                 strict_map_key=False):
+    def __init__(
+        self,
+        file_like=None,
+        *,
+        read_size=0,
+        use_list=True,
+        raw=RAW,
+        object_hook=None,
+        object_pairs_hook=None,
+        list_hook=None,
+        unicode_errors=UNICODE_ERRORS,
+        max_buffer_size=0,
+        ext_hook=ExtType,
+        strict_map_key=False
+    ):
         assert raw == RAW
         assert unicode_errors == UNICODE_ERRORS
-        kw = dict(file_like=file_like, read_size=read_size, use_list=use_list, raw=raw,
-                  object_hook=object_hook, object_pairs_hook=object_pairs_hook, list_hook=list_hook,
-                  unicode_errors=unicode_errors, max_buffer_size=max_buffer_size,
-                  ext_hook=ext_hook,
-                  strict_map_key=strict_map_key)
+        kw = dict(
+            file_like=file_like,
+            read_size=read_size,
+            use_list=use_list,
+            raw=raw,
+            object_hook=object_hook,
+            object_pairs_hook=object_pairs_hook,
+            list_hook=list_hook,
+            unicode_errors=unicode_errors,
+            max_buffer_size=max_buffer_size,
+            ext_hook=ext_hook,
+            strict_map_key=strict_map_key,
+        )
         super().__init__(**kw)

     def unpack(self):
@@ -141,28 +171,22 @@ class Unpacker(mp_Unpacker):
     next = __next__


-def unpackb(packed, *, raw=RAW, unicode_errors=UNICODE_ERRORS,
-            strict_map_key=False,
-            **kwargs):
+def unpackb(packed, *, raw=RAW, unicode_errors=UNICODE_ERRORS, strict_map_key=False, **kwargs):
     assert raw == RAW
     assert unicode_errors == UNICODE_ERRORS
     try:
-        kw = dict(raw=raw, unicode_errors=unicode_errors,
-                  strict_map_key=strict_map_key)
+        kw = dict(raw=raw, unicode_errors=unicode_errors, strict_map_key=strict_map_key)
         kw.update(kwargs)
         return mp_unpackb(packed, **kw)
     except Exception as e:
         raise UnpackException(e)


-def unpack(stream, *, raw=RAW, unicode_errors=UNICODE_ERRORS,
-           strict_map_key=False,
-           **kwargs):
+def unpack(stream, *, raw=RAW, unicode_errors=UNICODE_ERRORS, strict_map_key=False, **kwargs):
     assert raw == RAW
     assert unicode_errors == UNICODE_ERRORS
     try:
-        kw = dict(raw=raw, unicode_errors=unicode_errors,
-                  strict_map_key=strict_map_key)
+        kw = dict(raw=raw, unicode_errors=unicode_errors, strict_map_key=strict_map_key)
         kw.update(kwargs)
         return mp_unpack(stream, **kw)
     except Exception as e:
@@ -171,32 +195,34 @@ def unpack(stream, *, raw=RAW, unicode_errors=UNICODE_ERRORS,
 # msgpacking related utilities -----------------------------------------------

+
 def is_slow_msgpack():
     import msgpack
     import msgpack.fallback
+
     return msgpack.Packer is msgpack.fallback.Packer


 def is_supported_msgpack():
     # DO NOT CHANGE OR REMOVE! See also requirements and comments in setup.py.
     import msgpack
-    return (1, 0, 3) <= msgpack.version <= (1, 0, 4) and \
-           msgpack.version not in []  # < add bad releases here to deny list
+
+    return (1, 0, 3) <= msgpack.version <= (
+        1,
+        0,
+        4,
+    ) and msgpack.version not in []  # < add bad releases here to deny list


 def get_limited_unpacker(kind):
     """return a limited Unpacker because we should not trust msgpack data received from remote"""
     # Note: msgpack >= 0.6.1 auto-computes DoS-safe max values from len(data) for
     #       unpack(data) or from max_buffer_size for Unpacker(max_buffer_size=N).
-    args = dict(use_list=False,  # return tuples, not lists
-                max_buffer_size=3 * max(BUFSIZE, MAX_OBJECT_SIZE),
-                )
-    if kind in ('server', 'client'):
+    args = dict(use_list=False, max_buffer_size=3 * max(BUFSIZE, MAX_OBJECT_SIZE))  # return tuples, not lists
+    if kind in ("server", "client"):
         pass  # nothing special
-    elif kind in ('manifest', 'key'):
-        args.update(dict(use_list=True,  # default value
-                         object_hook=StableDict,
-                         ))
+    elif kind in ("manifest", "key"):
+        args.update(dict(use_list=True, object_hook=StableDict))  # default value
     else:
         raise ValueError('kind must be "server", "client", "manifest" or "key"')
     return Unpacker(**args)
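
Some of the rewrites above look odd, e.g. the version tuple in is_supported_msgpack() exploded over four lines, but Black's output is AST-equivalent to its input by design. A minimal sketch (standard library only, with simplified example expressions) of how to check any before/after pair:

    import ast

    def same_ast(before: str, after: str) -> bool:
        # ast.dump() ignores layout, so equal dumps mean equal program structure
        return ast.dump(ast.parse(before)) == ast.dump(ast.parse(after))

    before = "ok = (1, 0, 3) <= version <= (1, 0, 4)"
    after = "ok = (1, 0, 3) <= version <= (\n    1,\n    0,\n    4,\n)"
    print(same_ast(before, after))  # True: only the formatting differs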

+ 281 - 267
src/borg/helpers/parseformat.py

@@ -15,6 +15,7 @@ from functools import partial
 from string import Formatter

 from ..logger import create_logger
+
 logger = create_logger()

 from .errors import Error
@@ -28,34 +29,34 @@ from ..platformflags import is_win32


 def bin_to_hex(binary):
-    return hexlify(binary).decode('ascii')
+    return hexlify(binary).decode("ascii")


-def safe_decode(s, coding='utf-8', errors='surrogateescape'):
+def safe_decode(s, coding="utf-8", errors="surrogateescape"):
     """decode bytes to str, with round-tripping "invalid" bytes"""
     if s is None:
         return None
     return s.decode(coding, errors)


-def safe_encode(s, coding='utf-8', errors='surrogateescape'):
+def safe_encode(s, coding="utf-8", errors="surrogateescape"):
     """encode str to bytes, with round-tripping "invalid" bytes"""
     if s is None:
         return None
     return s.encode(coding, errors)


-def remove_surrogates(s, errors='replace'):
+def remove_surrogates(s, errors="replace"):
     """Replace surrogates generated by fsdecode with '?'"""
-    return s.encode('utf-8', errors).decode('utf-8')
+    return s.encode("utf-8", errors).decode("utf-8")


 def eval_escapes(s):
     """Evaluate literal escape sequences in a string (eg `\\n` -> `\n`)."""
-    return s.encode('ascii', 'backslashreplace').decode('unicode-escape')
+    return s.encode("ascii", "backslashreplace").decode("unicode-escape")


-def decode_dict(d, keys, encoding='utf-8', errors='surrogateescape'):
+def decode_dict(d, keys, encoding="utf-8", errors="surrogateescape"):
     for key in keys:
         if isinstance(d.get(key), bytes):
             d[key] = d[key].decode(encoding, errors)
@@ -66,13 +67,13 @@ def positive_int_validator(value):
     """argparse type for positive integers"""
     int_value = int(value)
     if int_value <= 0:
-        raise argparse.ArgumentTypeError('A positive integer is required: %s' % value)
+        raise argparse.ArgumentTypeError("A positive integer is required: %s" % value)
     return int_value


 def interval(s):
     """Convert a string representing a valid interval to a number of hours."""
-    multiplier = {'H': 1, 'd': 24, 'w': 24 * 7, 'm': 24 * 31, 'y': 24 * 365}
+    multiplier = {"H": 1, "d": 24, "w": 24 * 7, "m": 24 * 31, "y": 24 * 365}

     if s.endswith(tuple(multiplier.keys())):
         number = s[:-1]
@@ -80,8 +81,7 @@ def interval(s):
     else:
         # range suffixes in ascending multiplier order
         ranges = [k for k, v in sorted(multiplier.items(), key=lambda t: t[1])]
-        raise argparse.ArgumentTypeError(
-            f'Unexpected interval time unit "{s[-1]}": expected one of {ranges!r}')
+        raise argparse.ArgumentTypeError(f'Unexpected interval time unit "{s[-1]}": expected one of {ranges!r}')

     try:
         hours = int(number) * multiplier[suffix]
@@ -89,17 +89,16 @@ def interval(s):
         hours = -1

     if hours <= 0:
-        raise argparse.ArgumentTypeError(
-            'Unexpected interval number "%s": expected an integer greater than 0' % number)
+        raise argparse.ArgumentTypeError('Unexpected interval number "%s": expected an integer greater than 0' % number)

     return hours


 def ChunkerParams(s):
-    params = s.strip().split(',')
+    params = s.strip().split(",")
     count = len(params)
     if count == 0:
-        raise ValueError('no chunker params given')
+        raise ValueError("no chunker params given")
     algo = params[0].lower()
     if algo == CH_FIXED and 2 <= count <= 3:  # fixed, block_size[, header_size]
         block_size = int(params[1])
@@ -110,36 +109,36 @@ def ChunkerParams(s):
             # or in-memory chunk management.
             # choose the block (chunk) size wisely: if you have a lot of data and you cut
             # it into very small chunks, you are asking for trouble!
-            raise ValueError('block_size must not be less than 64 Bytes')
+            raise ValueError("block_size must not be less than 64 Bytes")
         if block_size > MAX_DATA_SIZE or header_size > MAX_DATA_SIZE:
-            raise ValueError('block_size and header_size must not exceed MAX_DATA_SIZE [%d]' % MAX_DATA_SIZE)
+            raise ValueError("block_size and header_size must not exceed MAX_DATA_SIZE [%d]" % MAX_DATA_SIZE)
         return algo, block_size, header_size
-    if algo == 'default' and count == 1:  # default
+    if algo == "default" and count == 1:  # default
         return CHUNKER_PARAMS
     # this must stay last as it deals with old-style compat mode (no algorithm, 4 params, buzhash):
     if algo == CH_BUZHASH and count == 5 or count == 4:  # [buzhash, ]chunk_min, chunk_max, chunk_mask, window_size
-        chunk_min, chunk_max, chunk_mask, window_size = (int(p) for p in params[count - 4:])
+        chunk_min, chunk_max, chunk_mask, window_size = (int(p) for p in params[count - 4 :])
         if not (chunk_min <= chunk_mask <= chunk_max):
-            raise ValueError('required: chunk_min <= chunk_mask <= chunk_max')
+            raise ValueError("required: chunk_min <= chunk_mask <= chunk_max")
         if chunk_min < 6:
             # see comment in 'fixed' algo check
-            raise ValueError('min. chunk size exponent must not be less than 6 (2^6 = 64B min. chunk size)')
+            raise ValueError("min. chunk size exponent must not be less than 6 (2^6 = 64B min. chunk size)")
         if chunk_max > 23:
-            raise ValueError('max. chunk size exponent must not be more than 23 (2^23 = 8MiB max. chunk size)')
+            raise ValueError("max. chunk size exponent must not be more than 23 (2^23 = 8MiB max. chunk size)")
         return CH_BUZHASH, chunk_min, chunk_max, chunk_mask, window_size
-    raise ValueError('invalid chunker params')
+    raise ValueError("invalid chunker params")


 def FilesCacheMode(s):
-    ENTRIES_MAP = dict(ctime='c', mtime='m', size='s', inode='i', rechunk='r', disabled='d')
-    VALID_MODES = ('cis', 'ims', 'cs', 'ms', 'cr', 'mr', 'd', 's')  # letters in alpha order
-    entries = set(s.strip().split(','))
+    ENTRIES_MAP = dict(ctime="c", mtime="m", size="s", inode="i", rechunk="r", disabled="d")
+    VALID_MODES = ("cis", "ims", "cs", "ms", "cr", "mr", "d", "s")  # letters in alpha order
+    entries = set(s.strip().split(","))
     if not entries <= set(ENTRIES_MAP):
-        raise ValueError('cache mode must be a comma-separated list of: %s' % ','.join(sorted(ENTRIES_MAP)))
+        raise ValueError("cache mode must be a comma-separated list of: %s" % ",".join(sorted(ENTRIES_MAP)))
     short_entries = {ENTRIES_MAP[entry] for entry in entries}
-    mode = ''.join(sorted(short_entries))
+    mode = "".join(sorted(short_entries))
     if mode not in VALID_MODES:
-        raise ValueError('cache mode short must be one of: %s' % ','.join(VALID_MODES))
+        raise ValueError("cache mode short must be one of: %s" % ",".join(VALID_MODES))
     return mode


@@ -151,9 +150,9 @@ def partial_format(format, mapping):
     """
     for key, value in mapping.items():
         key = re.escape(key)
-        format = re.sub(fr'(?<!\{{)((\{{{key}\}})|(\{{{key}:[^\}}]*\}}))',
-                        lambda match: match.group(1).format_map(mapping),
-                        format)
+        format = re.sub(
+            rf"(?<!\{{)((\{{{key}\}})|(\{{{key}:[^\}}]*\}}))", lambda match: match.group(1).format_map(mapping), format
+        )
     return format


@@ -162,7 +161,7 @@ class DatetimeWrapper:
         self.dt = dt

     def __format__(self, format_spec):
-        if format_spec == '':
+        if format_spec == "":
             format_spec = ISO_FORMAT_NO_USECS
         return self.dt.__format__(format_spec)

@@ -190,20 +189,21 @@ def format_line(format, data):
 def replace_placeholders(text, overrides={}):
     """Replace placeholders in text with their values."""
     from ..platform import fqdn, hostname, getosusername
+
     current_time = datetime.now(timezone.utc)
     data = {
-        'pid': os.getpid(),
-        'fqdn': fqdn,
-        'reverse-fqdn': '.'.join(reversed(fqdn.split('.'))),
-        'hostname': hostname,
-        'now': DatetimeWrapper(current_time.astimezone(None)),
-        'utcnow': DatetimeWrapper(current_time),
-        'user': getosusername(),
-        'uuid4': str(uuid.uuid4()),
-        'borgversion': borg_version,
-        'borgmajor': '%d' % borg_version_tuple[:1],
-        'borgminor': '%d.%d' % borg_version_tuple[:2],
-        'borgpatch': '%d.%d.%d' % borg_version_tuple[:3],
+        "pid": os.getpid(),
+        "fqdn": fqdn,
+        "reverse-fqdn": ".".join(reversed(fqdn.split("."))),
+        "hostname": hostname,
+        "now": DatetimeWrapper(current_time.astimezone(None)),
+        "utcnow": DatetimeWrapper(current_time),
+        "user": getosusername(),
+        "uuid4": str(uuid.uuid4()),
+        "borgversion": borg_version,
+        "borgmajor": "%d" % borg_version_tuple[:1],
+        "borgminor": "%d.%d" % borg_version_tuple[:2],
+        "borgpatch": "%d.%d.%d" % borg_version_tuple[:3],
         **overrides,
     }
     return format_line(text, data)
@@ -220,17 +220,17 @@ CommentSpec = replace_placeholders
 def SortBySpec(text):
     from .manifest import AI_HUMAN_SORT_KEYS
-    for token in text.split(','):
+
+    for token in text.split(","):
         if token not in AI_HUMAN_SORT_KEYS:
-            raise ValueError('Invalid sort key: %s' % token)
-    return text.replace('timestamp', 'ts')
+            raise ValueError("Invalid sort key: %s" % token)
+    return text.replace("timestamp", "ts")


 def format_file_size(v, precision=2, sign=False, iec=False):
-    """Format file size into a human friendly format
-    """
+    """Format file size into a human friendly format"""
     fn = sizeof_fmt_iec if iec else sizeof_fmt_decimal
-    return fn(v, suffix='B', sep=' ', precision=precision, sign=sign)
+    return fn(v, suffix="B", sep=" ", precision=precision, sign=sign)


 class FileSize(int):
@@ -250,22 +250,16 @@ def parse_file_size(s):
     suffix = s[-1]
     power = 1000
     try:
-        factor = {
-            'K': power,
-            'M': power**2,
-            'G': power**3,
-            'T': power**4,
-            'P': power**5,
-        }[suffix]
+        factor = {"K": power, "M": power**2, "G": power**3, "T": power**4, "P": power**5}[suffix]
         s = s[:-1]
     except KeyError:
         factor = 1
     return int(float(s) * factor)


-def sizeof_fmt(num, suffix='B', units=None, power=None, sep='', precision=2, sign=False):
-    sign = '+' if sign and num > 0 else ''
-    fmt = '{0:{1}.{2}f}{3}{4}{5}'
+def sizeof_fmt(num, suffix="B", units=None, power=None, sep="", precision=2, sign=False):
+    sign = "+" if sign and num > 0 else ""
+    fmt = "{0:{1}.{2}f}{3}{4}{5}"
     prec = 0
     for unit in units[:-1]:
         if abs(round(num, precision)) < power:
@@ -277,27 +271,37 @@ def sizeof_fmt(num, suffix='B', units=None, power=None, sep='', precision=2, sig
     return fmt.format(num, sign, prec, sep, unit, suffix)


-def sizeof_fmt_iec(num, suffix='B', sep='', precision=2, sign=False):
-    return sizeof_fmt(num, suffix=suffix, sep=sep, precision=precision, sign=sign,
-                      units=['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'], power=1024)
+def sizeof_fmt_iec(num, suffix="B", sep="", precision=2, sign=False):
+    return sizeof_fmt(
+        num,
+        suffix=suffix,
+        sep=sep,
+        precision=precision,
+        sign=sign,
+        units=["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"],
+        power=1024,
+    )


-def sizeof_fmt_decimal(num, suffix='B', sep='', precision=2, sign=False):
-    return sizeof_fmt(num, suffix=suffix, sep=sep, precision=precision, sign=sign,
-                      units=['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'], power=1000)
+def sizeof_fmt_decimal(num, suffix="B", sep="", precision=2, sign=False):
+    return sizeof_fmt(
+        num,
+        suffix=suffix,
+        sep=sep,
+        precision=precision,
+        sign=sign,
+        units=["", "k", "M", "G", "T", "P", "E", "Z", "Y"],
+        power=1000,
+    )


 def format_archive(archive):
-    return '%-36s %s [%s]' % (
-        archive.name,
-        format_time(to_localtime(archive.ts)),
-        bin_to_hex(archive.id),
-    )
+    return "%-36s %s [%s]" % (archive.name, format_time(to_localtime(archive.ts)), bin_to_hex(archive.id))


 def parse_stringified_list(s):
     l = re.split(" *, *", s)
-    return [item for item in l if item != '']
+    return [item for item in l if item != ""]


 class Location:
@@ -343,28 +347,42 @@ class Location:
     """

     # regexes for misc. kinds of supported location specifiers:
-    ssh_re = re.compile(r"""
+    ssh_re = re.compile(
+        r"""
         (?P<proto>ssh)://                                       # ssh://
-        """ + optional_user_re + host_re + r"""                 # user@  (optional), host name or address
+        """
+        + optional_user_re
+        + host_re
+        + r"""                 # user@  (optional), host name or address
        (?::(?P<port>\d+))?                                     # :port (optional)
-        """ + abs_path_re, re.VERBOSE)                          # path
+        """
+        + abs_path_re,
+        re.VERBOSE,
+    )  # path

-    file_re = re.compile(r"""
+    file_re = re.compile(
+        r"""
         (?P<proto>file)://                                      # file://
-        """ + file_path_re, re.VERBOSE)                         # servername/path or path
+        """
+        + file_path_re,
+        re.VERBOSE,
+    )  # servername/path or path

-    local_re = re.compile(local_path_re, re.VERBOSE)            # local path
+    local_re = re.compile(local_path_re, re.VERBOSE)  # local path

-    win_file_re = re.compile(r"""
+    win_file_re = re.compile(
+        r"""
         (?:file://)?                                        # optional file protocol
         (?P<path>
             (?:[a-zA-Z]:)?                                  # Drive letter followed by a colon (optional)
             (?:[^:]+)                                       # Anything which does not contain a :, at least one character
         )
-        """, re.VERBOSE)
+        """,
+        re.VERBOSE,
+    )

-    def __init__(self, text='', overrides={}, other=False):
-        self.repo_env_var = 'BORG_OTHER_REPO' if other else 'BORG_REPO'
+    def __init__(self, text="", overrides={}, other=False):
+        self.repo_env_var = "BORG_OTHER_REPO" if other else "BORG_REPO"
         self.valid = False
         self.proto = None
         self.user = None
@@ -393,15 +411,15 @@ class Location:
     def _parse(self, text):
         def normpath_special(p):
             # avoid that normpath strips away our relative path hack and even makes p absolute
-            relative = p.startswith('/./')
+            relative = p.startswith("/./")
             p = os.path.normpath(p)
-            return ('/.' + p) if relative else p
+            return ("/." + p) if relative else p

         if is_win32:
             m = self.win_file_re.match(text)
             if m:
-                self.proto = 'file'
-                self.path = m.group('path')
+                self.proto = "file"
+                self.path = m.group("path")
                 return True

             # On windows we currently only support windows paths.
@@ -409,38 +427,38 @@ class Location:
         m = self.ssh_re.match(text)
         if m:
-            self.proto = m.group('proto')
-            self.user = m.group('user')
-            self._host = m.group('host')
-            self.port = m.group('port') and int(m.group('port')) or None
-            self.path = normpath_special(m.group('path'))
+            self.proto = m.group("proto")
+            self.user = m.group("user")
+            self._host = m.group("host")
+            self.port = m.group("port") and int(m.group("port")) or None
+            self.path = normpath_special(m.group("path"))
             return True
         m = self.file_re.match(text)
         if m:
-            self.proto = m.group('proto')
-            self.path = normpath_special(m.group('path'))
+            self.proto = m.group("proto")
+            self.path = normpath_special(m.group("path"))
             return True
         m = self.local_re.match(text)
         if m:
-            self.proto = 'file'
-            self.path = normpath_special(m.group('path'))
+            self.proto = "file"
+            self.path = normpath_special(m.group("path"))
             return True
         return False

     def __str__(self):
         items = [
-            'proto=%r' % self.proto,
-            'user=%r' % self.user,
-            'host=%r' % self.host,
-            'port=%r' % self.port,
-            'path=%r' % self.path,
+            "proto=%r" % self.proto,
+            "user=%r" % self.user,
+            "host=%r" % self.host,
+            "port=%r" % self.port,
+            "path=%r" % self.path,
         ]
-        return ', '.join(items)
+        return ", ".join(items)

     def to_key_filename(self):
-        name = re.sub(r'[^\w]', '_', self.path).strip('_')
-        if self.proto != 'file':
-            name = re.sub(r'[^\w]', '_', self.host) + '__' + name
+        name = re.sub(r"[^\w]", "_", self.path).strip("_")
+        if self.proto != "file":
+            name = re.sub(r"[^\w]", "_", self.host) + "__" + name
         if len(name) > 100:
             # Limit file names to some reasonable length. Most file systems
             # limit them to 255 [unit of choice]; due to variations in unicode
@@ -455,28 +473,30 @@ class Location:
     def host(self):
         # strip square brackets used for IPv6 addrs
         if self._host is not None:
-            return self._host.lstrip('[').rstrip(']')
+            return self._host.lstrip("[").rstrip("]")

     def canonical_path(self):
-        if self.proto == 'file':
+        if self.proto == "file":
             return self.path
         else:
-            if self.path and self.path.startswith('~'):
-                path = '/' + self.path  # /~/x = path x relative to home dir
-            elif self.path and not self.path.startswith('/'):
-                path = '/./' + self.path  # /./x = path x relative to cwd
+            if self.path and self.path.startswith("~"):
+                path = "/" + self.path  # /~/x = path x relative to home dir
+            elif self.path and not self.path.startswith("/"):
+                path = "/./" + self.path  # /./x = path x relative to cwd
             else:
                 path = self.path
-            return 'ssh://{}{}{}{}'.format(f'{self.user}@' if self.user else '',
-                                           self._host,  # needed for ipv6 addrs
-                                           f':{self.port}' if self.port else '',
-                                           path)
+            return "ssh://{}{}{}{}".format(
+                f"{self.user}@" if self.user else "",
+                self._host,  # needed for ipv6 addrs
+                f":{self.port}" if self.port else "",
+                path,
+            )

     def with_timestamp(self, timestamp):
-        return Location(self.raw, overrides={
-            'now': DatetimeWrapper(timestamp.astimezone(None)),
-            'utcnow': DatetimeWrapper(timestamp),
-        })
+        return Location(
+            self.raw,
+            overrides={"now": DatetimeWrapper(timestamp.astimezone(None)), "utcnow": DatetimeWrapper(timestamp)},
+        )


 def location_validator(proto=None, other=False):
@@ -486,33 +506,35 @@ def location_validator(proto=None, other=False):
         except ValueError as err:
             raise argparse.ArgumentTypeError(str(err)) from None
         if proto is not None and loc.proto != proto:
-            if proto == 'file':
+            if proto == "file":
                 raise argparse.ArgumentTypeError('"%s": Repository must be local' % text)
             else:
                 raise argparse.ArgumentTypeError('"%s": Repository must be remote' % text)
         return loc
+
     return validator


 def archivename_validator():
     def validator(text):
         text = replace_placeholders(text)
-        if '/' in text or '::' in text or not text:
+        if "/" in text or "::" in text or not text:
             raise argparse.ArgumentTypeError('Invalid archive name: "%s"' % text)
         return text
+
     return validator


 class BaseFormatter:
     FIXED_KEYS = {
         # Formatting aids
-        'LF': '\n',
-        'SPACE': ' ',
-        'TAB': '\t',
-        'CR': '\r',
-        'NUL': '\0',
-        'NEWLINE': os.linesep,
-        'NL': os.linesep,
+        "LF": "\n",
+        "SPACE": " ",
+        "TAB": "\t",
+        "CR": "\r",
+        "NUL": "\0",
+        "NEWLINE": os.linesep,
+        "NL": os.linesep,
     }

     def get_item_data(self, item):
@@ -523,42 +545,45 @@ class BaseFormatter:
     @staticmethod
     @staticmethod
     def keys_help():
     def keys_help():
-        return "- NEWLINE: OS dependent line separator\n" \
-               "- NL: alias of NEWLINE\n" \
-               "- NUL: NUL character for creating print0 / xargs -0 like output, see barchive and bpath keys below\n" \
-               "- SPACE\n" \
-               "- TAB\n" \
-               "- CR\n" \
-               "- LF"
+        return (
+            "- NEWLINE: OS dependent line separator\n"
+            "- NL: alias of NEWLINE\n"
+            "- NUL: NUL character for creating print0 / xargs -0 like output, see barchive and bpath keys below\n"
+            "- SPACE\n"
+            "- TAB\n"
+            "- CR\n"
+            "- LF"
+        )
 
 
 
 
 class ArchiveFormatter(BaseFormatter):
 class ArchiveFormatter(BaseFormatter):
     KEY_DESCRIPTIONS = {
     KEY_DESCRIPTIONS = {
-        'archive': 'archive name interpreted as text (might be missing non-text characters, see barchive)',
-        'name': 'alias of "archive"',
-        'barchive': 'verbatim archive name, can contain any character except NUL',
-        'comment': 'archive comment interpreted as text (might be missing non-text characters, see bcomment)',
-        'bcomment': 'verbatim archive comment, can contain any character except NUL',
+        "archive": "archive name interpreted as text (might be missing non-text characters, see barchive)",
+        "name": 'alias of "archive"',
+        "barchive": "verbatim archive name, can contain any character except NUL",
+        "comment": "archive comment interpreted as text (might be missing non-text characters, see bcomment)",
+        "bcomment": "verbatim archive comment, can contain any character except NUL",
         # *start* is the key used by borg-info for this timestamp, this makes the formats more compatible
         # *start* is the key used by borg-info for this timestamp, this makes the formats more compatible
-        'start': 'time (start) of creation of the archive',
-        'time': 'alias of "start"',
-        'end': 'time (end) of creation of the archive',
-        'command_line': 'command line which was used to create the archive',
-        'id': 'internal ID of the archive',
-        'hostname': 'hostname of host on which this archive was created',
-        'username': 'username of user who created this archive',
+        "start": "time (start) of creation of the archive",
+        "time": 'alias of "start"',
+        "end": "time (end) of creation of the archive",
+        "command_line": "command line which was used to create the archive",
+        "id": "internal ID of the archive",
+        "hostname": "hostname of host on which this archive was created",
+        "username": "username of user who created this archive",
     }
     }
     KEY_GROUPS = (
     KEY_GROUPS = (
-        ('archive', 'name', 'barchive', 'comment', 'bcomment', 'id'),
-        ('start', 'time', 'end', 'command_line'),
-        ('hostname', 'username'),
+        ("archive", "name", "barchive", "comment", "bcomment", "id"),
+        ("start", "time", "end", "command_line"),
+        ("hostname", "username"),
     )
     )
 
 
     @classmethod
     @classmethod
     def available_keys(cls):
     def available_keys(cls):
         from .manifest import ArchiveInfo
         from .manifest import ArchiveInfo
-        fake_archive_info = ArchiveInfo('archivename', b'\1'*32, datetime(1970, 1, 1, tzinfo=timezone.utc))
-        formatter = cls('', None, None, None)
+
+        fake_archive_info = ArchiveInfo("archivename", b"\1" * 32, datetime(1970, 1, 1, tzinfo=timezone.utc))
+        formatter = cls("", None, None, None)
         keys = []
         keys = []
         keys.extend(formatter.call_keys.keys())
         keys.extend(formatter.call_keys.keys())
         keys.extend(formatter.get_item_data(fake_archive_info).keys())
         keys.extend(formatter.get_item_data(fake_archive_info).keys())
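
The FIXED_KEYS above let format strings spell separators symbolically; {NUL} in particular enables print0/xargs -0 style records. A tiny illustration using plain str.format (the path value is made up):

    fmt = "{path}{NUL}"
    record = fmt.format(path="/some/file", NUL="\0")
    assert record.endswith("\0")  # NUL-terminated record, safe for xargs -0
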
@@ -596,12 +621,12 @@ class ArchiveFormatter(BaseFormatter):
         self.format = partial_format(format, static_keys)
         self.format_keys = {f[1] for f in Formatter().parse(format)}
         self.call_keys = {
-            'hostname': partial(self.get_meta, 'hostname', rs=True),
-            'username': partial(self.get_meta, 'username', rs=True),
-            'comment': partial(self.get_meta, 'comment', rs=True),
-            'bcomment': partial(self.get_meta, 'comment', rs=False),
-            'end': self.get_ts_end,
-            'command_line': self.get_cmdline,
+            "hostname": partial(self.get_meta, "hostname", rs=True),
+            "username": partial(self.get_meta, "username", rs=True),
+            "comment": partial(self.get_meta, "comment", rs=True),
+            "bcomment": partial(self.get_meta, "comment", rs=False),
+            "end": self.get_ts_end,
+            "command_line": self.get_cmdline,
         }
         self.used_call_keys = set(self.call_keys) & self.format_keys
         if self.json:
@@ -611,21 +636,23 @@ class ArchiveFormatter(BaseFormatter):
             self.item_data = static_keys

     def format_item_json(self, item):
-        return json.dumps(self.get_item_data(item), cls=BorgJsonEncoder) + '\n'
+        return json.dumps(self.get_item_data(item), cls=BorgJsonEncoder) + "\n"

     def get_item_data(self, archive_info):
         self.name = archive_info.name
         self.id = archive_info.id
         item_data = {}
         item_data.update(self.item_data)
-        item_data.update({
-            'name': remove_surrogates(archive_info.name),
-            'archive': remove_surrogates(archive_info.name),
-            'barchive': archive_info.name,
-            'id': bin_to_hex(archive_info.id),
-            'time': self.format_time(archive_info.ts),
-            'start': self.format_time(archive_info.ts),
-        })
+        item_data.update(
+            {
+                "name": remove_surrogates(archive_info.name),
+                "archive": remove_surrogates(archive_info.name),
+                "barchive": archive_info.name,
+                "id": bin_to_hex(archive_info.id),
+                "time": self.format_time(archive_info.ts),
+                "start": self.format_time(archive_info.ts),
+            }
+        )
         for key in self.used_call_keys:
             item_data[key] = self.call_keys[key]()
         return item_data
@@ -635,19 +662,20 @@ class ArchiveFormatter(BaseFormatter):
         """lazy load / update loaded archive"""
         """lazy load / update loaded archive"""
         if self._archive is None or self._archive.id != self.id:
         if self._archive is None or self._archive.id != self.id:
             from ..archive import Archive
             from ..archive import Archive
+
             self._archive = Archive(self.repository, self.key, self.manifest, self.name, iec=self.iec)
             self._archive = Archive(self.repository, self.key, self.manifest, self.name, iec=self.iec)
         return self._archive
         return self._archive
 
 
     def get_meta(self, key, rs):
     def get_meta(self, key, rs):
-        value = self.archive.metadata.get(key, '')
+        value = self.archive.metadata.get(key, "")
         return remove_surrogates(value) if rs else value
         return remove_surrogates(value) if rs else value
 
 
     def get_cmdline(self):
     def get_cmdline(self):
-        cmdline = map(remove_surrogates, self.archive.metadata.get('cmdline', []))
+        cmdline = map(remove_surrogates, self.archive.metadata.get("cmdline", []))
         if self.json:
         if self.json:
             return list(cmdline)
             return list(cmdline)
         else:
         else:
-            return ' '.join(map(shlex.quote, cmdline))
+            return " ".join(map(shlex.quote, cmdline))
 
 
     def get_ts_end(self):
     def get_ts_end(self):
         return self.format_time(self.archive.ts_end)
         return self.format_time(self.archive.ts_end)
@@ -659,31 +687,29 @@ class ArchiveFormatter(BaseFormatter):
 class ItemFormatter(BaseFormatter):
     # we provide the hash algos from python stdlib (except shake_*) and additionally xxh64.
     # shake_* is not provided because it uses an incompatible .digest() method to support variable length.
-    hash_algorithms = hashlib.algorithms_guaranteed.union({'xxh64'}).difference({'shake_128', 'shake_256'})
+    hash_algorithms = hashlib.algorithms_guaranteed.union({"xxh64"}).difference({"shake_128", "shake_256"})
     KEY_DESCRIPTIONS = {
-        'bpath': 'verbatim POSIX path, can contain any character except NUL',
-        'path': 'path interpreted as text (might be missing non-text characters, see bpath)',
-        'source': 'link target for symlinks (identical to linktarget)',
-        'hlid': 'hard link identity (same if hardlinking same fs object)',
-        'extra': 'prepends {source} with " -> " for soft links and " link to " for hard links',
-        'dsize': 'deduplicated size',
-        'num_chunks': 'number of chunks in this file',
-        'unique_chunks': 'number of unique chunks in this file',
-        'xxh64': 'XXH64 checksum of this file (note: this is NOT a cryptographic hash!)',
-        'health': 'either "healthy" (file ok) or "broken" (if file has all-zero replacement chunks)',
+        "bpath": "verbatim POSIX path, can contain any character except NUL",
+        "path": "path interpreted as text (might be missing non-text characters, see bpath)",
+        "source": "link target for symlinks (identical to linktarget)",
+        "hlid": "hard link identity (same if hardlinking same fs object)",
+        "extra": 'prepends {source} with " -> " for soft links and " link to " for hard links',
+        "dsize": "deduplicated size",
+        "num_chunks": "number of chunks in this file",
+        "unique_chunks": "number of unique chunks in this file",
+        "xxh64": "XXH64 checksum of this file (note: this is NOT a cryptographic hash!)",
+        "health": 'either "healthy" (file ok) or "broken" (if file has all-zero replacement chunks)',
     }
     KEY_GROUPS = (
-        ('type', 'mode', 'uid', 'gid', 'user', 'group', 'path', 'bpath', 'source', 'linktarget', 'hlid', 'flags'),
-        ('size', 'dsize', 'num_chunks', 'unique_chunks'),
-        ('mtime', 'ctime', 'atime', 'isomtime', 'isoctime', 'isoatime'),
+        ("type", "mode", "uid", "gid", "user", "group", "path", "bpath", "source", "linktarget", "hlid", "flags"),
+        ("size", "dsize", "num_chunks", "unique_chunks"),
+        ("mtime", "ctime", "atime", "isomtime", "isoctime", "isoatime"),
         tuple(sorted(hash_algorithms)),
-        ('archiveid', 'archivename', 'extra'),
-        ('health', )
+        ("archiveid", "archivename", "extra"),
+        ("health",),
     )

-    KEYS_REQUIRING_CACHE = (
-        'dsize', 'unique_chunks',
-    )
+    KEYS_REQUIRING_CACHE = ("dsize", "unique_chunks")

     @classmethod
     def available_keys(cls):
@@ -691,7 +717,8 @@ class ItemFormatter(BaseFormatter):
             fpr = name = ""

         from ..item import Item
-        fake_item = Item(mode=0, path='', user='', group='', mtime=0, uid=0, gid=0)
+
+        fake_item = Item(mode=0, path="", user="", group="", mtime=0, uid=0, gid=0)
         formatter = cls(FakeArchive, "")
         keys = []
         keys.extend(formatter.call_keys.keys())
@@ -723,13 +750,11 @@ class ItemFormatter(BaseFormatter):

     def __init__(self, archive, format, *, json_lines=False):
         from ..checksums import StreamingXXH64
+
         self.xxh64 = StreamingXXH64
         self.archive = archive
         self.json_lines = json_lines
-        static_keys = {
-            'archivename': archive.name,
-            'archiveid': archive.fpr,
-        }
+        static_keys = {"archivename": archive.name, "archiveid": archive.fpr}
         static_keys.update(self.FIXED_KEYS)
         if self.json_lines:
             self.item_data = {}
@@ -739,23 +764,23 @@ class ItemFormatter(BaseFormatter):
         self.format = partial_format(format, static_keys)
         self.format_keys = {f[1] for f in Formatter().parse(format)}
         self.call_keys = {
-            'size': self.calculate_size,
-            'dsize': partial(self.sum_unique_chunks_metadata, lambda chunk: chunk.size),
-            'num_chunks': self.calculate_num_chunks,
-            'unique_chunks': partial(self.sum_unique_chunks_metadata, lambda chunk: 1),
-            'isomtime': partial(self.format_iso_time, 'mtime'),
-            'isoctime': partial(self.format_iso_time, 'ctime'),
-            'isoatime': partial(self.format_iso_time, 'atime'),
-            'mtime': partial(self.format_time, 'mtime'),
-            'ctime': partial(self.format_time, 'ctime'),
-            'atime': partial(self.format_time, 'atime'),
+            "size": self.calculate_size,
+            "dsize": partial(self.sum_unique_chunks_metadata, lambda chunk: chunk.size),
+            "num_chunks": self.calculate_num_chunks,
+            "unique_chunks": partial(self.sum_unique_chunks_metadata, lambda chunk: 1),
+            "isomtime": partial(self.format_iso_time, "mtime"),
+            "isoctime": partial(self.format_iso_time, "ctime"),
+            "isoatime": partial(self.format_iso_time, "atime"),
+            "mtime": partial(self.format_time, "mtime"),
+            "ctime": partial(self.format_time, "ctime"),
+            "atime": partial(self.format_time, "atime"),
         }
         for hash_function in self.hash_algorithms:
             self.call_keys[hash_function] = partial(self.hash_item, hash_function)
         self.used_call_keys = set(self.call_keys) & self.format_keys

     def format_item_json(self, item):
-        return json.dumps(self.get_item_data(item), cls=BorgJsonEncoder) + '\n'
+        return json.dumps(self.get_item_data(item), cls=BorgJsonEncoder) + "\n"

     def get_item_data(self, item):
         item_data = {}
@@ -763,30 +788,30 @@ class ItemFormatter(BaseFormatter):
         mode = stat.filemode(item.mode)
         item_type = mode[0]

-        source = item.get('source', '')
-        extra = ''
+        source = item.get("source", "")
+        extra = ""
         if source:
             source = remove_surrogates(source)
-            extra = ' -> %s' % source
-        hlid = item.get('hlid')
-        hlid = bin_to_hex(hlid) if hlid else ''
-        item_data['type'] = item_type
-        item_data['mode'] = mode
-        item_data['user'] = item.get('user', str(item.uid))
-        item_data['group'] = item.get('group', str(item.gid))
-        item_data['uid'] = item.uid
-        item_data['gid'] = item.gid
-        item_data['path'] = remove_surrogates(item.path)
+            extra = " -> %s" % source
+        hlid = item.get("hlid")
+        hlid = bin_to_hex(hlid) if hlid else ""
+        item_data["type"] = item_type
+        item_data["mode"] = mode
+        item_data["user"] = item.get("user", str(item.uid))
+        item_data["group"] = item.get("group", str(item.gid))
+        item_data["uid"] = item.uid
+        item_data["gid"] = item.gid
+        item_data["path"] = remove_surrogates(item.path)
         if self.json_lines:
-            item_data['healthy'] = 'chunks_healthy' not in item
+            item_data["healthy"] = "chunks_healthy" not in item
         else:
-            item_data['bpath'] = item.path
-            item_data['extra'] = extra
-            item_data['health'] = 'broken' if 'chunks_healthy' in item else 'healthy'
-        item_data['source'] = source
-        item_data['linktarget'] = source
-        item_data['hlid'] = hlid
-        item_data['flags'] = item.get('bsdflags')
+            item_data["bpath"] = item.path
+            item_data["extra"] = extra
+            item_data["health"] = "broken" if "chunks_healthy" in item else "healthy"
+        item_data["source"] = source
+        item_data["linktarget"] = source
+        item_data["hlid"] = hlid
+        item_data["flags"] = item.get("bsdflags")
         for key in self.used_call_keys:
             item_data[key] = self.call_keys[key](item)
         return item_data
@@ -801,21 +826,21 @@ class ItemFormatter(BaseFormatter):
                        the metadata needed from the chunk
         """
         chunk_index = self.archive.cache.chunks
-        chunks = item.get('chunks', [])
+        chunks = item.get("chunks", [])
         chunks_counter = Counter(c.id for c in chunks)
         return sum(metadata_func(c) for c in chunks if chunk_index[c.id].refcount == chunks_counter[c.id])

     def calculate_num_chunks(self, item):
-        return len(item.get('chunks', []))
+        return len(item.get("chunks", []))

     def calculate_size(self, item):
         # note: does not support hardlink slaves, they will be size 0
         return item.get_size()

     def hash_item(self, hash_function, item):
-        if 'chunks' not in item:
+        if "chunks" not in item:
             return ""
-        if hash_function == 'xxh64':
+        if hash_function == "xxh64":
             hash = self.xxh64()
         elif hash_function in self.hash_algorithms:
             hash = hashlib.new(hash_function)
@@ -832,18 +857,18 @@ class ItemFormatter(BaseFormatter):

 def file_status(mode):
     if stat.S_ISREG(mode):
-        return 'A'
+        return "A"
     elif stat.S_ISDIR(mode):
-        return 'd'
+        return "d"
     elif stat.S_ISBLK(mode):
-        return 'b'
+        return "b"
     elif stat.S_ISCHR(mode):
-        return 'c'
+        return "c"
     elif stat.S_ISLNK(mode):
-        return 's'
+        return "s"
     elif stat.S_ISFIFO(mode):
-        return 'f'
-    return '?'
+        return "f"
+    return "?"


 def clean_lines(lines, lstrip=None, rstrip=None, remove_empty=True, remove_comments=True):
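
For orientation, file_status() condenses an inode's mode bits into one status letter. A small sketch (assumes /tmp exists and is a directory):

    import os
    import stat

    mode = os.lstat("/tmp").st_mode
    assert stat.S_ISDIR(mode)   # file_status(mode) would return "d" here
    # regular file -> "A", symlink -> "s", FIFO -> "f",
    # block/char device -> "b"/"c", anything else -> "?"
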
@@ -868,7 +893,7 @@ def clean_lines(lines, lstrip=None, rstrip=None, remove_empty=True, remove_comments=True):
             line = line.rstrip(rstrip)
         if remove_empty and not line:
             continue
-        if remove_comments and line.startswith('#'):
+        if remove_comments and line.startswith("#"):
             continue
         yield line

@@ -883,6 +908,7 @@ def swidth_slice(string, max_width):
     Latin characters are usually one cell wide, many CJK characters are two cells wide.
     """
     from ..platform import swidth
+
     reverse = max_width < 0
     max_width = abs(max_width)
     if reverse:
@@ -896,7 +922,7 @@ def swidth_slice(string, max_width):
         result.append(character)
     if reverse:
         result.reverse()
-    return ''.join(result)
+    return "".join(result)


 def ellipsis_truncate(msg, space):
@@ -905,15 +931,15 @@ def ellipsis_truncate(msg, space):
     this_is_a_very_long_string -------> this_is..._string
     """
     from ..platform import swidth
-    ellipsis_width = swidth('...')
+
+    ellipsis_width = swidth("...")
     msg_width = swidth(msg)
     if space < 8:
         # if there is very little space, just show ...
-        return '...' + ' ' * (space - ellipsis_width)
+        return "..." + " " * (space - ellipsis_width)
     if space < ellipsis_width + msg_width:
-        return '{}...{}'.format(swidth_slice(msg, space // 2 - ellipsis_width),
-                                swidth_slice(msg, -space // 2))
-    return msg + ' ' * (space - msg_width)
+        return "{}...{}".format(swidth_slice(msg, space // 2 - ellipsis_width), swidth_slice(msg, -space // 2))
+    return msg + " " * (space - msg_width)


 class BorgJsonEncoder(json.JSONEncoder):
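
ellipsis_truncate() budgets in terminal cells (via swidth), not characters: it keeps both ends and drops the middle. Worked through for a budget of 18 cells, per the branch above:

    msg = "this_is_a_very_long_string"   # 26 cells wide (ASCII: 1 cell each)
    # space=18 is less than 3 + 26, so the result is
    # swidth_slice(msg, 18 // 2 - 3) + "..." + swidth_slice(msg, -18 // 2)
    assert msg[:6] + "..." + msg[-9:] == "this_i...ng_string"  # 18 cells
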
@@ -922,23 +948,16 @@ class BorgJsonEncoder(json.JSONEncoder):
         from ..remote import RemoteRepository
         from ..archive import Archive
         from ..cache import LocalCache, AdHocCache
+
         if isinstance(o, Repository) or isinstance(o, RemoteRepository):
-            return {
-                'id': bin_to_hex(o.id),
-                'location': o._location.canonical_path(),
-            }
+            return {"id": bin_to_hex(o.id), "location": o._location.canonical_path()}
         if isinstance(o, Archive):
             return o.info()
         if isinstance(o, LocalCache):
-            return {
-                'path': o.path,
-                'stats': o.stats(),
-            }
+            return {"path": o.path, "stats": o.stats()}
         if isinstance(o, AdHocCache):
-            return {
-                'stats': o.stats(),
-            }
-        if callable(getattr(o, 'to_json', None)):
+            return {"stats": o.stats()}
+        if callable(getattr(o, "to_json", None)):
             return o.to_json()
         return super().default(o)

@@ -946,17 +965,12 @@ class BorgJsonEncoder(json.JSONEncoder):
 def basic_json_data(manifest, *, cache=None, extra=None):
     key = manifest.key
     data = extra or {}
-    data.update({
-        'repository': BorgJsonEncoder().default(manifest.repository),
-        'encryption': {
-            'mode': key.ARG_NAME,
-        },
-    })
-    data['repository']['last_modified'] = OutputTimestamp(manifest.last_timestamp.replace(tzinfo=timezone.utc))
-    if key.NAME.startswith('key file'):
-        data['encryption']['keyfile'] = key.find_key()
+    data.update({"repository": BorgJsonEncoder().default(manifest.repository), "encryption": {"mode": key.ARG_NAME}})
+    data["repository"]["last_modified"] = OutputTimestamp(manifest.last_timestamp.replace(tzinfo=timezone.utc))
+    if key.NAME.startswith("key file"):
+        data["encryption"]["keyfile"] = key.find_key()
     if cache:
-        data['cache'] = cache
+        data["cache"] = cache
     return data


@@ -975,13 +989,13 @@ def prepare_dump_dict(d):
         # look nice and chunk ids should mostly show in hex. Use a special
         # inband signaling character (ASCII DEL) to distinguish between
         # decoded and hex mode.
-        if not value.startswith(b'\x7f'):
+        if not value.startswith(b"\x7f"):
             try:
                 value = value.decode()
                 return value
             except UnicodeDecodeError:
                 pass
-        return '\u007f' + bin_to_hex(value)
+        return "\u007f" + bin_to_hex(value)

     def decode_tuple(t):
         res = []

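A hedged sketch of the inband signaling used above, outside the diff: byte strings that decode cleanly are dumped as text, everything else (and anything already starting with DEL) is shown as DEL + hex. The helper name and the use of bytes.hex() in place of bin_to_hex are illustrative only:

    def render_bytes_for_dump(value: bytes) -> str:  # illustrative name
        if not value.startswith(b"\x7f"):
            try:
                return value.decode()       # plain text stays readable
            except UnicodeDecodeError:
                pass
        return "\u007f" + value.hex()       # binary becomes DEL + hex

    assert render_bytes_for_dump(b"hello") == "hello"
    assert render_bytes_for_dump(b"\xff\x00") == "\u007f" + "ff00"
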
+ 36 - 30
src/borg/helpers/passphrase.py

@@ -39,7 +39,7 @@ class Passphrase(str):

     @classmethod
     def env_passphrase(cls, default=None):
-        passphrase = cls._env_passphrase('BORG_PASSPHRASE', default)
+        passphrase = cls._env_passphrase("BORG_PASSPHRASE", default)
         if passphrase is not None:
             return passphrase
         passphrase = cls.env_passcommand()
@@ -51,7 +51,7 @@ class Passphrase(str):

     @classmethod
     def env_passcommand(cls, default=None):
-        passcommand = os.environ.get('BORG_PASSCOMMAND', None)
+        passcommand = os.environ.get("BORG_PASSCOMMAND", None)
         if passcommand is not None:
             # passcommand is a system command (not inside pyinstaller env)
             env = prepare_subprocess_env(system=True)
@@ -59,21 +59,21 @@ class Passphrase(str):
                 passphrase = subprocess.check_output(shlex.split(passcommand), universal_newlines=True, env=env)
             except (subprocess.CalledProcessError, FileNotFoundError) as e:
                 raise PasscommandFailure(e)
-            return cls(passphrase.rstrip('\n'))
+            return cls(passphrase.rstrip("\n"))

     @classmethod
     def fd_passphrase(cls):
         try:
-            fd = int(os.environ.get('BORG_PASSPHRASE_FD'))
+            fd = int(os.environ.get("BORG_PASSPHRASE_FD"))
         except (ValueError, TypeError):
             return None
-        with os.fdopen(fd, mode='r') as f:
+        with os.fdopen(fd, mode="r") as f:
             passphrase = f.read()
-        return cls(passphrase.rstrip('\n'))
+        return cls(passphrase.rstrip("\n"))

     @classmethod
     def env_new_passphrase(cls, default=None):
-        return cls._env_passphrase('BORG_NEW_PASSPHRASE', default)
+        return cls._env_passphrase("BORG_NEW_PASSPHRASE", default)

     @classmethod
     def getpass(cls, prompt):
@@ -83,32 +83,38 @@ class Passphrase(str):
             if prompt:
                 print()  # avoid err msg appearing right of prompt
             msg = []
-            for env_var in 'BORG_PASSPHRASE', 'BORG_PASSCOMMAND':
+            for env_var in "BORG_PASSPHRASE", "BORG_PASSCOMMAND":
                 env_var_set = os.environ.get(env_var) is not None
-                msg.append('{} is {}.'.format(env_var, 'set' if env_var_set else 'not set'))
-            msg.append('Interactive password query failed.')
-            raise NoPassphraseFailure(' '.join(msg)) from None
+                msg.append("{} is {}.".format(env_var, "set" if env_var_set else "not set"))
+            msg.append("Interactive password query failed.")
+            raise NoPassphraseFailure(" ".join(msg)) from None
         else:
             return cls(pw)

     @classmethod
     def verification(cls, passphrase):
-        msg = 'Do you want your passphrase to be displayed for verification? [yN]: '
-        if yes(msg, retry_msg=msg, invalid_msg='Invalid answer, try again.',
-               retry=True, env_var_override='BORG_DISPLAY_PASSPHRASE'):
-            print('Your passphrase (between double-quotes): "%s"' % passphrase,
-                  file=sys.stderr)
-            print('Make sure the passphrase displayed above is exactly what you wanted.',
-                  file=sys.stderr)
+        msg = "Do you want your passphrase to be displayed for verification? [yN]: "
+        if yes(
+            msg,
+            retry_msg=msg,
+            invalid_msg="Invalid answer, try again.",
+            retry=True,
+            env_var_override="BORG_DISPLAY_PASSPHRASE",
+        ):
+            print('Your passphrase (between double-quotes): "%s"' % passphrase, file=sys.stderr)
+            print("Make sure the passphrase displayed above is exactly what you wanted.", file=sys.stderr)
             try:
-                passphrase.encode('ascii')
+                passphrase.encode("ascii")
             except UnicodeEncodeError:
-                print('Your passphrase (UTF-8 encoding in hex): %s' %
-                      bin_to_hex(passphrase.encode('utf-8')),
-                      file=sys.stderr)
-                print('As you have a non-ASCII passphrase, it is recommended to keep the '
-                      'UTF-8 encoding in hex together with the passphrase at a safe place.',
-                      file=sys.stderr)
+                print(
+                    "Your passphrase (UTF-8 encoding in hex): %s" % bin_to_hex(passphrase.encode("utf-8")),
+                    file=sys.stderr,
+                )
+                print(
+                    "As you have a non-ASCII passphrase, it is recommended to keep the "
+                    "UTF-8 encoding in hex together with the passphrase at a safe place.",
+                    file=sys.stderr,
+                )

     @classmethod
     def new(cls, allow_empty=False):
@@ -119,17 +125,17 @@ class Passphrase(str):
         if passphrase is not None:
             return passphrase
         for retry in range(1, 11):
-            passphrase = cls.getpass('Enter new passphrase: ')
+            passphrase = cls.getpass("Enter new passphrase: ")
             if allow_empty or passphrase:
-                passphrase2 = cls.getpass('Enter same passphrase again: ')
+                passphrase2 = cls.getpass("Enter same passphrase again: ")
                 if passphrase == passphrase2:
                     cls.verification(passphrase)
-                    logger.info('Remember your passphrase. Your data will be inaccessible without it.')
+                    logger.info("Remember your passphrase. Your data will be inaccessible without it.")
                     return passphrase
                 else:
-                    print('Passphrases do not match', file=sys.stderr)
+                    print("Passphrases do not match", file=sys.stderr)
             else:
-                print('Passphrase must not be blank', file=sys.stderr)
+                print("Passphrase must not be blank", file=sys.stderr)
         else:
             raise PasswordRetriesExceeded


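Of the passphrase sources above, BORG_PASSPHRASE_FD keeps the secret out of the process environment and command line. A minimal sketch of feeding it through an inherited pipe (repo path and passphrase are placeholders):

    import os
    import subprocess

    r, w = os.pipe()
    os.set_inheritable(r, True)          # child must inherit the read end
    os.write(w, b"sekrit\n")             # trailing \n is stripped by borg
    os.close(w)
    subprocess.run(
        ["borg", "list", "/path/to/repo"],
        env={**os.environ, "BORG_PASSPHRASE_FD": str(r)},
        close_fds=False,                 # keep fd r open across exec
    )
    os.close(r)
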
+ 49 - 39
src/borg/helpers/process.py

@@ -13,6 +13,7 @@ from .. import __version__

 from ..platformflags import is_win32, is_linux, is_freebsd, is_darwin
 from ..logger import create_logger
+
 logger = create_logger()

 from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_SIGNAL_BASE, Error
@@ -21,6 +22,7 @@ from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_SIGNAL_BASE, Error
 @contextlib.contextmanager
 def _daemonize():
     from ..platform import get_process_id
+
     old_id = get_process_id()
     pid = os.fork()
     if pid:
@@ -30,13 +32,13 @@ def _daemonize():
         except _ExitCodeException as e:
             exit_code = e.exit_code
         finally:
-            logger.debug('Daemonizing: Foreground process (%s, %s, %s) is now dying.' % old_id)
+            logger.debug("Daemonizing: Foreground process (%s, %s, %s) is now dying." % old_id)
             os._exit(exit_code)
     os.setsid()
     pid = os.fork()
     if pid:
         os._exit(0)
-    os.chdir('/')
+    os.chdir("/")
     os.close(0)
     os.close(1)
     fd = os.open(os.devnull, os.O_RDWR)
@@ -78,12 +80,12 @@ def daemonizing(*, timeout=5):
     with _daemonize() as (old_id, new_id):
         if new_id is None:
             # The original / parent process, waiting for a signal to die.
-            logger.debug('Daemonizing: Foreground process (%s, %s, %s) is waiting for background process...' % old_id)
+            logger.debug("Daemonizing: Foreground process (%s, %s, %s) is waiting for background process..." % old_id)
             exit_code = EXIT_SUCCESS
             # Indeed, SIGHUP and SIGTERM handlers should have been set on archiver.run(). Just in case...
-            with signal_handler('SIGINT', raising_signal_handler(KeyboardInterrupt)), \
-                 signal_handler('SIGHUP', raising_signal_handler(SigHup)), \
-                 signal_handler('SIGTERM', raising_signal_handler(SigTerm)):
+            with signal_handler("SIGINT", raising_signal_handler(KeyboardInterrupt)), signal_handler(
+                "SIGHUP", raising_signal_handler(SigHup)
+            ), signal_handler("SIGTERM", raising_signal_handler(SigTerm)):
                 try:
                     if timeout > 0:
                         time.sleep(timeout)
@@ -96,15 +98,17 @@ def daemonizing(*, timeout=5):
                     exit_code = EXIT_WARNING
                 except KeyboardInterrupt:
                     # Manual termination.
-                    logger.debug('Daemonizing: Foreground process (%s, %s, %s) received SIGINT.' % old_id)
+                    logger.debug("Daemonizing: Foreground process (%s, %s, %s) received SIGINT." % old_id)
                     exit_code = EXIT_SIGNAL_BASE + 2
                 except BaseException as e:
                     # Just in case...
-                    logger.warning('Daemonizing: Foreground process received an exception while waiting:\n' +
-                                   ''.join(traceback.format_exception(e.__class__, e, e.__traceback__)))
+                    logger.warning(
+                        "Daemonizing: Foreground process received an exception while waiting:\n"
+                        + "".join(traceback.format_exception(e.__class__, e, e.__traceback__))
+                    )
                     exit_code = EXIT_WARNING
                 else:
-                    logger.warning('Daemonizing: Background process did not respond (timeout). Is it alive?')
+                    logger.warning("Daemonizing: Background process did not respond (timeout). Is it alive?")
                     exit_code = EXIT_WARNING
                 finally:
                     # Don't call with-body, but die immediately!
@@ -113,22 +117,26 @@

         # The background / grandchild process.
         sig_to_foreground = signal.SIGTERM
-        logger.debug('Daemonizing: Background process (%s, %s, %s) is starting...' % new_id)
+        logger.debug("Daemonizing: Background process (%s, %s, %s) is starting..." % new_id)
         try:
             yield old_id, new_id
         except BaseException as e:
             sig_to_foreground = signal.SIGHUP
-            logger.warning('Daemonizing: Background process raised an exception while starting:\n' +
-                           ''.join(traceback.format_exception(e.__class__, e, e.__traceback__)))
+            logger.warning(
+                "Daemonizing: Background process raised an exception while starting:\n"
+                + "".join(traceback.format_exception(e.__class__, e, e.__traceback__))
+            )
             raise e
         else:
-            logger.debug('Daemonizing: Background process (%s, %s, %s) has started.' % new_id)
+            logger.debug("Daemonizing: Background process (%s, %s, %s) has started." % new_id)
         finally:
             try:
                 os.kill(old_id[1], sig_to_foreground)
             except BaseException as e:
-                logger.error('Daemonizing: Trying to kill the foreground process raised an exception:\n' +
-                             ''.join(traceback.format_exception(e.__class__, e, e.__traceback__)))
+                logger.error(
+                    "Daemonizing: Trying to kill the foreground process raised an exception:\n"
+                    + "".join(traceback.format_exception(e.__class__, e, e.__traceback__))
+                )


 class _ExitCodeException(BaseException):
@@ -187,7 +195,7 @@ class SigIntManager:
         self._sig_int_triggered = False
         self._action_triggered = False
         self._action_done = False
-        self.ctx = signal_handler('SIGINT', self.handler)
+        self.ctx = signal_handler("SIGINT", self.handler)

     def __bool__(self):
         # this will be True (and stay True) after the first Ctrl-C/SIGINT
@@ -228,7 +236,7 @@ class SigIntManager:
 sig_int = SigIntManager()


-def popen_with_error_handling(cmd_line: str, log_prefix='', **kwargs):
+def popen_with_error_handling(cmd_line: str, log_prefix="", **kwargs):
     """
     Handle typical errors raised by subprocess.Popen. Return None if an error occurred,
     otherwise return the Popen object.
@@ -240,27 +248,27 @@ def popen_with_error_handling(cmd_line: str, log_prefix='', **kwargs):

     Does not change the exit code.
     """
-    assert not kwargs.get('shell'), 'Sorry pal, shell mode is a no-no'
+    assert not kwargs.get("shell"), "Sorry pal, shell mode is a no-no"
     try:
         command = shlex.split(cmd_line)
         if not command:
-            raise ValueError('an empty command line is not permitted')
+            raise ValueError("an empty command line is not permitted")
     except ValueError as ve:
-        logger.error('%s%s', log_prefix, ve)
+        logger.error("%s%s", log_prefix, ve)
         return
-    logger.debug('%scommand line: %s', log_prefix, command)
+    logger.debug("%scommand line: %s", log_prefix, command)
     try:
         return subprocess.Popen(command, **kwargs)
     except FileNotFoundError:
-        logger.error('%sexecutable not found: %s', log_prefix, command[0])
+        logger.error("%sexecutable not found: %s", log_prefix, command[0])
         return
     except PermissionError:
-        logger.error('%spermission denied: %s', log_prefix, command[0])
+        logger.error("%spermission denied: %s", log_prefix, command[0])
         return


 def is_terminal(fd=sys.stdout):
-    return hasattr(fd, 'isatty') and fd.isatty() and (not is_win32 or 'ANSICON' in os.environ)
+    return hasattr(fd, "isatty") and fd.isatty() and (not is_win32 or "ANSICON" in os.environ)


 def prepare_subprocess_env(system, env=None):
@@ -278,8 +286,8 @@ def prepare_subprocess_env(system, env=None):
         # but we do not want that system binaries (like ssh or other) pick up
         # (non-matching) libraries from there.
         # thus we install the original LDLP, before pyinstaller has modified it:
-        lp_key = 'LD_LIBRARY_PATH'
-        lp_orig = env.get(lp_key + '_ORIG')  # pyinstaller >= 20160820 / v3.2.1 has this
+        lp_key = "LD_LIBRARY_PATH"
+        lp_orig = env.get(lp_key + "_ORIG")  # pyinstaller >= 20160820 / v3.2.1 has this
         if lp_orig is not None:
             env[lp_key] = lp_orig
         else:
@@ -292,12 +300,12 @@ def prepare_subprocess_env(system, env=None):
             #    in this case, we must kill LDLP.
             #    We can recognize this via sys.frozen and sys._MEIPASS being set.
             lp = env.get(lp_key)
-            if lp is not None and getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
+            if lp is not None and getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
                 env.pop(lp_key)
     # security: do not give secrets to subprocess
-    env.pop('BORG_PASSPHRASE', None)
+    env.pop("BORG_PASSPHRASE", None)
     # for information, give borg version to the subprocess
-    env['BORG_VERSION'] = __version__
+    env["BORG_VERSION"] = __version__
     return env


@@ -314,13 +322,15 @@ def create_filter_process(cmd, stream, stream_close, inbound=True):
         # communication with the process is a one-way road, i.e. the process can never block
         # for us to do something while we block on the process for something different.
         if inbound:
-            proc = popen_with_error_handling(cmd, stdout=subprocess.PIPE, stdin=filter_stream,
-                                             log_prefix='filter-process: ', env=env)
+            proc = popen_with_error_handling(
+                cmd, stdout=subprocess.PIPE, stdin=filter_stream, log_prefix="filter-process: ", env=env
+            )
         else:
-            proc = popen_with_error_handling(cmd, stdin=subprocess.PIPE, stdout=filter_stream,
-                                             log_prefix='filter-process: ', env=env)
+            proc = popen_with_error_handling(
+                cmd, stdin=subprocess.PIPE, stdout=filter_stream, log_prefix="filter-process: ", env=env
+            )
         if not proc:
-            raise Error(f'filter {cmd}: process creation failed')
+            raise Error(f"filter {cmd}: process creation failed")
         stream = proc.stdout if inbound else proc.stdin
         # inbound: do not close the pipe (this is the task of the filter process [== writer])
         # outbound: close the pipe, otherwise the filter process would not notice when we are done.
@@ -331,7 +341,7 @@ def create_filter_process(cmd, stream, stream_close, inbound=True):

     except Exception:
         # something went wrong with processing the stream by borg
-        logger.debug('Exception, killing the filter...')
+        logger.debug("Exception, killing the filter...")
         if cmd:
             proc.kill()
         borg_succeeded = False
@@ -343,11 +353,11 @@ def create_filter_process(cmd, stream, stream_close, inbound=True):
             stream.close()

         if cmd:
-            logger.debug('Done, waiting for filter to die...')
+            logger.debug("Done, waiting for filter to die...")
             rc = proc.wait()
-            logger.debug('filter cmd exited with code %d', rc)
+            logger.debug("filter cmd exited with code %d", rc)
             if filter_stream_close:
                 filter_stream.close()
             if borg_succeeded and rc:
                 # if borg did not succeed, we know that we killed the filter process
-                raise Error('filter %s failed, rc=%d' % (cmd, rc))
+                raise Error("filter %s failed, rc=%d" % (cmd, rc))

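For orientation, _daemonize() above is the classic POSIX double fork. Stripped to a skeleton (no error handling, and without borg's foreground/background handshake):

    import os

    def daemonize_skeleton():            # illustrative, not the borg API
        if os.fork():
            os._exit(0)                  # first parent exits
        os.setsid()                      # new session, detach from the tty
        if os.fork():
            os._exit(0)                  # session leader exits, so the
                                         # grandchild can never reacquire a tty
        os.chdir("/")
        fd = os.open(os.devnull, os.O_RDWR)
        for std_fd in (0, 1, 2):
            os.dup2(fd, std_fd)          # point stdio at /dev/null
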
+ 13 - 16
src/borg/helpers/progress.py

@@ -5,6 +5,7 @@ import time
 from shutil import get_terminal_size

 from ..logger import create_logger
+
 logger = create_logger()

 from .parseformat import ellipsis_truncate
@@ -19,7 +20,7 @@ def justify_to_terminal_size(message):


 class ProgressIndicatorBase:
-    LOGGER = 'borg.output.progress'
+    LOGGER = "borg.output.progress"
     JSON_TYPE = None
     json = False

@@ -43,15 +44,15 @@ class ProgressIndicatorBase:
         if not self.logger.handlers:
             self.handler = logging.StreamHandler(stream=sys.stderr)
             self.handler.setLevel(logging.INFO)
-            logger = logging.getLogger('borg')
+            logger = logging.getLogger("borg")
             # Some special attributes on the borg logger, created by setup_logging
             # But also be able to work without that
             try:
                 formatter = logger.formatter
-                terminator = '\n' if logger.json else '\r'
+                terminator = "\n" if logger.json else "\r"
                 self.json = logger.json
             except AttributeError:
-                terminator = '\r'
+                terminator = "\r"
             else:
                 self.handler.setFormatter(formatter)
             self.handler.terminator = terminator
@@ -79,24 +80,20 @@ class ProgressIndicatorBase:
         assert self.json
         if not self.emit:
             return
-        kwargs.update(dict(
-            operation=self.id,
-            msgid=self.msgid,
-            type=self.JSON_TYPE,
-            finished=finished,
-            time=time.time(),
-        ))
+        kwargs.update(
+            dict(operation=self.id, msgid=self.msgid, type=self.JSON_TYPE, finished=finished, time=time.time())
+        )
         print(json.dumps(kwargs), file=sys.stderr, flush=True)

     def finish(self):
         if self.json:
             self.output_json(finished=True)
         else:
-            self.output('')
+            self.output("")


 class ProgressIndicatorMessage(ProgressIndicatorBase):
-    JSON_TYPE = 'progress_message'
+    JSON_TYPE = "progress_message"

     def output(self, msg):
         if self.json:
@@ -106,7 +103,7 @@ class ProgressIndicatorMessage(ProgressIndicatorBase):


 class ProgressIndicatorPercent(ProgressIndicatorBase):
-    JSON_TYPE = 'progress_percent'
+    JSON_TYPE = "progress_percent"

     def __init__(self, total=0, step=5, start=0, msg="%3.0f%%", msgid=None):
         """
@@ -150,7 +147,7 @@ class ProgressIndicatorPercent(ProgressIndicatorBase):
                     # no need to truncate if we're not outputting to a terminal
                     terminal_space = get_terminal_size(fallback=(-1, -1))[0]
                     if terminal_space != -1:
-                        space = terminal_space - len(self.msg % tuple([pct] + info[:-1] + ['']))
+                        space = terminal_space - len(self.msg % tuple([pct] + info[:-1] + [""]))
                         info[-1] = ellipsis_truncate(info[-1], space)
                 return self.output(self.msg % tuple([pct] + info), justify=False, info=info)

@@ -193,7 +190,7 @@ class ProgressIndicatorEndless:
             return self.output(self.triggered)

     def output(self, triggered):
-        print('.', end='', file=self.file, flush=True)
+        print(".", end="", file=self.file, flush=True)

     def finish(self):
         print(file=self.file)

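With JSON mode on, output_json() above prints one object per line to stderr, always carrying operation, msgid, type, finished and time (exactly the dict built above). A tolerant line reader is short; everything beyond those field names is assumption:

    import json
    import sys

    for line in sys.stdin:               # e.g. borg's stderr piped in
        try:
            msg = json.loads(line)
        except ValueError:
            continue                     # skip any non-JSON stderr lines
        if str(msg.get("type", "")).startswith("progress_") and not msg.get("finished"):
            print(msg["operation"], msg["time"])
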
+ 21 - 17
src/borg/helpers/time.py

@@ -12,7 +12,7 @@ def to_localtime(ts):

 def parse_timestamp(timestamp, tzinfo=timezone.utc):
     """Parse a ISO 8601 timestamp string"""
-    fmt = ISO_FORMAT if '.' in timestamp else ISO_FORMAT_NO_USECS
+    fmt = ISO_FORMAT if "." in timestamp else ISO_FORMAT_NO_USECS
     dt = datetime.strptime(timestamp, fmt)
     if tzinfo is not None:
         dt = dt.replace(tzinfo=tzinfo)
@@ -27,11 +27,16 @@ def timestamp(s):
         return datetime.fromtimestamp(ts, tz=timezone.utc)
     except OSError:
         # didn't work, try parsing as timestamp. UTC, no TZ, no microsecs support.
-        for format in ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S+00:00',
-                       '%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S',
-                       '%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M',
-                       '%Y-%m-%d', '%Y-%j',
-                       ):
+        for format in (
+            "%Y-%m-%dT%H:%M:%SZ",
+            "%Y-%m-%dT%H:%M:%S+00:00",
+            "%Y-%m-%dT%H:%M:%S",
+            "%Y-%m-%d %H:%M:%S",
+            "%Y-%m-%dT%H:%M",
+            "%Y-%m-%d %H:%M",
+            "%Y-%m-%d",
+            "%Y-%j",
+        ):
             try:
                 return datetime.strptime(s, format).replace(tzinfo=timezone.utc)
             except ValueError:
@@ -54,7 +59,7 @@ if SUPPORT_32BIT_PLATFORMS:
     # subtract last 48h to avoid any issues that could be caused by tz calculations.
     # subtract last 48h to avoid any issues that could be caused by tz calculations.
     # this is in the year 2038, so it is also less than y9999 (which is a datetime internal limit).
     # this is in the year 2038, so it is also less than y9999 (which is a datetime internal limit).
     # msgpack can pack up to uint64.
     # msgpack can pack up to uint64.
-    MAX_S = 2**31-1 - 48*3600
+    MAX_S = 2**31 - 1 - 48 * 3600
     MAX_NS = MAX_S * 1000000000
     MAX_NS = MAX_S * 1000000000
 else:
 else:
     # nanosecond timestamps will fit into a signed int64.
     # nanosecond timestamps will fit into a signed int64.
@@ -62,7 +67,7 @@ else:
     # this is in the year 2262, so it is also less than y9999 (which is a datetime internal limit).
     # this is in the year 2262, so it is also less than y9999 (which is a datetime internal limit).
     # round down to 1e9 multiple, so MAX_NS corresponds precisely to a integer MAX_S.
     # round down to 1e9 multiple, so MAX_NS corresponds precisely to a integer MAX_S.
     # msgpack can pack up to uint64.
     # msgpack can pack up to uint64.
-    MAX_NS = (2**63-1 - 48*3600*1000000000) // 1000000000 * 1000000000
+    MAX_NS = (2**63 - 1 - 48 * 3600 * 1000000000) // 1000000000 * 1000000000
     MAX_S = MAX_NS // 1000000000
     MAX_S = MAX_NS // 1000000000
 
 
 
 
@@ -89,11 +94,11 @@ def safe_timestamp(item_timestamp_ns):
     return datetime.fromtimestamp(t_ns / 1e9)
     return datetime.fromtimestamp(t_ns / 1e9)
 
 
 
 
-def format_time(ts: datetime, format_spec=''):
+def format_time(ts: datetime, format_spec=""):
     """
     """
     Convert *ts* to a human-friendly format with textual weekday.
     Convert *ts* to a human-friendly format with textual weekday.
     """
     """
-    return ts.strftime('%a, %Y-%m-%d %H:%M:%S' if format_spec == '' else format_spec)
+    return ts.strftime("%a, %Y-%m-%d %H:%M:%S" if format_spec == "" else format_spec)
 
 
 
 
 def isoformat_time(ts: datetime):
 def isoformat_time(ts: datetime):
@@ -105,19 +110,18 @@ def isoformat_time(ts: datetime):
 
 
 
 
 def format_timedelta(td):
 def format_timedelta(td):
-    """Format timedelta in a human friendly format
-    """
+    """Format timedelta in a human friendly format"""
     ts = td.total_seconds()
     ts = td.total_seconds()
     s = ts % 60
     s = ts % 60
     m = int(ts / 60) % 60
     m = int(ts / 60) % 60
     h = int(ts / 3600) % 24
     h = int(ts / 3600) % 24
-    txt = '%.2f seconds' % s
+    txt = "%.2f seconds" % s
     if m:
     if m:
-        txt = '%d minutes %s' % (m, txt)
+        txt = "%d minutes %s" % (m, txt)
     if h:
     if h:
-        txt = '%d hours %s' % (h, txt)
+        txt = "%d hours %s" % (h, txt)
     if td.days:
     if td.days:
-        txt = '%d days %s' % (td.days, txt)
+        txt = "%d days %s" % (td.days, txt)
     return txt
     return txt
 
 
 
 
@@ -131,7 +135,7 @@ class OutputTimestamp:
         return format_time(self.ts, format_spec=format_spec)
         return format_time(self.ts, format_spec=format_spec)
 
 
     def __str__(self):
     def __str__(self):
-        return f'{self}'
+        return f"{self}"
 
 
     def isoformat(self):
     def isoformat(self):
         return isoformat_time(self.ts)
         return isoformat_time(self.ts)
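
Only quoting, wrapping and operator spacing changed here. `format_timedelta` builds its result from the inside out (seconds, then minutes, hours, days), which is easiest to see with a concrete value:

    from datetime import timedelta

    from borg.helpers.time import format_timedelta

    td = timedelta(days=2, hours=3, minutes=5, seconds=7)
    # s=7.00, m=5, h=3, days=2, prepended in that order:
    print(format_timedelta(td))  # "2 days 3 hours 5 minutes 7.00 seconds"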

+ 33 - 22
src/borg/helpers/yes.py

@@ -4,16 +4,30 @@ import os
 import os.path
 import sys
 
-FALSISH = ('No', 'NO', 'no', 'N', 'n', '0', )
-TRUISH = ('Yes', 'YES', 'yes', 'Y', 'y', '1', )
-DEFAULTISH = ('Default', 'DEFAULT', 'default', 'D', 'd', '', )
+FALSISH = ("No", "NO", "no", "N", "n", "0")
+TRUISH = ("Yes", "YES", "yes", "Y", "y", "1")
+DEFAULTISH = ("Default", "DEFAULT", "default", "D", "d", "")
 
 
-def yes(msg=None, false_msg=None, true_msg=None, default_msg=None,
-        retry_msg=None, invalid_msg=None, env_msg='{} (from {})',
-        falsish=FALSISH, truish=TRUISH, defaultish=DEFAULTISH,
-        default=False, retry=True, env_var_override=None, ofile=None, input=input, prompt=True,
-        msgid=None):
+def yes(
+    msg=None,
+    false_msg=None,
+    true_msg=None,
+    default_msg=None,
+    retry_msg=None,
+    invalid_msg=None,
+    env_msg="{} (from {})",
+    falsish=FALSISH,
+    truish=TRUISH,
+    defaultish=DEFAULTISH,
+    default=False,
+    retry=True,
+    env_var_override=None,
+    ofile=None,
+    input=input,
+    prompt=True,
+    msgid=None,
+):
     """Output <msg> (usually a question) and let user input an answer.
     Qualifies the answer according to falsish, truish and defaultish as True, False or <default>.
     If it didn't qualify and retry is False (no retries wanted), return the default [which
@@ -43,18 +57,15 @@ def yes(msg=None, false_msg=None, true_msg=None, default_msg=None,
     :param input: input function [input from builtins]
     :return: boolean answer value, True or False
     """
+
     def output(msg, msg_type, is_prompt=False, **kwargs):
-        json_output = getattr(logging.getLogger('borg'), 'json', False)
+        json_output = getattr(logging.getLogger("borg"), "json", False)
         if json_output:
-            kwargs.update(dict(
-                type='question_%s' % msg_type,
-                msgid=msgid,
-                message=msg,
-            ))
+            kwargs.update(dict(type="question_%s" % msg_type, msgid=msgid, message=msg))
             print(json.dumps(kwargs), file=sys.stderr)
         else:
             if is_prompt:
-                print(msg, file=ofile, end='', flush=True)
+                print(msg, file=ofile, end="", flush=True)
             else:
                 print(msg, file=ofile)
 
@@ -66,13 +77,13 @@ def yes(msg=None, false_msg=None, true_msg=None, default_msg=None,
     if default not in (True, False):
         raise ValueError("invalid default value, must be True or False")
     if msg:
-        output(msg, 'prompt', is_prompt=True)
+        output(msg, "prompt", is_prompt=True)
     while True:
         answer = None
         if env_var_override:
             answer = os.environ.get(env_var_override)
             if answer is not None and env_msg:
-                output(env_msg.format(answer, env_var_override), 'env_answer', env_var=env_var_override)
+                output(env_msg.format(answer, env_var_override), "env_answer", env_var=env_var_override)
         if answer is None:
             if not prompt:
                 return default
@@ -83,22 +94,22 @@ def yes(msg=None, false_msg=None, true_msg=None, default_msg=None,
                 answer = truish[0] if default else falsish[0]
         if answer in defaultish:
             if default_msg:
-                output(default_msg, 'accepted_default')
+                output(default_msg, "accepted_default")
             return default
         if answer in truish:
             if true_msg:
-                output(true_msg, 'accepted_true')
+                output(true_msg, "accepted_true")
             return True
         if answer in falsish:
             if false_msg:
-                output(false_msg, 'accepted_false')
+                output(false_msg, "accepted_false")
             return False
         # if we get here, the answer was invalid
         if invalid_msg:
-            output(invalid_msg, 'invalid_answer')
+            output(invalid_msg, "invalid_answer")
         if not retry:
             return default
         if retry_msg:
-            output(retry_msg, 'prompt_retry', is_prompt=True)
+            output(retry_msg, "prompt_retry", is_prompt=True)
         # in case we used an environment variable and it gave an invalid answer, do not use it again:
         env_var_override = None
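
After the reformat, each of `yes()`'s many knobs sits on its own line, which makes call sites easier to diff. A sketch of a typical confirmation prompt (the environment variable name below is made up for this example):

    from borg.helpers.yes import yes

    # MYTOOL_ASSUME_YES is hypothetical; borg passes its own override variables here.
    if yes(
        "Really delete the repository? ",
        false_msg="Aborted.",
        default=False,
        retry=False,
        env_var_override="MYTOOL_ASSUME_YES",
    ):
        ...  # destructive action confirmed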

+ 23 - 16
src/borg/locking.py

@@ -8,8 +8,8 @@ from . import platform
 from .helpers import Error, ErrorWithTraceback
 from .logger import create_logger
 
-ADD, REMOVE = 'add', 'remove'
-SHARED, EXCLUSIVE = 'shared', 'exclusive'
+ADD, REMOVE = "add", "remove"
+SHARED, EXCLUSIVE = "shared", "exclusive"
 
 logger = create_logger(__name__)
 
@@ -20,6 +20,7 @@ class TimeoutTimer:
     It can also compute and optionally execute a reasonable sleep time (e.g. to avoid
     polling too often or to support thread/process rescheduling).
     """
+
     def __init__(self, timeout=None, sleep=None):
         """
         Initialize a timer.
@@ -43,8 +44,8 @@ class TimeoutTimer:
 
     def __repr__(self):
         return "<{}: start={!r} end={!r} timeout={!r} sleep={!r}>".format(
-            self.__class__.__name__, self.start_time, self.end_time,
-            self.timeout_interval, self.sleep_interval)
+            self.__class__.__name__, self.start_time, self.end_time, self.timeout_interval, self.sleep_interval
+        )
 
     def start(self):
         self.start_time = time.time()
@@ -102,6 +103,7 @@ class ExclusiveLock:
     This makes sure the lock is released again if the block is left, no
     matter how (e.g. if an exception occurred).
     """
+
     def __init__(self, path, timeout=None, sleep=None, id=None):
         self.timeout = timeout
         self.sleep = sleep
@@ -129,7 +131,7 @@ class ExclusiveLock:
         unique_base_name = os.path.basename(self.unique_name)
         temp_path = None
         try:
-            temp_path = tempfile.mkdtemp(".tmp", base_name + '.', parent_path)
+            temp_path = tempfile.mkdtemp(".tmp", base_name + ".", parent_path)
             temp_unique_name = os.path.join(temp_path, unique_base_name)
             with open(temp_unique_name, "wb"):
                 pass
@@ -192,8 +194,8 @@ class ExclusiveLock:
         else:
             for name in names:
                 try:
-                    host_pid, thread_str = name.rsplit('-', 1)
-                    host, pid_str = host_pid.rsplit('.', 1)
+                    host_pid, thread_str = name.rsplit("-", 1)
+                    host, pid_str = host_pid.rsplit(".", 1)
                     pid = int(pid_str)
                     thread = int(thread_str)
                 except ValueError:
@@ -207,17 +209,19 @@ class ExclusiveLock:
                 if not self.kill_stale_locks:
                     if not self.stale_warning_printed:
                         # Log this at warning level to hint the user at the ability
-                        logger.warning("Found stale lock %s, but not deleting because self.kill_stale_locks = False.", name)
+                        logger.warning(
+                            "Found stale lock %s, but not deleting because self.kill_stale_locks = False.", name
+                        )
                         self.stale_warning_printed = True
                     return False
 
                 try:
                     os.unlink(os.path.join(self.path, name))
-                    logger.warning('Killed stale lock %s.', name)
+                    logger.warning("Killed stale lock %s.", name)
                 except OSError as err:
                     if not self.stale_warning_printed:
                         # This error will bubble up and likely result in locking failure
-                        logger.error('Found stale lock %s, but cannot delete due to %s', name, str(err))
+                        logger.error("Found stale lock %s, but cannot delete due to %s", name, str(err))
                         self.stale_warning_printed = True
                     return False
 
@@ -228,7 +232,7 @@ class ExclusiveLock:
                 # Directory is not empty or doesn't exist any more = we lost the race to somebody else--which is ok.
                 return False
             # EACCES or EIO or ... = we cannot operate anyway
-            logger.error('Failed to remove lock dir: %s', str(err))
+            logger.error("Failed to remove lock dir: %s", str(err))
             return False
 
         return True
@@ -257,6 +261,7 @@ class LockRoster:
     Note: you usually should call the methods with an exclusive lock held,
     to avoid conflicting access by multiple threads/processes/machines.
     """
+
     def __init__(self, path, id=None):
         self.path = path
         self.id = id or platform.get_process_id()
@@ -279,8 +284,9 @@ class LockRoster:
                         if platform.process_alive(host, pid, thread):
                             elements.add((host, pid, thread))
                         else:
-                            logger.warning('Removed stale %s roster lock for host %s pid %d thread %d.',
-                                           key, host, pid, thread)
+                            logger.warning(
+                                "Removed stale %s roster lock for host %s pid %d thread %d.", key, host, pid, thread
+                            )
                     data[key] = list(elements)
         except (FileNotFoundError, ValueError):
             # no or corrupt/empty roster file?
@@ -315,7 +321,7 @@ class LockRoster:
         elif op == REMOVE:
             elements.remove(self.id)
         else:
-            raise ValueError('Unknown LockRoster op %r' % op)
+            raise ValueError("Unknown LockRoster op %r" % op)
         roster[key] = list(list(e) for e in elements)
         self.save(roster)
 
@@ -354,6 +360,7 @@ class Lock:
     This makes sure the lock is released again if the block is left, no
     matter how (e.g. if an exception occurred).
     """
+
     def __init__(self, path, exclusive=False, sleep=None, timeout=None, id=None):
         self.path = path
         self.is_exclusive = exclusive
@@ -361,11 +368,11 @@ class Lock:
         self.timeout = timeout
         self.id = id or platform.get_process_id()
         # globally keeping track of shared and exclusive lockers:
-        self._roster = LockRoster(path + '.roster', id=id)
+        self._roster = LockRoster(path + ".roster", id=id)
         # an exclusive lock, used for:
         # - holding while doing roster queries / updates
         # - holding while the Lock itself is exclusive
-        self._lock = ExclusiveLock(path + '.exclusive', id=id, timeout=timeout)
+        self._lock = ExclusiveLock(path + ".exclusive", id=id, timeout=timeout)
 
     def __enter__(self):
         return self.acquire()
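
`Lock` composes a `LockRoster` (`<path>.roster`) for bookkeeping with an `ExclusiveLock` (`<path>.exclusive`) for mutual exclusion, and `__enter__` just delegates to `acquire()`, so the usual pattern is the context-manager form (the lock path below is illustrative):

    from borg.locking import Lock

    # exclusive=True for writers; the default is a shared (reader) lock
    with Lock("/path/to/repo/lock", exclusive=True, timeout=1.0):
        ...  # released on exit, even if an exception is raised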

+ 30 - 34
src/borg/logger.py

@@ -53,7 +53,7 @@ def _log_warning(message, category, filename, lineno, file=None, line=None):
     logger.warning(msg)
 
 
-def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', level='info', is_serve=False, json=False):
+def setup_logging(stream=None, conf_fname=None, env_var="BORG_LOGGING_CONF", level="info", is_serve=False, json=False):
     """setup logging module according to the arguments provided
 
     if conf_fname is given (or the config file name can be determined via
@@ -80,7 +80,7 @@ def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', lev
                 logging.config.fileConfig(f)
             configured = True
             logger = logging.getLogger(__name__)
-            borg_logger = logging.getLogger('borg')
+            borg_logger = logging.getLogger("borg")
             borg_logger.json = json
             logger.debug(f'using logging configuration read from "{conf_fname}"')
             warnings.showwarning = _log_warning
@@ -88,15 +88,15 @@ def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', lev
         except Exception as err:  # XXX be more precise
             err_msg = str(err)
     # if we did not / not successfully load a logging configuration, fallback to this:
-    logger = logging.getLogger('')
+    logger = logging.getLogger("")
     handler = logging.StreamHandler(stream)
     if is_serve and not json:
-        fmt = '$LOG %(levelname)s %(name)s Remote: %(message)s'
+        fmt = "$LOG %(levelname)s %(name)s Remote: %(message)s"
     else:
-        fmt = '%(message)s'
+        fmt = "%(message)s"
     formatter = JsonFormatter(fmt) if json else logging.Formatter(fmt)
     handler.setFormatter(formatter)
-    borg_logger = logging.getLogger('borg')
+    borg_logger = logging.getLogger("borg")
     borg_logger.formatter = formatter
     borg_logger.json = json
     if configured and logger.handlers:
@@ -111,7 +111,7 @@ def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', lev
     logger = logging.getLogger(__name__)
     if err_msg:
         logger.warning(f'setup_logging for "{conf_fname}" failed with "{err_msg}".')
-    logger.debug('using builtin fallback logging configuration')
+    logger.debug("using builtin fallback logging configuration")
     warnings.showwarning = _log_warning
     return handler
 
@@ -151,6 +151,7 @@ def create_logger(name=None):
     be careful not to call any logger methods before the setup_logging() call.
     If you try, you'll get an exception.
     """
+
    class LazyLogger:
         def __init__(self, name=None):
             self.__name = name or find_parent_module()
@@ -162,49 +163,49 @@ def create_logger(name=None):
                 if not configured:
                     raise Exception("tried to call a logger before setup_logging() was called")
                 self.__real_logger = logging.getLogger(self.__name)
-                if self.__name.startswith('borg.debug.') and self.__real_logger.level == logging.NOTSET:
-                    self.__real_logger.setLevel('WARNING')
+                if self.__name.startswith("borg.debug.") and self.__real_logger.level == logging.NOTSET:
+                    self.__real_logger.setLevel("WARNING")
             return self.__real_logger
 
         def getChild(self, suffix):
-            return LazyLogger(self.__name + '.' + suffix)
+            return LazyLogger(self.__name + "." + suffix)
 
         def setLevel(self, *args, **kw):
             return self.__logger.setLevel(*args, **kw)
 
         def log(self, *args, **kw):
-            if 'msgid' in kw:
-                kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
+            if "msgid" in kw:
+                kw.setdefault("extra", {})["msgid"] = kw.pop("msgid")
             return self.__logger.log(*args, **kw)
 
         def exception(self, *args, **kw):
-            if 'msgid' in kw:
-                kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
+            if "msgid" in kw:
+                kw.setdefault("extra", {})["msgid"] = kw.pop("msgid")
             return self.__logger.exception(*args, **kw)
 
         def debug(self, *args, **kw):
-            if 'msgid' in kw:
-                kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
+            if "msgid" in kw:
+                kw.setdefault("extra", {})["msgid"] = kw.pop("msgid")
             return self.__logger.debug(*args, **kw)
 
         def info(self, *args, **kw):
-            if 'msgid' in kw:
-                kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
+            if "msgid" in kw:
+                kw.setdefault("extra", {})["msgid"] = kw.pop("msgid")
             return self.__logger.info(*args, **kw)
 
         def warning(self, *args, **kw):
-            if 'msgid' in kw:
-                kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
+            if "msgid" in kw:
+                kw.setdefault("extra", {})["msgid"] = kw.pop("msgid")
             return self.__logger.warning(*args, **kw)
 
         def error(self, *args, **kw):
-            if 'msgid' in kw:
-                kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
+            if "msgid" in kw:
+                kw.setdefault("extra", {})["msgid"] = kw.pop("msgid")
             return self.__logger.error(*args, **kw)
 
         def critical(self, *args, **kw):
-            if 'msgid' in kw:
-                kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
+            if "msgid" in kw:
+                kw.setdefault("extra", {})["msgid"] = kw.pop("msgid")
             return self.__logger.critical(*args, **kw)
 
     return LazyLogger(name)
@@ -212,11 +213,11 @@ def create_logger(name=None):
 
 class JsonFormatter(logging.Formatter):
     RECORD_ATTRIBUTES = (
-        'levelname',
-        'name',
-        'message',
+        "levelname",
+        "name",
+        "message",
         # msgid is an attribute we made up in Borg to expose a non-changing handle for log messages
-        'msgid',
+        "msgid",
     )
 
     # Other attributes that are not very useful but do exist:
@@ -229,12 +230,7 @@ class JsonFormatter(logging.Formatter):
 
     def format(self, record):
         super().format(record)
-        data = {
-            'type': 'log_message',
-            'time': record.created,
-            'message': '',
-            'levelname': 'CRITICAL',
-        }
+        data = {"type": "log_message", "time": record.created, "message": "", "levelname": "CRITICAL"}
         for attr in self.RECORD_ATTRIBUTES:
             value = getattr(record, attr, None)
             if value:
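
Every `LazyLogger` level method moves a `msgid` keyword into the record's `extra` dict, and `JsonFormatter` then emits it alongside `levelname`, `name` and `message`. A sketch of the intended call order (the msgid value is made up):

    from borg.logger import create_logger, setup_logging

    logger = create_logger(__name__)  # safe at import time: resolves lazily
    setup_logging(level="info", json=True)
    logger.warning("cache is out of date", msgid="cache.outdated")  # msgid lands in the JSON record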

+ 2 - 2
src/borg/lrucache.py

@@ -10,8 +10,8 @@ class LRUCache:
 
     def __setitem__(self, key, value):
         assert key not in self._cache, (
-            "Unexpected attempt to replace a cached item,"
-            " without first deleting the old item.")
+            "Unexpected attempt to replace a cached item," " without first deleting the old item."
+        )
         self._lru.append(key)
         while len(self._lru) > self._capacity:
             del self[self._lru[0]]
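
The two string literals were merged onto one line; the assertion itself is unchanged: a live key must be deleted before it may be assigned again. A sketch, assuming borg's `LRUCache(capacity, dispose)` constructor, where `dispose` is called for evicted values:

    from borg.lrucache import LRUCache

    cache = LRUCache(capacity=2, dispose=lambda value: None)
    cache["k"] = 1
    del cache["k"]  # required; assigning cache["k"] again without the delete trips the assert
    cache["k"] = 2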

+ 65 - 63
src/borg/nanorst.py

@@ -11,18 +11,18 @@ class TextPecker:
 
     def read(self, n):
         self.i += n
-        return self.str[self.i - n:self.i]
+        return self.str[self.i - n : self.i]
 
     def peek(self, n):
         if n >= 0:
-            return self.str[self.i:self.i + n]
+            return self.str[self.i : self.i + n]
         else:
-            return self.str[self.i + n - 1:self.i - 1]
+            return self.str[self.i + n - 1 : self.i - 1]
 
     def peekline(self):
-        out = ''
+        out = ""
         i = self.i
-        while i < len(self.str) and self.str[i] != '\n':
+        while i < len(self.str) and self.str[i] != "\n":
             out += self.str[i]
             i += 1
         return out
@@ -34,18 +34,18 @@ class TextPecker:
 
 
 def process_directive(directive, arguments, out, state_hook):
-    if directive == 'container' and arguments == 'experimental':
-        state_hook('text', '**', out)
-        out.write('++ Experimental ++')
-        state_hook('**', 'text', out)
+    if directive == "container" and arguments == "experimental":
+        state_hook("text", "**", out)
+        out.write("++ Experimental ++")
+        state_hook("**", "text", out)
     else:
-        state_hook('text', '**', out)
+        state_hook("text", "**", out)
         out.write(directive.title())
-        out.write(':\n')
-        state_hook('**', 'text', out)
+        out.write(":\n")
+        state_hook("**", "text", out)
         if arguments:
             out.write(arguments)
-            out.write('\n')
+            out.write("\n")
 
 
 def rst_to_text(text, state_hook=None, references=None):
@@ -58,12 +58,12 @@ def rst_to_text(text, state_hook=None, references=None):
     """
     state_hook = state_hook or (lambda old_state, new_state, out: None)
     references = references or {}
-    state = 'text'
-    inline_mode = 'replace'
+    state = "text"
+    inline_mode = "replace"
     text = TextPecker(text)
     out = io.StringIO()
 
-    inline_single = ('*', '`')
+    inline_single = ("*", "`")
 
     while True:
         char = text.read(1)
@@ -71,81 +71,83 @@ def rst_to_text(text, state_hook=None, references=None):
             break
         next = text.peek(1)  # type: str
 
-        if state == 'text':
-            if char == '\\' and text.peek(1) in inline_single:
+        if state == "text":
+            if char == "\\" and text.peek(1) in inline_single:
                 continue
-            if text.peek(-1) != '\\':
+            if text.peek(-1) != "\\":
                 if char in inline_single and next != char:
                     state_hook(state, char, out)
                     state = char
                     continue
-                if char == next == '*':
-                    state_hook(state, '**', out)
-                    state = '**'
+                if char == next == "*":
+                    state_hook(state, "**", out)
+                    state = "**"
                     text.read(1)
                     continue
-                if char == next == '`':
-                    state_hook(state, '``', out)
-                    state = '``'
+                if char == next == "`":
+                    state_hook(state, "``", out)
+                    state = "``"
                     text.read(1)
                     continue
-                if text.peek(-1).isspace() and char == ':' and text.peek(5) == 'ref:`':
+                if text.peek(-1).isspace() and char == ":" and text.peek(5) == "ref:`":
                     # translate reference
                     text.read(5)
-                    ref = ''
+                    ref = ""
                     while True:
                         char = text.peek(1)
-                        if char == '`':
+                        if char == "`":
                             text.read(1)
                             break
-                        if char == '\n':
+                        if char == "\n":
                             text.read(1)
                             continue  # merge line breaks in :ref:`...\n...`
                         ref += text.read(1)
                     try:
                         out.write(references[ref])
                     except KeyError:
-                        raise ValueError("Undefined reference in Archiver help: %r — please add reference "
-                                         "substitution to 'rst_plain_text_references'" % ref)
+                        raise ValueError(
+                            "Undefined reference in Archiver help: %r — please add reference "
+                            "substitution to 'rst_plain_text_references'" % ref
+                        )
                     continue
-                if char == ':' and text.peek(2) == ':\n':  # End of line code block
+                if char == ":" and text.peek(2) == ":\n":  # End of line code block
                     text.read(2)
-                    state_hook(state, 'code-block', out)
-                    state = 'code-block'
-                    out.write(':\n')
+                    state_hook(state, "code-block", out)
+                    state = "code-block"
+                    out.write(":\n")
                     continue
-            if text.peek(-2) in ('\n\n', '') and char == next == '.':
+            if text.peek(-2) in ("\n\n", "") and char == next == ".":
                 text.read(2)
-                directive, is_directive, arguments = text.readline().partition('::')
+                directive, is_directive, arguments = text.readline().partition("::")
                 text.read(1)
                 if not is_directive:
                     # partition: if the separator is not in the text, the leftmost output is the entire input
-                    if directive == 'nanorst: inline-fill':
-                        inline_mode = 'fill'
-                    elif directive == 'nanorst: inline-replace':
-                        inline_mode = 'replace'
+                    if directive == "nanorst: inline-fill":
+                        inline_mode = "fill"
+                    elif directive == "nanorst: inline-replace":
+                        inline_mode = "replace"
                     continue
                 process_directive(directive, arguments.strip(), out, state_hook)
                 continue
         if state in inline_single and char == state:
-            state_hook(state, 'text', out)
-            state = 'text'
-            if inline_mode == 'fill':
-                out.write(2 * ' ')
+            state_hook(state, "text", out)
+            state = "text"
+            if inline_mode == "fill":
+                out.write(2 * " ")
             continue
-        if state == '``' and char == next == '`':
-            state_hook(state, 'text', out)
-            state = 'text'
+        if state == "``" and char == next == "`":
+            state_hook(state, "text", out)
+            state = "text"
             text.read(1)
-            if inline_mode == 'fill':
-                out.write(4 * ' ')
+            if inline_mode == "fill":
+                out.write(4 * " ")
             continue
-        if state == '**' and char == next == '*':
-            state_hook(state, 'text', out)
-            state = 'text'
+        if state == "**" and char == next == "*":
+            state_hook(state, "text", out)
+            state = "text"
             text.read(1)
             continue
-        if state == 'code-block' and char == next == '\n' and text.peek(5)[1:] != '    ':
+        if state == "code-block" and char == next == "\n" and text.peek(5)[1:] != "    ":
             # Foo::
             #
             #     *stuff* *code* *ignore .. all markup*
@@ -153,11 +155,11 @@ def rst_to_text(text, state_hook=None, references=None):
             #     More arcane stuff
             #
             # Regular text...
-            state_hook(state, 'text', out)
-            state = 'text'
+            state_hook(state, "text", out)
+            state = "text"
         out.write(char)
 
-    assert state == 'text', 'Invalid final state %r (This usually indicates unmatched */**)' % state
+    assert state == "text", "Invalid final state %r (This usually indicates unmatched */**)" % state
    return out.getvalue()
 
 
@@ -191,12 +193,12 @@ class RstToTextLazy:
 
 
 def ansi_escapes(old_state, new_state, out):
-    if old_state == 'text' and new_state in ('*', '`', '``'):
-        out.write('\033[4m')
-    if old_state == 'text' and new_state == '**':
-        out.write('\033[1m')
-    if old_state in ('*', '`', '``', '**') and new_state == 'text':
-        out.write('\033[0m')
+    if old_state == "text" and new_state in ("*", "`", "``"):
+        out.write("\033[4m")
+    if old_state == "text" and new_state == "**":
+        out.write("\033[1m")
+    if old_state in ("*", "`", "``", "**") and new_state == "text":
+        out.write("\033[0m")
 
 
 def rst_to_terminal(rst, references=None, destination=sys.stdout):
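
`rst_to_text` is a character-at-a-time state machine over the states text, *, `, **, `` and code-block; with the default no-op state hook the inline markers are simply dropped:

    from borg.nanorst import rst_to_text

    print(rst_to_text("**Warning:** use ``--dry-run`` first"))
    # -> Warning: use --dry-run first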

+ 36 - 48
src/borg/patterns.py

@@ -75,6 +75,7 @@ class PatternMatcher:
     *fallback* is a boolean value that *match()* returns if no matching patterns are found.
 
     """
+
     def __init__(self, fallback=None):
         self._items = []
 
@@ -96,18 +97,13 @@ class PatternMatcher:
         self.include_patterns = []
 
         # TODO: move this info to parse_inclexcl_command and store in PatternBase subclass?
-        self.is_include_cmd = {
-            IECommand.Exclude: False,
-            IECommand.ExcludeNoRecurse: False,
-            IECommand.Include: True
-        }
+        self.is_include_cmd = {IECommand.Exclude: False, IECommand.ExcludeNoRecurse: False, IECommand.Include: True}
 
     def empty(self):
         return not len(self._items) and not len(self._path_full_patterns)
 
     def _add(self, pattern, cmd):
-        """*cmd* is an IECommand value.
-        """
+        """*cmd* is an IECommand value."""
         if isinstance(pattern, PathFullPattern):
             key = pattern.pattern  # full, normalized path
             self._path_full_patterns[key] = cmd
@@ -123,8 +119,7 @@ class PatternMatcher:
             self._add(pattern, cmd)
 
     def add_includepaths(self, include_paths):
-        """Used to add inclusion-paths from args.paths (from commandline).
-        """
+        """Used to add inclusion-paths from args.paths (from commandline)."""
         include_patterns = [parse_pattern(p, PathPrefixPattern) for p in include_paths]
         self.add(include_patterns, IECommand.Include)
         self.fallback = not include_patterns
@@ -135,8 +130,7 @@ class PatternMatcher:
         return [p for p in self.include_patterns if p.match_count == 0]
 
     def add_inclexcl(self, patterns):
-        """Add list of patterns (of type CmdTuple) to internal list.
-        """
+        """Add list of patterns (of type CmdTuple) to internal list."""
         for pattern, cmd in patterns:
             self._add(pattern, cmd)
 
@@ -172,12 +166,12 @@ def normalize_path(path):
     """normalize paths for MacOS (but do nothing on other platforms)"""
     # HFS+ converts paths to a canonical form, so users shouldn't be required to enter an exact match.
     # Windows and Unix filesystems allow different forms, so users always have to enter an exact match.
-    return unicodedata.normalize('NFD', path) if sys.platform == 'darwin' else path
+    return unicodedata.normalize("NFD", path) if sys.platform == "darwin" else path
 
 
 class PatternBase:
-    """Shared logic for inclusion/exclusion patterns.
-    """
+    """Shared logic for inclusion/exclusion patterns."""
+
     PREFIX = NotImplemented
 
     def __init__(self, pattern, recurse_dir=False):
@@ -201,7 +195,7 @@ class PatternBase:
         return matches
 
     def __repr__(self):
-        return f'{type(self)}({self.pattern})'
+        return f"{type(self)}({self.pattern})"
 
     def __str__(self):
         return self.pattern_orig
@@ -216,6 +210,7 @@ class PatternBase:
 
 class PathFullPattern(PatternBase):
     """Full match of a path."""
+
     PREFIX = "pf"
 
     def _prepare(self, pattern):
@@ -236,6 +231,7 @@ class PathPrefixPattern(PatternBase):
     If a directory is specified, all paths that start with that
     path match as well.  A trailing slash makes no difference.
     """
+
     PREFIX = "pp"
 
     def _prepare(self, pattern):
@@ -251,13 +247,14 @@ class FnmatchPattern(PatternBase):
     """Shell glob patterns to exclude.  A trailing slash means to
     exclude the contents of a directory, but not the directory itself.
     """
+
     PREFIX = "fm"
 
     def _prepare(self, pattern):
         if pattern.endswith(os.path.sep):
-            pattern = os.path.normpath(pattern).rstrip(os.path.sep) + os.path.sep + '*' + os.path.sep
+            pattern = os.path.normpath(pattern).rstrip(os.path.sep) + os.path.sep + "*" + os.path.sep
         else:
-            pattern = os.path.normpath(pattern) + os.path.sep + '*'
+            pattern = os.path.normpath(pattern) + os.path.sep + "*"
 
         self.pattern = pattern.lstrip(os.path.sep)  # sep at beginning is removed
 
@@ -266,13 +263,14 @@ class FnmatchPattern(PatternBase):
         self.regex = re.compile(fnmatch.translate(self.pattern))
 
     def _match(self, path):
-        return (self.regex.match(path + os.path.sep) is not None)
+        return self.regex.match(path + os.path.sep) is not None
 
 
 class ShellPattern(PatternBase):
     """Shell glob patterns to exclude.  A trailing slash means to
     exclude the contents of a directory, but not the directory itself.
     """
+
     PREFIX = "sh"
 
     def _prepare(self, pattern):
@@ -287,12 +285,12 @@ class ShellPattern(PatternBase):
         self.regex = re.compile(shellpattern.translate(self.pattern))
 
     def _match(self, path):
-        return (self.regex.match(path + os.path.sep) is not None)
+        return self.regex.match(path + os.path.sep) is not None
 
 
 class RegexPattern(PatternBase):
-    """Regular expression to exclude.
-    """
+    """Regular expression to exclude."""
+
     PREFIX = "re"
 
     def _prepare(self, pattern):
@@ -301,28 +299,22 @@ class RegexPattern(PatternBase):
 
     def _match(self, path):
         # Normalize path separators
-        if os.path.sep != '/':
-            path = path.replace(os.path.sep, '/')
+        if os.path.sep != "/":
+            path = path.replace(os.path.sep, "/")
 
-        return (self.regex.search(path) is not None)
+        return self.regex.search(path) is not None
 
 
-_PATTERN_CLASSES = {
-    FnmatchPattern,
-    PathFullPattern,
-    PathPrefixPattern,
-    RegexPattern,
-    ShellPattern,
-}
+_PATTERN_CLASSES = {FnmatchPattern, PathFullPattern, PathPrefixPattern, RegexPattern, ShellPattern}
 
 _PATTERN_CLASS_BY_PREFIX = {i.PREFIX: i for i in _PATTERN_CLASSES}
 
-CmdTuple = namedtuple('CmdTuple', 'val cmd')
+CmdTuple = namedtuple("CmdTuple", "val cmd")
 
 
 class IECommand(Enum):
-    """A command that an InclExcl file line can represent.
-    """
+    """A command that an InclExcl file line can represent."""
+
     RootPath = 1
     PatternStyle = 2
     Include = 3
@@ -343,9 +335,7 @@ def get_pattern_class(prefix):
 
 
 def parse_pattern(pattern, fallback=FnmatchPattern, recurse_dir=True):
-    """Read pattern from string and return an instance of the appropriate implementation class.
-
-    """
+    """Read pattern from string and return an instance of the appropriate implementation class."""
     if len(pattern) > 2 and pattern[2] == ":" and pattern[:2].isalnum():
         (style, pattern) = (pattern[:2], pattern[3:])
         cls = get_pattern_class(style)
@@ -355,8 +345,7 @@ def parse_pattern(pattern, fallback=FnmatchPattern, recurse_dir=True):
 
 
 def parse_exclude_pattern(pattern_str, fallback=FnmatchPattern):
-    """Read pattern from string and return an instance of the appropriate implementation class.
-    """
+    """Read pattern from string and return an instance of the appropriate implementation class."""
     epattern_obj = parse_pattern(pattern_str, fallback, recurse_dir=False)
     return CmdTuple(epattern_obj, IECommand.ExcludeNoRecurse)
 
@@ -365,21 +354,20 @@ def parse_inclexcl_command(cmd_line_str, fallback=ShellPattern):
     """Read a --patterns-from command from string and return a CmdTuple object."""
 
     cmd_prefix_map = {
-        '-': IECommand.Exclude,
-        '!': IECommand.ExcludeNoRecurse,
-        '+': IECommand.Include,
-        'R': IECommand.RootPath,
-        'r': IECommand.RootPath,
-        'P': IECommand.PatternStyle,
-        'p': IECommand.PatternStyle,
+        "-": IECommand.Exclude,
+        "!": IECommand.ExcludeNoRecurse,
+        "+": IECommand.Include,
+        "R": IECommand.RootPath,
+        "r": IECommand.RootPath,
+        "P": IECommand.PatternStyle,
+        "p": IECommand.PatternStyle,
     }
     if not cmd_line_str:
         raise argparse.ArgumentTypeError("A pattern/command must not be empty.")
 
     cmd = cmd_prefix_map.get(cmd_line_str[0])
     if cmd is None:
-        raise argparse.ArgumentTypeError("A pattern/command must start with anyone of: %s" %
-                                         ', '.join(cmd_prefix_map))
+        raise argparse.ArgumentTypeError("A pattern/command must start with anyone of: %s" % ", ".join(cmd_prefix_map))
 
     # remaining text on command-line following the command character
     remainder_str = cmd_line_str[1:].lstrip()
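
`parse_pattern` dispatches on a two-letter style prefix (`pf`, `pp`, `fm`, `sh`, `re`), falling back to the given pattern class when there is none, and `parse_inclexcl_command` dispatches on the leading command character. For example:

    from borg.patterns import FnmatchPattern, RegexPattern, parse_pattern, parse_inclexcl_command, IECommand

    assert isinstance(parse_pattern(r"re:\.tmp$"), RegexPattern)     # explicit style prefix
    assert isinstance(parse_pattern("home/*/junk"), FnmatchPattern)  # no prefix -> fallback
    cmd = parse_inclexcl_command("- sh:home/*/.cache")  # '-' maps to IECommand.Exclude
    assert cmd.cmd is IECommand.Exclude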

+ 1 - 0
src/borg/platform/__init__.py

@@ -17,6 +17,7 @@ OS_API_VERSION = API_VERSION
 
 if not is_win32:
     from .posix import process_alive, local_pid_alive
+
     # posix swidth implementation works for: linux, freebsd, darwin, openindiana, cygwin
     from .posix import swidth
     from .posix import get_errno

+ 22 - 15
src/borg/platform/base.py

@@ -20,9 +20,9 @@ platform API: that way platform APIs provided by the platform-specific support m
 are correctly composed into the base functionality.
 are correctly composed into the base functionality.
 """
 """
 
 
-API_VERSION = '1.2_05'
+API_VERSION = "1.2_05"
 
 
-fdatasync = getattr(os, 'fdatasync', os.fsync)
+fdatasync = getattr(os, "fdatasync", os.fsync)
 
 
 from .xattr import ENOATTR
 from .xattr import ENOATTR
 
 
@@ -86,14 +86,16 @@ try:
 
 
     def set_flags(path, bsd_flags, fd=None):
     def set_flags(path, bsd_flags, fd=None):
         lchflags(path, bsd_flags)
         lchflags(path, bsd_flags)
+
 except ImportError:
 except ImportError:
+
     def set_flags(path, bsd_flags, fd=None):
     def set_flags(path, bsd_flags, fd=None):
         pass
         pass
 
 
 
 
 def get_flags(path, st, fd=None):
 def get_flags(path, st, fd=None):
     """Return BSD-style file flags for path or stat without following symlinks."""
     """Return BSD-style file flags for path or stat without following symlinks."""
-    return getattr(st, 'st_flags', 0)
+    return getattr(st, "st_flags", 0)
 
 
 
 
 def sync_dir(path):
 def sync_dir(path):
@@ -114,8 +116,8 @@ def sync_dir(path):
 
 
 
 
 def safe_fadvise(fd, offset, len, advice):
 def safe_fadvise(fd, offset, len, advice):
-    if hasattr(os, 'posix_fadvise'):
-        advice = getattr(os, 'POSIX_FADV_' + advice)
+    if hasattr(os, "posix_fadvise"):
+        advice = getattr(os, "POSIX_FADV_" + advice)
         try:
         try:
             os.posix_fadvise(fd, offset, len, advice)
             os.posix_fadvise(fd, offset, len, advice)
         except OSError:
         except OSError:
@@ -158,7 +160,7 @@ class SyncFile:
                that corresponds to path (like from os.open(path, ...) or os.mkstemp(...))
         :param binary: whether to open in binary mode, default is False.
         """
-        mode = 'xb' if binary else 'x'  # x -> raise FileExists exception in open() if file exists already
+        mode = "xb" if binary else "x"  # x -> raise FileExists exception in open() if file exists already
         self.path = path
         if fd is None:
             self.f = open(path, mode=mode)  # python file object
@@ -181,15 +183,17 @@ class SyncFile:
         after sync().
         """
         from .. import platform
+
         self.f.flush()
         platform.fdatasync(self.fd)
         # tell the OS that it does not need to cache what we just wrote,
         # avoids spoiling the cache for the OS and other processes.
-        safe_fadvise(self.fd, 0, 0, 'DONTNEED')
+        safe_fadvise(self.fd, 0, 0, "DONTNEED")

     def close(self):
         """sync() and close."""
         from .. import platform
+
         dirname = None
         try:
             dirname = os.path.dirname(self.path)
@@ -216,23 +220,26 @@ class SaveFile:
     Internally used temporary files are created in the target directory and are
     named <BASENAME>-<RANDOMCHARS>.tmp and cleaned up in normal and error conditions.
     """
+
     def __init__(self, path, binary=False):
         self.binary = binary
         self.path = path
         self.dir = os.path.dirname(path)
-        self.tmp_prefix = os.path.basename(path) + '-'
+        self.tmp_prefix = os.path.basename(path) + "-"
         self.tmp_fd = None  # OS-level fd
         self.tmp_fname = None  # full path/filename corresponding to self.tmp_fd
         self.f = None  # python-file-like SyncFile

     def __enter__(self):
         from .. import platform
-        self.tmp_fd, self.tmp_fname = tempfile.mkstemp(prefix=self.tmp_prefix, suffix='.tmp', dir=self.dir)
+
+        self.tmp_fd, self.tmp_fname = tempfile.mkstemp(prefix=self.tmp_prefix, suffix=".tmp", dir=self.dir)
         self.f = platform.SyncFile(self.tmp_fname, fd=self.tmp_fd, binary=self.binary)
         return self.f

     def __exit__(self, exc_type, exc_val, exc_tb):
         from .. import platform
+
         self.f.close()  # this indirectly also closes self.tmp_fd
         self.tmp_fd = None
         if exc_type is not None:
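SaveFile as a whole implements the classic atomic-save pattern: write into a temp file in the same directory, sync, then rename over the destination. A minimal stdlib-only sketch of that pattern (simplified: no SyncFile wrapper, no umask handling, no directory fsync):

import os
import tempfile

def atomic_save(path, data):
    dirname = os.path.dirname(path) or "."
    fd, tmpname = tempfile.mkstemp(prefix=os.path.basename(path) + "-", suffix=".tmp", dir=dirname)
    try:
        with os.fdopen(fd, "wb") as f:
            f.write(data)
            f.flush()
            os.fsync(f.fileno())  # data is durable before the rename makes it visible
        os.replace(tmpname, path)  # atomic on POSIX: readers see old or new file, never a torn one
    except BaseException:
        os.unlink(tmpname)
        raise

atomic_save("demo.cfg", b"key = value\n")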
@@ -246,7 +253,7 @@ class SaveFile:
             # thanks to the crappy os.umask api, we can't query the umask without setting it. :-(
             umask = os.umask(UMASK_DEFAULT)
             os.umask(umask)
-            os.chmod(self.tmp_fname, mode=0o666 & ~ umask)
+            os.chmod(self.tmp_fname, mode=0o666 & ~umask)
         except OSError:
             # chmod might fail if the fs does not support it.
             # this is not harmful, the file will still have permissions for the owner.
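The comment in this hunk points at a real stdlib wart: os.umask() can only be read by writing it. A tiny sketch of the set-and-restore query used here (UMASK_DEFAULT is borg's constant; 0o077 stands in for it below):

import os

UMASK_DEFAULT = 0o077  # stand-in for borg's constant

umask = os.umask(UMASK_DEFAULT)  # returns the *previous* umask...
os.umask(umask)                  # ...so put it right back
print(f"current umask: {umask:#o}, resulting file mode: {0o666 & ~umask:#o}")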
@@ -270,13 +277,13 @@ def swidth(s):
 
 
 
 
 # patched socket.getfqdn() - see https://bugs.python.org/issue5004
-def getfqdn(name=''):
+def getfqdn(name=""):
     """Get fully qualified domain name from name.

     An empty argument is interpreted as meaning the local host.
     """
     name = name.strip()
-    if not name or name == '0.0.0.0':
+    if not name or name == "0.0.0.0":
         name = socket.gethostname()
     try:
         addrs = socket.getaddrinfo(name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME)
@@ -296,14 +303,14 @@ hostname = socket.gethostname()
 fqdn = getfqdn(hostname)
 # some people put the fqdn into /etc/hostname (which is wrong, should be the short hostname)
 # fix this (do the same as "hostname --short" cli command does internally):
-hostname = hostname.split('.')[0]
+hostname = hostname.split(".")[0]

 # uuid.getnode() is problematic in some environments (e.g. OpenVZ, see #3968) where the virtual MAC address
 # is all-zero. uuid.getnode falls back to returning a random value in that case, which is not what we want.
 # thus, we offer BORG_HOST_ID where a user can set an own, unique id for each of his hosts.
-hostid = os.environ.get('BORG_HOST_ID')
+hostid = os.environ.get("BORG_HOST_ID")
 if not hostid:
-    hostid = f'{fqdn}@{uuid.getnode()}'
+    hostid = f"{fqdn}@{uuid.getnode()}"


 def get_process_id():
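The BORG_HOST_ID fallback in this hunk is easy to reproduce in isolation; a sketch using the stdlib getfqdn (borg uses its patched variant from this same module, but the semantics are close enough for illustration):

import os
import socket
import uuid

fqdn = socket.getfqdn()  # stdlib version, stand-in for the patched getfqdn() above
hostid = os.environ.get("BORG_HOST_ID") or f"{fqdn}@{uuid.getnode()}"
print(hostid)  # e.g. "myhost.example.org@123456789012345"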

+ 6 - 5
src/borg/platform/xattr.py

@@ -18,7 +18,7 @@ def split_string0(buf):
     """split a list of zero-terminated strings into python not-zero-terminated bytes"""
     """split a list of zero-terminated strings into python not-zero-terminated bytes"""
     if isinstance(buf, bytearray):
     if isinstance(buf, bytearray):
         buf = bytes(buf)  # use a bytes object, so we return a list of bytes objects
         buf = bytes(buf)  # use a bytes object, so we return a list of bytes objects
-    return buf.split(b'\0')[:-1]
+    return buf.split(b"\0")[:-1]
 
 
 
 
 def split_lstring(buf):
 def split_lstring(buf):
@@ -27,8 +27,8 @@ def split_lstring(buf):
     mv = memoryview(buf)
     while mv:
         length = mv[0]
-        result.append(bytes(mv[1:1 + length]))
-        mv = mv[1 + length:]
+        result.append(bytes(mv[1 : 1 + length]))
+        mv = mv[1 + length :]
     return result
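split_lstring consumes length-prefixed strings: one length byte followed by that many payload bytes (the format BSD-style xattr name lists use). A quick round-trip with the function exactly as it stands after this change:

def split_lstring(buf):
    result = []
    mv = memoryview(buf)
    while mv:
        length = mv[0]                       # one-byte length prefix
        result.append(bytes(mv[1 : 1 + length]))
        mv = mv[1 + length :]                # skip prefix + payload
    return result

# b"\x03foo\x06foobar" -> a 3-byte "foo", then a 6-byte "foobar"
assert split_lstring(b"\x03foo\x06foobar") == [b"foo", b"foobar"]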
 
 
 
 
@@ -38,6 +38,7 @@ class BufferTooSmallError(Exception):
 
 
 def _check(rv, path=None, detect_buffer_too_small=False):
     from . import get_errno
+
     if rv < 0:
         e = get_errno()
         if detect_buffer_too_small and e == errno.ERANGE:
@@ -48,9 +49,9 @@ def _check(rv, path=None, detect_buffer_too_small=False):
             try:
                 msg = os.strerror(e)
             except ValueError:
-                msg = ''
+                msg = ""
             if isinstance(path, int):
-                path = '<FD %d>' % path
+                path = "<FD %d>" % path
             raise OSError(e, msg, path)
     if detect_buffer_too_small and rv >= len(buffer):
         # freebsd does not error with ERANGE if the buffer is too small,

+ 4 - 4
src/borg/platformflags.py

@@ -6,7 +6,7 @@ Use these Flags instead of sys.platform.startswith('<OS>') or try/except.
 
 
 import sys

-is_win32 = sys.platform.startswith('win32')
-is_linux = sys.platform.startswith('linux')
-is_freebsd = sys.platform.startswith('freebsd')
-is_darwin = sys.platform.startswith('darwin')
+is_win32 = sys.platform.startswith("win32")
+is_linux = sys.platform.startswith("linux")
+is_freebsd = sys.platform.startswith("freebsd")
+is_darwin = sys.platform.startswith("darwin")

File diff suppressed because it is too large
+ 306 - 247
src/borg/remote.py


File diff suppressed because it is too large
+ 236 - 210
src/borg/repository.py


+ 14 - 9
src/borg/selftest.py

@@ -50,16 +50,16 @@ class SelfTestResult(TestResult):
 
 
     def log_results(self, logger):
         for test, failure in self.errors + self.failures + self.unexpectedSuccesses:
-            logger.error('self test %s FAILED:\n%s', self.test_name(test), failure)
+            logger.error("self test %s FAILED:\n%s", self.test_name(test), failure)
         for test, reason in self.skipped:
-            logger.warning('self test %s skipped: %s', self.test_name(test), reason)
+            logger.warning("self test %s skipped: %s", self.test_name(test), reason)

     def successful_test_count(self):
         return len(self.successes)


 def selftest(logger):
-    if os.environ.get('BORG_SELFTEST') == 'disabled':
+    if os.environ.get("BORG_SELFTEST") == "disabled":
         logger.debug("borg selftest disabled via BORG_SELFTEST env variable")
         return
     selftest_started = time.perf_counter()
@@ -69,7 +69,7 @@ def selftest(logger):
         module = sys.modules[test_case.__module__]
         # a normal borg user does not have pytest installed, we must not require it in the test modules used here.
         # note: this only detects the usual toplevel import
-        assert 'pytest' not in dir(module), "pytest must not be imported in %s" % module.__name__
+        assert "pytest" not in dir(module), "pytest must not be imported in %s" % module.__name__
         test_suite.addTest(defaultTestLoader.loadTestsFromTestCase(test_case))
     test_suite.run(result)
     result.log_results(logger)
@@ -77,12 +77,17 @@ def selftest(logger):
     count_mismatch = successful_tests != SELFTEST_COUNT
     if result.wasSuccessful() and count_mismatch:
         # only print this if all tests succeeded
-        logger.error("self test count (%d != %d) mismatch, either test discovery is broken or a test was added "
-                     "without updating borg.selftest",
-                     successful_tests, SELFTEST_COUNT)
+        logger.error(
+            "self test count (%d != %d) mismatch, either test discovery is broken or a test was added "
+            "without updating borg.selftest",
+            successful_tests,
+            SELFTEST_COUNT,
+        )
     if not result.wasSuccessful() or count_mismatch:
-        logger.error("self test failed\n"
-                     "Could be a bug either in Borg, the package / distribution you use, your OS or your hardware.")
+        logger.error(
+            "self test failed\n"
+            "Could be a bug either in Borg, the package / distribution you use, your OS or your hardware."
+        )
         sys.exit(2)
         assert False, "sanity assertion failed: ran beyond sys.exit()"
     selftest_elapsed = time.perf_counter() - selftest_started

+ 1 - 1
src/borg/shellpattern.py

@@ -33,7 +33,7 @@ def translate(pat, match_end=r"\Z"):
             if i + 1 < n and pat[i] == "*" and pat[i + 1] == sep:
                 # **/ == wildcard for 0+ full (relative) directory names with trailing slashes; the forward slash stands
                 # for the platform-specific path separator
-                res += fr"(?:[^\{sep}]*\{sep})*"
+                res += rf"(?:[^\{sep}]*\{sep})*"
                 i += 2
             else:
                 # * == wildcard for name parts (does not cross path separator)
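For `**/`, translate() emits the fragment `(?:[^\<sep>]*\<sep>)*`: zero or more complete directory names, each with a trailing separator. A standalone check of just that fragment (sep="/" assumed; borg's full translate() handles more cases than shown here):

import re

sep = "/"
fragment = rf"(?:[^\{sep}]*\{sep})*"  # the piece emitted for "**/"
rx = re.compile(fragment + r"foo\Z")

assert rx.match("foo")           # zero directory levels
assert rx.match("a/foo")         # one level
assert rx.match("a/b/c/foo")     # many levels
assert not rx.match("a/foobar")  # "foo" must be the entire last name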

+ 35 - 32
src/borg/testsuite/__init__.py

@@ -2,6 +2,7 @@ from contextlib import contextmanager
 import filecmp
 import functools
 import os
+
 try:
     import posix
 except ImportError:
@@ -25,14 +26,14 @@ from .. import platform
 from ..fuse_impl import llfuse, has_pyfuse3, has_llfuse

 # Does this version of llfuse support ns precision?
-have_fuse_mtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns') if llfuse else False
+have_fuse_mtime_ns = hasattr(llfuse.EntryAttributes, "st_mtime_ns") if llfuse else False

 try:
     from pytest import raises
 except:  # noqa
     raises = None

-has_lchflags = hasattr(os, 'lchflags') or sys.platform.startswith('linux')
+has_lchflags = hasattr(os, "lchflags") or sys.platform.startswith("linux")
 try:
     with tempfile.NamedTemporaryFile() as file:
         platform.set_flags(file.name, stat.UF_NODUMP)
@@ -40,14 +41,14 @@ except OSError:
     has_lchflags = False

 # The mtime get/set precision varies on different OS and Python versions
-if posix and 'HAVE_FUTIMENS' in getattr(posix, '_have_functions', []):
+if posix and "HAVE_FUTIMENS" in getattr(posix, "_have_functions", []):
     st_mtime_ns_round = 0
-elif 'HAVE_UTIMES' in sysconfig.get_config_vars():
+elif "HAVE_UTIMES" in sysconfig.get_config_vars():
     st_mtime_ns_round = -6
 else:
     st_mtime_ns_round = -9

-if sys.platform.startswith('netbsd'):
+if sys.platform.startswith("netbsd"):
     st_mtime_ns_round = -4  # only >1 microsecond resolution here?
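st_mtime_ns_round values are meant as the ndigits argument of round(), so negative values coarsen an integer nanosecond timestamp; presumably the tests compare timestamps with round(st_mtime_ns, st_mtime_ns_round). The convention in one line each:

ns = 1_234_567_891                     # an st_mtime_ns-style value

assert round(ns, 0) == 1_234_567_891   # 0: full nanosecond precision kept
assert round(ns, -6) == 1_235_000_000  # -6: microsecond precision
assert round(ns, -9) == 1_000_000_000  # -9: whole seconds only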
 
 
 
 
@@ -61,8 +62,8 @@ def unopened_tempfile():
 def are_symlinks_supported():
     with unopened_tempfile() as filepath:
         try:
-            os.symlink('somewhere', filepath)
-            if os.stat(filepath, follow_symlinks=False) and os.readlink(filepath) == 'somewhere':
+            os.symlink("somewhere", filepath)
+            if os.stat(filepath, follow_symlinks=False) and os.readlink(filepath) == "somewhere":
                 return True
         except OSError:
             pass
@@ -71,12 +72,12 @@ def are_symlinks_supported():
 
 
 @functools.lru_cache
 def are_hardlinks_supported():
-    if not hasattr(os, 'link'):
+    if not hasattr(os, "link"):
         # some pythons do not have os.link
         return False

     with unopened_tempfile() as file1path, unopened_tempfile() as file2path:
-        open(file1path, 'w').close()
+        open(file1path, "w").close()
         try:
             os.link(file1path, file2path)
             stat1 = os.stat(file1path)
@@ -108,9 +109,9 @@ def is_utime_fully_supported():
     with unopened_tempfile() as filepath:
         # Some filesystems (such as SSHFS) don't support utime on symlinks
         if are_symlinks_supported():
-            os.symlink('something', filepath)
+            os.symlink("something", filepath)
         else:
-            open(filepath, 'w').close()
+            open(filepath, "w").close()
         try:
             os.utime(filepath, (1000, 2000), follow_symlinks=False)
             new_stats = os.stat(filepath, follow_symlinks=False)
@@ -125,14 +126,14 @@ def is_utime_fully_supported():
 
 
 @functools.lru_cache
 def is_birthtime_fully_supported():
-    if not hasattr(os.stat_result, 'st_birthtime'):
+    if not hasattr(os.stat_result, "st_birthtime"):
         return False
     with unopened_tempfile() as filepath:
         # Some filesystems (such as SSHFS) don't support utime on symlinks
         if are_symlinks_supported():
-            os.symlink('something', filepath)
+            os.symlink("something", filepath)
         else:
-            open(filepath, 'w').close()
+            open(filepath, "w").close()
         try:
             birthtime, mtime, atime = 946598400, 946684800, 946771200
             os.utime(filepath, (atime, birthtime), follow_symlinks=False)
@@ -149,7 +150,7 @@ def is_birthtime_fully_supported():
 
 
 def no_selinux(x):
     # selinux fails our FUSE tests, thus ignore selinux xattrs
-    SELINUX_KEY = b'security.selinux'
+    SELINUX_KEY = b"security.selinux"
     if isinstance(x, dict):
         return {k: v for k, v in x.items() if k != SELINUX_KEY}
     if isinstance(x, list):
@@ -157,8 +158,8 @@ def no_selinux(x):
 
 
 
 
 class BaseTestCase(unittest.TestCase):
-    """
-    """
+    """ """
+
     assert_in = unittest.TestCase.assertIn
     assert_not_in = unittest.TestCase.assertNotIn
     assert_equal = unittest.TestCase.assertEqual
@@ -171,9 +172,9 @@ class BaseTestCase(unittest.TestCase):
 
 
     @contextmanager
     def assert_creates_file(self, path):
-        assert not os.path.exists(path), f'{path} should not exist'
+        assert not os.path.exists(path), f"{path} should not exist"
         yield
-        assert os.path.exists(path), f'{path} should exist'
+        assert os.path.exists(path), f"{path} should exist"

     def assert_dirs_equal(self, dir1, dir2, **kwargs):
         diff = filecmp.dircmp(dir1, dir2)
@@ -191,10 +192,10 @@ class BaseTestCase(unittest.TestCase):
             s2 = os.stat(path2, follow_symlinks=False)
             # Assume path2 is on FUSE if st_dev is different
             fuse = s1.st_dev != s2.st_dev
-            attrs = ['st_uid', 'st_gid', 'st_rdev']
+            attrs = ["st_uid", "st_gid", "st_rdev"]
             if not fuse or not os.path.isdir(path1):
                 # dir nlink is always 1 on our FUSE filesystem
-                attrs.append('st_nlink')
+                attrs.append("st_nlink")
             d1 = [filename] + [getattr(s1, a) for a in attrs]
             d2 = [filename] + [getattr(s2, a) for a in attrs]
             d1.insert(1, oct(s1.st_mode))
@@ -225,7 +226,9 @@ class BaseTestCase(unittest.TestCase):
                 d2.append(no_selinux(get_all(path2, follow_symlinks=False)))
             self.assert_equal(d1, d2)
         for sub_diff in diff.subdirs.values():
-            self._assert_dirs_equal_cmp(sub_diff, ignore_flags=ignore_flags, ignore_xattrs=ignore_xattrs, ignore_ns=ignore_ns)
+            self._assert_dirs_equal_cmp(
+                sub_diff, ignore_flags=ignore_flags, ignore_xattrs=ignore_xattrs, ignore_ns=ignore_ns
+            )

     @contextmanager
     def fuse_mount(self, location, mountpoint=None, *options, fork=True, os_fork=False, **kwargs):
@@ -247,7 +250,7 @@ class BaseTestCase(unittest.TestCase):
             mountpoint = tempfile.mkdtemp()
         else:
             os.mkdir(mountpoint)
-        args = [f'--repo={location}', 'mount', mountpoint] + list(options)
+        args = [f"--repo={location}", "mount", mountpoint] + list(options)
         if os_fork:
             # Do not spawn, but actually (OS) fork.
             if os.fork() == 0:
@@ -264,12 +267,11 @@ class BaseTestCase(unittest.TestCase):
                     # This should never be reached, since it daemonizes,
                     # and the grandchild process exits before cmd() returns.
                     # However, just in case...
-                    print('Fatal: borg mount did not daemonize properly. Force exiting.',
-                          file=sys.stderr, flush=True)
+                    print("Fatal: borg mount did not daemonize properly. Force exiting.", file=sys.stderr, flush=True)
                     os._exit(0)
         else:
             self.cmd(*args, fork=fork, **kwargs)
-            if kwargs.get('exit_code', EXIT_SUCCESS) == EXIT_ERROR:
+            if kwargs.get("exit_code", EXIT_SUCCESS) == EXIT_ERROR:
                 # If argument `exit_code = EXIT_ERROR`, then this call
                 # is testing the behavior of an unsuccessful mount and
                 # we must not continue, as there is no mount to work
@@ -292,7 +294,7 @@ class BaseTestCase(unittest.TestCase):
             if os.path.ismount(mountpoint) == mounted:
                 return
             time.sleep(0.1)
-        message = 'Waiting for {} of {}'.format('mount' if mounted else 'umount', mountpoint)
+        message = "Waiting for {} of {}".format("mount" if mounted else "umount", mountpoint)
         raise TimeoutError(message)

     @contextmanager
@@ -308,17 +310,17 @@ class BaseTestCase(unittest.TestCase):
         tests are running with root privileges. Instead, the folder is
         rendered immutable with chattr or chflags, respectively.
         """
-        if sys.platform.startswith('linux'):
+        if sys.platform.startswith("linux"):
             cmd_immutable = 'chattr +i "%s"' % path
             cmd_mutable = 'chattr -i "%s"' % path
-        elif sys.platform.startswith(('darwin', 'freebsd', 'netbsd', 'openbsd')):
+        elif sys.platform.startswith(("darwin", "freebsd", "netbsd", "openbsd")):
             cmd_immutable = 'chflags uchg "%s"' % path
             cmd_mutable = 'chflags nouchg "%s"' % path
-        elif sys.platform.startswith('sunos'):  # openindiana
+        elif sys.platform.startswith("sunos"):  # openindiana
             cmd_immutable = 'chmod S+vimmutable "%s"' % path
             cmd_mutable = 'chmod S-vimmutable "%s"' % path
         else:
-            message = 'Testing read-only repos is not supported on platform %s' % sys.platform
+            message = "Testing read-only repos is not supported on platform %s" % sys.platform
             self.skipTest(message)
         try:
             os.system('LD_PRELOAD= chmod -R ugo-w "%s"' % path)
@@ -365,12 +367,13 @@ class environment_variable:
 
 
 class FakeInputs:
     """Simulate multiple user inputs, can be used as input() replacement"""
+
     def __init__(self, inputs):
         self.inputs = inputs

     def __call__(self, prompt=None):
         if prompt is not None:
-            print(prompt, end='')
+            print(prompt, end="")
         try:
             return self.inputs.pop(0)
         except IndexError:
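FakeInputs is a plain callable, so it drops in wherever input() is expected. The except IndexError branch is cut off by this hunk; the sketch below raises EOFError there, which is an assumption (that is what a real input() does when input is exhausted):

class FakeInputs:
    """Simulate multiple user inputs, can be used as input() replacement"""

    def __init__(self, inputs):
        self.inputs = inputs

    def __call__(self, prompt=None):
        if prompt is not None:
            print(prompt, end="")
        try:
            return self.inputs.pop(0)
        except IndexError:
            raise EOFError from None  # assumption: original branch body is not shown in this hunk

fake_input = FakeInputs(["yes", "no"])
assert fake_input("continue? ") == "yes"
assert fake_input("really? ") == "no"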

+ 79 - 74
src/borg/testsuite/archive.py

@@ -33,64 +33,66 @@ def test_stats_basic(stats):
 
 
 
 
 def tests_stats_progress(stats, monkeypatch, columns=80):
-    monkeypatch.setenv('COLUMNS', str(columns))
+    monkeypatch.setenv("COLUMNS", str(columns))
     out = StringIO()
     stats.show_progress(stream=out)
-    s = '20 B O 20 B U 1 N '
-    buf = ' ' * (columns - len(s))
+    s = "20 B O 20 B U 1 N "
+    buf = " " * (columns - len(s))
     assert out.getvalue() == s + buf + "\r"

     out = StringIO()
-    stats.update(10 ** 3, unique=False)
-    stats.show_progress(item=Item(path='foo'), final=False, stream=out)
-    s = '1.02 kB O 20 B U 1 N foo'
-    buf = ' ' * (columns - len(s))
+    stats.update(10**3, unique=False)
+    stats.show_progress(item=Item(path="foo"), final=False, stream=out)
+    s = "1.02 kB O 20 B U 1 N foo"
+    buf = " " * (columns - len(s))
     assert out.getvalue() == s + buf + "\r"
     out = StringIO()
-    stats.show_progress(item=Item(path='foo'*40), final=False, stream=out)
-    s = '1.02 kB O 20 B U 1 N foofoofoofoofoofoofoofoofo...foofoofoofoofoofoofoofoofoofoo'
-    buf = ' ' * (columns - len(s))
+    stats.show_progress(item=Item(path="foo" * 40), final=False, stream=out)
+    s = "1.02 kB O 20 B U 1 N foofoofoofoofoofoofoofoofo...foofoofoofoofoofoofoofoofoofoo"
+    buf = " " * (columns - len(s))
     assert out.getvalue() == s + buf + "\r"


 def test_stats_format(stats):
-    assert str(stats) == """\
+    assert (
+        str(stats)
+        == """\
 Number of files: 1
 Original size: 20 B
 Deduplicated size: 20 B
 """
+    )
     s = f"{stats.osize_fmt}"
     assert s == "20 B"
     # kind of redundant, but id is variable so we can't match reliably
-    assert repr(stats) == f'<Statistics object at {id(stats):#x} (20, 20)>'
+    assert repr(stats) == f"<Statistics object at {id(stats):#x} (20, 20)>"


 def test_stats_progress_json(stats):
     stats.output_json = True

     out = StringIO()
-    stats.show_progress(item=Item(path='foo'), stream=out)
+    stats.show_progress(item=Item(path="foo"), stream=out)
     result = json.loads(out.getvalue())
-    assert result['type'] == 'archive_progress'
-    assert isinstance(result['time'], float)
-    assert result['finished'] is False
-    assert result['path'] == 'foo'
-    assert result['original_size'] == 20
-    assert result['nfiles'] == 1
+    assert result["type"] == "archive_progress"
+    assert isinstance(result["time"], float)
+    assert result["finished"] is False
+    assert result["path"] == "foo"
+    assert result["original_size"] == 20
+    assert result["nfiles"] == 1

     out = StringIO()
     stats.show_progress(stream=out, final=True)
     result = json.loads(out.getvalue())
-    assert result['type'] == 'archive_progress'
-    assert isinstance(result['time'], float)
-    assert result['finished'] is True  # see #6570
-    assert 'path' not in result
-    assert 'original_size' not in result
-    assert 'nfiles' not in result
+    assert result["type"] == "archive_progress"
+    assert isinstance(result["time"], float)
+    assert result["finished"] is True  # see #6570
+    assert "path" not in result
+    assert "original_size" not in result
+    assert "nfiles" not in result


 class MockCache:
-
     class MockRepo:
         def async_response(self, wait=True):
             pass
@@ -105,30 +107,24 @@ class MockCache:
 
 
 
 
 class ArchiveTimestampTestCase(BaseTestCase):
-
     def _test_timestamp_parsing(self, isoformat, expected):
         repository = Mock()
         key = PlaintextKey(repository)
         manifest = Manifest(repository, key)
-        a = Archive(repository, key, manifest, 'test', create=True)
+        a = Archive(repository, key, manifest, "test", create=True)
         a.metadata = ArchiveItem(time=isoformat)
         self.assert_equal(a.ts, expected)

     def test_with_microseconds(self):
-        self._test_timestamp_parsing(
-            '1970-01-01T00:00:01.000001',
-            datetime(1970, 1, 1, 0, 0, 1, 1, timezone.utc))
+        self._test_timestamp_parsing("1970-01-01T00:00:01.000001", datetime(1970, 1, 1, 0, 0, 1, 1, timezone.utc))

     def test_without_microseconds(self):
-        self._test_timestamp_parsing(
-            '1970-01-01T00:00:01',
-            datetime(1970, 1, 1, 0, 0, 1, 0, timezone.utc))
+        self._test_timestamp_parsing("1970-01-01T00:00:01", datetime(1970, 1, 1, 0, 0, 1, 0, timezone.utc))


 class ChunkBufferTestCase(BaseTestCase):
-
     def test(self):
-        data = [Item(path='p1'), Item(path='p2')]
+        data = [Item(path="p1"), Item(path="p2")]
         cache = MockCache()
         key = PlaintextKey(None)
         chunks = CacheChunkBuffer(cache, key, None)
@@ -144,7 +140,7 @@ class ChunkBufferTestCase(BaseTestCase):
 
 
     def test_partial(self):
         big = "0123456789abcdefghijklmnopqrstuvwxyz" * 25000
-        data = [Item(path='full', source=big), Item(path='partial', source=big)]
+        data = [Item(path="full", source=big), Item(path="partial", source=big)]
         cache = MockCache()
         key = PlaintextKey(None)
         chunks = CacheChunkBuffer(cache, key, None)
@@ -165,12 +161,11 @@ class ChunkBufferTestCase(BaseTestCase):
 
 
 
 
 class RobustUnpackerTestCase(BaseTestCase):
-
     def make_chunks(self, items):
-        return b''.join(msgpack.packb({'path': item}) for item in items)
+        return b"".join(msgpack.packb({"path": item}) for item in items)

     def _validator(self, value):
-        return isinstance(value, dict) and value.get('path') in ('foo', 'bar', 'boo', 'baz')
+        return isinstance(value, dict) and value.get("path") in ("foo", "bar", "boo", "baz")

     def process(self, input):
         unpacker = RobustUnpacker(validator=self._validator, item_keys=ITEM_KEYS)
@@ -185,14 +180,14 @@ class RobustUnpackerTestCase(BaseTestCase):
         return result

     def test_extra_garbage_no_sync(self):
-        chunks = [(False, [self.make_chunks(['foo', 'bar'])]),
-                  (False, [b'garbage'] + [self.make_chunks(['boo', 'baz'])])]
+        chunks = [
+            (False, [self.make_chunks(["foo", "bar"])]),
+            (False, [b"garbage"] + [self.make_chunks(["boo", "baz"])]),
+        ]
         result = self.process(chunks)
-        self.assert_equal(result, [
-            {'path': 'foo'}, {'path': 'bar'},
-            103, 97, 114, 98, 97, 103, 101,
-            {'path': 'boo'},
-            {'path': 'baz'}])
+        self.assert_equal(
+            result, [{"path": "foo"}, {"path": "bar"}, 103, 97, 114, 98, 97, 103, 101, {"path": "boo"}, {"path": "baz"}]
+        )

     def split(self, left, length):
         parts = []
@@ -202,22 +197,22 @@ class RobustUnpackerTestCase(BaseTestCase):
         return parts

     def test_correct_stream(self):
-        chunks = self.split(self.make_chunks(['foo', 'bar', 'boo', 'baz']), 2)
+        chunks = self.split(self.make_chunks(["foo", "bar", "boo", "baz"]), 2)
         input = [(False, chunks)]
         result = self.process(input)
-        self.assert_equal(result, [{'path': 'foo'}, {'path': 'bar'}, {'path': 'boo'}, {'path': 'baz'}])
+        self.assert_equal(result, [{"path": "foo"}, {"path": "bar"}, {"path": "boo"}, {"path": "baz"}])

     def test_missing_chunk(self):
-        chunks = self.split(self.make_chunks(['foo', 'bar', 'boo', 'baz']), 4)
+        chunks = self.split(self.make_chunks(["foo", "bar", "boo", "baz"]), 4)
         input = [(False, chunks[:3]), (True, chunks[4:])]
         result = self.process(input)
-        self.assert_equal(result, [{'path': 'foo'}, {'path': 'boo'}, {'path': 'baz'}])
+        self.assert_equal(result, [{"path": "foo"}, {"path": "boo"}, {"path": "baz"}])

     def test_corrupt_chunk(self):
-        chunks = self.split(self.make_chunks(['foo', 'bar', 'boo', 'baz']), 4)
-        input = [(False, chunks[:3]), (True, [b'gar', b'bage'] + chunks[3:])]
+        chunks = self.split(self.make_chunks(["foo", "bar", "boo", "baz"]), 4)
+        input = [(False, chunks[:3]), (True, [b"gar", b"bage"] + chunks[3:])]
         result = self.process(input)
-        self.assert_equal(result, [{'path': 'foo'}, {'path': 'boo'}, {'path': 'baz'}])
+        self.assert_equal(result, [{"path": "foo"}, {"path": "boo"}, {"path": "baz"}])


 @pytest.fixture
@@ -225,12 +220,17 @@ def item_keys_serialized():
     return [msgpack.packb(name) for name in ITEM_KEYS]


-@pytest.mark.parametrize('packed',
-    [b'', b'x', b'foobar', ] +
-    [msgpack.packb(o) for o in (
-        [None, 0, 0.0, False, '', {}, [], ()] +
-        [42, 23.42, True, b'foobar', {b'foo': b'bar'}, [b'foo', b'bar'], (b'foo', b'bar')]
-    )])
+@pytest.mark.parametrize(
+    "packed",
+    [b"", b"x", b"foobar"]
+    + [
+        msgpack.packb(o)
+        for o in (
+            [None, 0, 0.0, False, "", {}, [], ()]
+            + [42, 23.42, True, b"foobar", {b"foo": b"bar"}, [b"foo", b"bar"], (b"foo", b"bar")]
+        )
+    ],
+)
 def test_invalid_msgpacked_item(packed, item_keys_serialized):
     assert not valid_msgpacked_dict(packed, item_keys_serialized)

@@ -239,20 +239,25 @@ def test_invalid_msgpacked_item(packed, item_keys_serialized):
 IK = sorted(list(ITEM_KEYS))


-@pytest.mark.parametrize('packed',
-    [msgpack.packb(o) for o in [
-        {'path': b'/a/b/c'},  # small (different msgpack mapping type!)
-        OrderedDict((k, b'') for k in IK),  # as big (key count) as it gets
-        OrderedDict((k, b'x' * 1000) for k in IK),  # as big (key count and volume) as it gets
-    ]])
+@pytest.mark.parametrize(
+    "packed",
+    [
+        msgpack.packb(o)
+        for o in [
+            {"path": b"/a/b/c"},  # small (different msgpack mapping type!)
+            OrderedDict((k, b"") for k in IK),  # as big (key count) as it gets
+            OrderedDict((k, b"x" * 1000) for k in IK),  # as big (key count and volume) as it gets
+        ]
+    ],
+)
 def test_valid_msgpacked_items(packed, item_keys_serialized):
     assert valid_msgpacked_dict(packed, item_keys_serialized)


 def test_key_length_msgpacked_items():
-    key = 'x' * 32  # 31 bytes is the limit for fixstr msgpack type
-    data = {key: b''}
-    item_keys_serialized = [msgpack.packb(key), ]
+    key = "x" * 32  # 31 bytes is the limit for fixstr msgpack type
+    data = {key: b""}
+    item_keys_serialized = [msgpack.packb(key)]
     assert valid_msgpacked_dict(msgpack.packb(data), item_keys_serialized)
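The fixstr remark in this hunk can be verified with the msgpack package directly (plain msgpack shown here; borg wraps it in its own helpers module): strings up to 31 bytes get a one-byte fixstr header, longer ones switch to str8 with an explicit length byte.

import msgpack

short = msgpack.packb("x" * 31)
longer = msgpack.packb("x" * 32)

assert short[0] == 0xA0 | 31  # fixstr: type bytes 0xa0..0xbf, length lives in the type byte
assert longer[0] == 0xD9      # str8 type byte ...
assert longer[1] == 32        # ... followed by a one-byte length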
 
 
 
 
@@ -277,7 +282,7 @@ def test_backup_io_iter():
 
 
     normal_iterator = Iterator(StopIteration)
     for _ in backup_io_iter(normal_iterator):
-        assert False, 'StopIteration handled incorrectly'
+        assert False, "StopIteration handled incorrectly"


 def test_get_item_uid_gid():
@@ -288,7 +293,7 @@ def test_get_item_uid_gid():
     user0, group0 = uid2user(0), gid2group(0)

     # this is intentionally a "strange" item, with not matching ids/names.
-    item = Item(path='filename', uid=1, gid=2, user=user0, group=group0)
+    item = Item(path="filename", uid=1, gid=2, user=user0, group=group0)

     uid, gid = get_item_uid_gid(item, numeric=False)
     # these are found via a name-to-id lookup
@@ -306,7 +311,7 @@ def test_get_item_uid_gid():
     assert gid == 4

     # item metadata broken, has negative ids.
-    item = Item(path='filename', uid=-1, gid=-2, user=user0, group=group0)
+    item = Item(path="filename", uid=-1, gid=-2, user=user0, group=group0)

     uid, gid = get_item_uid_gid(item, numeric=True)
     # use the uid/gid defaults (which both default to 0).
@@ -319,7 +324,7 @@ def test_get_item_uid_gid():
     assert gid == 6

     # item metadata broken, has negative ids and non-existing user/group names.
-    item = Item(path='filename', uid=-3, gid=-4, user='udoesnotexist', group='gdoesnotexist')
+    item = Item(path="filename", uid=-3, gid=-4, user="udoesnotexist", group="gdoesnotexist")

     uid, gid = get_item_uid_gid(item, numeric=False)
     # use the uid/gid defaults (which both default to 0).
@@ -332,7 +337,7 @@ def test_get_item_uid_gid():
     assert gid == 8

     # item metadata has valid uid/gid, but non-existing user/group names.
-    item = Item(path='filename', uid=9, gid=10, user='udoesnotexist', group='gdoesnotexist')
+    item = Item(path="filename", uid=9, gid=10, user="udoesnotexist", group="gdoesnotexist")

     uid, gid = get_item_uid_gid(item, numeric=False)
     # because user/group name does not exist here, use valid numeric ids from item metadata.

File diff suppressed because it is too large
+ 1055 - 913
src/borg/testsuite/archiver.py


+ 27 - 26
src/borg/testsuite/benchmark.py

@@ -16,35 +16,38 @@ from ..constants import zeros
 
 
 @pytest.fixture
 def repo_url(request, tmpdir, monkeypatch):
-    monkeypatch.setenv('BORG_PASSPHRASE', '123456')
-    monkeypatch.setenv('BORG_CHECK_I_KNOW_WHAT_I_AM_DOING', 'YES')
-    monkeypatch.setenv('BORG_DELETE_I_KNOW_WHAT_I_AM_DOING', 'YES')
-    monkeypatch.setenv('BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK', 'yes')
-    monkeypatch.setenv('BORG_KEYS_DIR', str(tmpdir.join('keys')))
-    monkeypatch.setenv('BORG_CACHE_DIR', str(tmpdir.join('cache')))
-    yield str(tmpdir.join('repository'))
+    monkeypatch.setenv("BORG_PASSPHRASE", "123456")
+    monkeypatch.setenv("BORG_CHECK_I_KNOW_WHAT_I_AM_DOING", "YES")
+    monkeypatch.setenv("BORG_DELETE_I_KNOW_WHAT_I_AM_DOING", "YES")
+    monkeypatch.setenv("BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK", "yes")
+    monkeypatch.setenv("BORG_KEYS_DIR", str(tmpdir.join("keys")))
+    monkeypatch.setenv("BORG_CACHE_DIR", str(tmpdir.join("cache")))
+    yield str(tmpdir.join("repository"))
     tmpdir.remove(rec=1)


 @pytest.fixture(params=["none", "repokey-aes-ocb"])
 def repo(request, cmd, repo_url):
-    cmd(f'--repo={repo_url}', 'rcreate', '--encryption', request.param)
+    cmd(f"--repo={repo_url}", "rcreate", "--encryption", request.param)
     return repo_url


-@pytest.fixture(scope='session', params=["zeros", "random"])
+@pytest.fixture(scope="session", params=["zeros", "random"])
 def testdata(request, tmpdir_factory):
-    count, size = 10, 1000*1000
+    count, size = 10, 1000 * 1000
     assert size <= len(zeros)
-    p = tmpdir_factory.mktemp('data')
+    p = tmpdir_factory.mktemp("data")
     data_type = request.param
-    if data_type == 'zeros':
+    if data_type == "zeros":
         # do not use a binary zero (\0) to avoid sparse detection
         def data(size):
             return memoryview(zeros)[:size]
-    elif data_type == 'random':
+
+    elif data_type == "random":
+
         def data(size):
             return os.urandom(size)
+
     else:
         raise ValueError("data_type must be 'random' or 'zeros'.")
     for i in range(count):
@@ -54,56 +57,54 @@ def testdata(request, tmpdir_factory):
     p.remove(rec=1)


-@pytest.fixture(params=['none', 'lz4'])
+@pytest.fixture(params=["none", "lz4"])
 def repo_archive(request, cmd, repo, testdata):
-    archive = 'test'
-    cmd(f'--repo={repo}', 'create', '--compression', request.param, archive, testdata)
+    archive = "test"
+    cmd(f"--repo={repo}", "create", "--compression", request.param, archive, testdata)
     return repo, archive


 def test_create_none(benchmark, cmd, repo, testdata):
-    result, out = benchmark.pedantic(cmd, (f'--repo={repo}', 'create', '--compression', 'none',
-                                           'test', testdata))
+    result, out = benchmark.pedantic(cmd, (f"--repo={repo}", "create", "--compression", "none", "test", testdata))
     assert result == 0


 def test_create_lz4(benchmark, cmd, repo, testdata):
-    result, out = benchmark.pedantic(cmd, (f'--repo={repo}', 'create', '--compression', 'lz4',
-                                           'test', testdata))
+    result, out = benchmark.pedantic(cmd, (f"--repo={repo}", "create", "--compression", "lz4", "test", testdata))
     assert result == 0


 def test_extract(benchmark, cmd, repo_archive, tmpdir):
     repo, archive = repo_archive
     with changedir(str(tmpdir)):
-        result, out = benchmark.pedantic(cmd, (f'--repo={repo}', 'extract', archive))
+        result, out = benchmark.pedantic(cmd, (f"--repo={repo}", "extract", archive))
     assert result == 0


 def test_delete(benchmark, cmd, repo_archive):
     repo, archive = repo_archive
-    result, out = benchmark.pedantic(cmd, (f'--repo={repo}', 'delete', '-a', archive))
+    result, out = benchmark.pedantic(cmd, (f"--repo={repo}", "delete", "-a", archive))
     assert result == 0


 def test_list(benchmark, cmd, repo_archive):
     repo, archive = repo_archive
-    result, out = benchmark(cmd, f'--repo={repo}', 'list', archive)
+    result, out = benchmark(cmd, f"--repo={repo}", "list", archive)
     assert result == 0


 def test_info(benchmark, cmd, repo_archive):
     repo, archive = repo_archive
-    result, out = benchmark(cmd, f'--repo={repo}', 'info', '-a', archive)
+    result, out = benchmark(cmd, f"--repo={repo}", "info", "-a", archive)
     assert result == 0


 def test_check(benchmark, cmd, repo_archive):
     repo, archive = repo_archive
-    result, out = benchmark(cmd, f'--repo={repo}', 'check')
+    result, out = benchmark(cmd, f"--repo={repo}", "check")
     assert result == 0


 def test_help(benchmark, cmd):
-    result, out = benchmark(cmd, 'help')
+    result, out = benchmark(cmd, "help")
     assert result == 0
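These tests lean on the pytest-benchmark plugin: the benchmark fixture times a callable, and benchmark.pedantic() gives manual control over rounds and setup. A minimal standalone example of the same API, assuming pytest-benchmark is installed:

# save as test_bench_demo.py and run: pytest test_bench_demo.py
def build_list(n):
    return list(range(n))

def test_build_list(benchmark):
    result = benchmark(build_list, 10_000)  # timed call, repeated automatically by the plugin
    assert len(result) == 10_000

def test_build_list_pedantic(benchmark):
    result = benchmark.pedantic(build_list, args=(10_000,), rounds=5)  # exactly 5 timed rounds
    assert len(result) == 10_000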

+ 72 - 124
src/borg/testsuite/cache.py

@@ -26,75 +26,29 @@ class TestCacheSynchronizer:
         return CacheSynchronizer(index)

     def test_no_chunks(self, index, sync):
-        data = packb({
-            'foo': 'bar',
-            'baz': 1234,
-            'bar': 5678,
-            'user': 'chunks',
-            'chunks': []
-        })
+        data = packb({"foo": "bar", "baz": 1234, "bar": 5678, "user": "chunks", "chunks": []})
         sync.feed(data)
         assert not len(index)

     def test_simple(self, index, sync):
-        data = packb({
-            'foo': 'bar',
-            'baz': 1234,
-            'bar': 5678,
-            'user': 'chunks',
-            'chunks': [
-                (H(1), 1),
-                (H(2), 2),
-            ]
-        })
+        data = packb({"foo": "bar", "baz": 1234, "bar": 5678, "user": "chunks", "chunks": [(H(1), 1), (H(2), 2)]})
         sync.feed(data)
         assert len(index) == 2
         assert index[H(1)] == (1, 1)
         assert index[H(2)] == (1, 2)

     def test_multiple(self, index, sync):
-        data = packb({
-            'foo': 'bar',
-            'baz': 1234,
-            'bar': 5678,
-            'user': 'chunks',
-            'chunks': [
-                (H(1), 1),
-                (H(2), 2),
-            ]
-        })
-        data += packb({
-            'xattrs': {
-                'security.foo': 'bar',
-                'chunks': '123456',
-            },
-            'stuff': [
-                (1, 2, 3),
-            ]
-        })
-        data += packb({
-            'xattrs': {
-                'security.foo': 'bar',
-                'chunks': '123456',
-            },
-            'chunks': [
-                (H(1), 1),
-                (H(2), 2),
-            ],
-            'stuff': [
-                (1, 2, 3),
-            ]
-        })
-        data += packb({
-            'chunks': [
-                (H(3), 1),
-            ],
-        })
-        data += packb({
-            'chunks': [
-                (H(1), 1),
-            ],
-        })
+        data = packb({"foo": "bar", "baz": 1234, "bar": 5678, "user": "chunks", "chunks": [(H(1), 1), (H(2), 2)]})
+        data += packb({"xattrs": {"security.foo": "bar", "chunks": "123456"}, "stuff": [(1, 2, 3)]})
+        data += packb(
+            {
+                "xattrs": {"security.foo": "bar", "chunks": "123456"},
+                "chunks": [(H(1), 1), (H(2), 2)],
+                "stuff": [(1, 2, 3)],
+            }
+        )
+        data += packb({"chunks": [(H(3), 1)]})
+        data += packb({"chunks": [(H(1), 1)]})

         part1 = data[:70]
         part2 = data[70:120]
@@ -107,62 +61,68 @@ class TestCacheSynchronizer:
         assert index[H(2)] == (2, 2)
         assert index[H(2)] == (2, 2)
         assert index[H(3)] == (1, 1)
         assert index[H(3)] == (1, 1)
 
 
-    @pytest.mark.parametrize('elem,error', (
-        ({1: 2}, 'Unexpected object: map'),
-        (bytes(213), [
-            'Unexpected bytes in chunks structure',  # structure 2/3
-            'Incorrect key length']),                # structure 3/3
-        (1, 'Unexpected object: integer'),
-        (1.0, 'Unexpected object: double'),
-        (True, 'Unexpected object: true'),
-        (False, 'Unexpected object: false'),
-        (None, 'Unexpected object: nil'),
-    ))
-    @pytest.mark.parametrize('structure', (
-        lambda elem: {'chunks': elem},
-        lambda elem: {'chunks': [elem]},
-        lambda elem: {'chunks': [(elem, 1)]},
-    ))
+    @pytest.mark.parametrize(
+        "elem,error",
+        (
+            ({1: 2}, "Unexpected object: map"),
+            (
+                bytes(213),
+                ["Unexpected bytes in chunks structure", "Incorrect key length"],  # structure 2/3
+            ),  # structure 3/3
+            (1, "Unexpected object: integer"),
+            (1.0, "Unexpected object: double"),
+            (True, "Unexpected object: true"),
+            (False, "Unexpected object: false"),
+            (None, "Unexpected object: nil"),
+        ),
+    )
+    @pytest.mark.parametrize(
+        "structure",
+        (lambda elem: {"chunks": elem}, lambda elem: {"chunks": [elem]}, lambda elem: {"chunks": [(elem, 1)]}),
+    )
     def test_corrupted(self, sync, structure, elem, error):
         packed = packb(structure(elem))
         with pytest.raises(ValueError) as excinfo:
             sync.feed(packed)
         if isinstance(error, str):
             error = [error]
-        possible_errors = ['cache_sync_feed failed: ' + error for error in error]
+        possible_errors = ["cache_sync_feed failed: " + error for error in error]
         assert str(excinfo.value) in possible_errors
 
-    @pytest.mark.parametrize('data,error', (
-        # Incorrect tuple length
-        ({'chunks': [(bytes(32), 2, 3, 4)]}, 'Invalid chunk list entry length'),
-        ({'chunks': [(bytes(32), )]}, 'Invalid chunk list entry length'),
-        # Incorrect types
-        ({'chunks': [(1, 2)]}, 'Unexpected object: integer'),
-        ({'chunks': [(1, bytes(32))]}, 'Unexpected object: integer'),
-        ({'chunks': [(bytes(32), 1.0)]}, 'Unexpected object: double'),
-    ))
+    @pytest.mark.parametrize(
+        "data,error",
+        (
+            # Incorrect tuple length
+            ({"chunks": [(bytes(32), 2, 3, 4)]}, "Invalid chunk list entry length"),
+            ({"chunks": [(bytes(32),)]}, "Invalid chunk list entry length"),
+            # Incorrect types
+            ({"chunks": [(1, 2)]}, "Unexpected object: integer"),
+            ({"chunks": [(1, bytes(32))]}, "Unexpected object: integer"),
+            ({"chunks": [(bytes(32), 1.0)]}, "Unexpected object: double"),
+        ),
+    )
     def test_corrupted_ancillary(self, index, sync, data, error):
         packed = packb(data)
         with pytest.raises(ValueError) as excinfo:
             sync.feed(packed)
-        assert str(excinfo.value) == 'cache_sync_feed failed: ' + error
+        assert str(excinfo.value) == "cache_sync_feed failed: " + error
 
     def make_index_with_refcount(self, refcount):
         index_data = io.BytesIO()
-        index_data.write(b'BORG_IDX')
+        index_data.write(b"BORG_IDX")
         # num_entries
-        index_data.write((1).to_bytes(4, 'little'))
+        index_data.write((1).to_bytes(4, "little"))
         # num_buckets
-        index_data.write((1).to_bytes(4, 'little'))
+        index_data.write((1).to_bytes(4, "little"))
         # key_size
-        index_data.write((32).to_bytes(1, 'little'))
+        index_data.write((32).to_bytes(1, "little"))
         # value_size
-        index_data.write((3 * 4).to_bytes(1, 'little'))
+        index_data.write((3 * 4).to_bytes(1, "little"))
 
         index_data.write(H(0))
-        index_data.write(refcount.to_bytes(4, 'little'))
-        index_data.write((1234).to_bytes(4, 'little'))
-        index_data.write((5678).to_bytes(4, 'little'))
+        index_data.write(refcount.to_bytes(4, "little"))
+        index_data.write((1234).to_bytes(4, "little"))
+        index_data.write((5678).to_bytes(4, "little"))
 
         index_data.seek(0)
         index = ChunkIndex.read(index_data)
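
A note on the blob hand-built by make_index_with_refcount, as far as this helper alone reveals the on-disk layout (field meanings taken from the comments above, not from hashindex internals):

# Layout of the hand-built ChunkIndex blob, as written above:
#   b"BORG_IDX"                   8-byte magic
#   (1).to_bytes(4, "little")     num_entries
#   (1).to_bytes(4, "little")     num_buckets
#   (32).to_bytes(1, "little")    key_size
#   (12).to_bytes(1, "little")    value_size (3 * 4)
# then one bucket: the 32-byte key H(0) followed by three little-endian
# 32-bit values: the refcount under test, 1234, and 5678.
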
@@ -171,34 +131,22 @@ class TestCacheSynchronizer:
     def test_corrupted_refcount(self):
         index = self.make_index_with_refcount(ChunkIndex.MAX_VALUE + 1)
         sync = CacheSynchronizer(index)
-        data = packb({
-            'chunks': [
-                (H(0), 1),
-            ]
-        })
+        data = packb({"chunks": [(H(0), 1)]})
         with pytest.raises(ValueError) as excinfo:
             sync.feed(data)
-        assert str(excinfo.value) == 'cache_sync_feed failed: invalid reference count'
+        assert str(excinfo.value) == "cache_sync_feed failed: invalid reference count"
 
     def test_refcount_max_value(self):
         index = self.make_index_with_refcount(ChunkIndex.MAX_VALUE)
         sync = CacheSynchronizer(index)
-        data = packb({
-            'chunks': [
-                (H(0), 1),
-            ]
-        })
+        data = packb({"chunks": [(H(0), 1)]})
         sync.feed(data)
         assert index[H(0)] == (ChunkIndex.MAX_VALUE, 1234)

     def test_refcount_one_below_max_value(self):
         index = self.make_index_with_refcount(ChunkIndex.MAX_VALUE - 1)
         sync = CacheSynchronizer(index)
-        data = packb({
-            'chunks': [
-                (H(0), 1),
-            ]
-        })
+        data = packb({"chunks": [(H(0), 1)]})
         sync.feed(data)
         # Incremented to maximum
         assert index[H(0)] == (ChunkIndex.MAX_VALUE, 1234)
@@ -209,17 +157,17 @@ class TestCacheSynchronizer:
 class TestAdHocCache:
     @pytest.fixture
     def repository(self, tmpdir):
-        self.repository_location = os.path.join(str(tmpdir), 'repository')
+        self.repository_location = os.path.join(str(tmpdir), "repository")
         with Repository(self.repository_location, exclusive=True, create=True) as repository:
-            repository.put(H(1), b'1234')
-            repository.put(Manifest.MANIFEST_ID, b'5678')
+            repository.put(H(1), b"1234")
+            repository.put(Manifest.MANIFEST_ID, b"5678")
             yield repository

     @pytest.fixture
     def key(self, repository, monkeypatch):
-        monkeypatch.setenv('BORG_PASSPHRASE', 'test')
+        monkeypatch.setenv("BORG_PASSPHRASE", "test")
         key = AESOCBRepoKey.create(repository, TestKey.MockArgs())
-        key.compressor = CompressionSpec('none').compressor
+        key.compressor = CompressionSpec("none").compressor
         return key

     @pytest.fixture
@@ -237,18 +185,18 @@ class TestAdHocCache:
     def test_does_not_delete_existing_chunks(self, repository, cache):
         assert cache.seen_chunk(H(1)) == ChunkIndex.MAX_VALUE
         cache.chunk_decref(H(1), Statistics())
-        assert repository.get(H(1)) == b'1234'
+        assert repository.get(H(1)) == b"1234"
 
     def test_does_not_overwrite(self, cache):
         with pytest.raises(AssertionError):
-            cache.add_chunk(H(1), b'5678', Statistics(), overwrite=True)
+            cache.add_chunk(H(1), b"5678", Statistics(), overwrite=True)
 
     def test_seen_chunk_add_chunk_size(self, cache):
-        assert cache.add_chunk(H(1), b'5678', Statistics()) == (H(1), 4)
+        assert cache.add_chunk(H(1), b"5678", Statistics()) == (H(1), 4)
 
     def test_deletes_chunks_during_lifetime(self, cache, repository):
         """E.g. checkpoint archives"""
-        cache.add_chunk(H(5), b'1010', Statistics())
+        cache.add_chunk(H(5), b"1010", Statistics())
         assert cache.seen_chunk(H(5)) == 1
         cache.chunk_decref(H(5), Statistics())
         assert not cache.seen_chunk(H(5))
@@ -256,8 +204,8 @@ class TestAdHocCache:
             repository.get(H(5))

     def test_files_cache(self, cache):
-        assert cache.file_known_and_unchanged(b'foo', bytes(32), None) == (False, None)
-        assert cache.cache_mode == 'd'
+        assert cache.file_known_and_unchanged(b"foo", bytes(32), None) == (False, None)
+        assert cache.cache_mode == "d"
         assert cache.files is None

     def test_txn(self, cache):
@@ -267,13 +215,13 @@ class TestAdHocCache:
         assert cache.chunks
         cache.rollback()
         assert not cache._txn_active
-        assert not hasattr(cache, 'chunks')
+        assert not hasattr(cache, "chunks")
 
     def test_incref_after_add_chunk(self, cache):
-        assert cache.add_chunk(H(3), b'5678', Statistics()) == (H(3), 4)
+        assert cache.add_chunk(H(3), b"5678", Statistics()) == (H(3), 4)
         assert cache.chunk_incref(H(3), Statistics()) == (H(3), 4)

     def test_existing_incref_after_add_chunk(self, cache):
         """This case occurs with part files, see Archive.chunk_file."""
-        assert cache.add_chunk(H(1), b'5678', Statistics()) == (H(1), 4)
+        assert cache.add_chunk(H(1), b"5678", Statistics()) == (H(1), 4)
         assert cache.chunk_incref(H(1), Statistics()) == (H(1), 4)
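
Every hunk in this file is pure reformatting: quote style, trailing commas, and line wrapping change while the parsed code does not. A minimal sketch of how such a hunk can be double-checked (black's default safe mode performs an equivalent AST comparison itself; names like packb/H only need to parse, not resolve):

import ast

def same_ast(old_src: str, new_src: str) -> bool:
    # identical ASTs => the reformatting cannot change runtime behavior;
    # note that 'x' and "x" parse to the same node
    return ast.dump(ast.parse(old_src)) == ast.dump(ast.parse(new_src))

old = "data = packb({\n    'chunks': [\n        (H(0), 1),\n    ]\n})\n"
new = 'data = packb({"chunks": [(H(0), 1)]})\n'
assert same_ast(old, new)
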

+ 17 - 9
src/borg/testsuite/checksums.py

@@ -5,16 +5,24 @@ from ..helpers import bin_to_hex
 
 
 def test_xxh64():
-    assert bin_to_hex(checksums.xxh64(b'test', 123)) == '2b81b9401bef86cf'
-    assert bin_to_hex(checksums.xxh64(b'test')) == '4fdcca5ddb678139'
-    assert bin_to_hex(checksums.xxh64(unhexlify(
-        '6f663f01c118abdea553373d5eae44e7dac3b6829b46b9bbeff202b6c592c22d724'
-        'fb3d25a347cca6c5b8f20d567e4bb04b9cfa85d17f691590f9a9d32e8ccc9102e9d'
-        'cf8a7e6716280cd642ce48d03fdf114c9f57c20d9472bb0f81c147645e6fa3d331'))) == '35d5d2f545d9511a'
+    assert bin_to_hex(checksums.xxh64(b"test", 123)) == "2b81b9401bef86cf"
+    assert bin_to_hex(checksums.xxh64(b"test")) == "4fdcca5ddb678139"
+    assert (
+        bin_to_hex(
+            checksums.xxh64(
+                unhexlify(
+                    "6f663f01c118abdea553373d5eae44e7dac3b6829b46b9bbeff202b6c592c22d724"
+                    "fb3d25a347cca6c5b8f20d567e4bb04b9cfa85d17f691590f9a9d32e8ccc9102e9d"
+                    "cf8a7e6716280cd642ce48d03fdf114c9f57c20d9472bb0f81c147645e6fa3d331"
+                )
+            )
+        )
+        == "35d5d2f545d9511a"
+    )
 
 
 def test_streaming_xxh64():
     hasher = checksums.StreamingXXH64(123)
-    hasher.update(b'te')
-    hasher.update(b'st')
-    assert bin_to_hex(hasher.digest()) == hasher.hexdigest() == '2b81b9401bef86cf'
+    hasher.update(b"te")
+    hasher.update(b"st")
+    assert bin_to_hex(hasher.digest()) == hasher.hexdigest() == "2b81b9401bef86cf"
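
The seeded one-shot and streaming paths must agree, which is what these two tests pin down. The same check can be reproduced outside borg with the third-party xxhash package (an assumption for illustration: it is not a borg dependency, it merely implements the same XXH64 algorithm):

import xxhash  # pip install xxhash

h = xxhash.xxh64(seed=123)
h.update(b"te")
h.update(b"st")
assert xxhash.xxh64(b"test", seed=123).hexdigest() == h.hexdigest() == "2b81b9401bef86cf"
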

+ 69 - 53
src/borg/testsuite/chunker.py

@@ -12,113 +12,129 @@ def cf(chunks):
     """chunk filter"""
     # this is to simplify testing: either return the data piece (bytes) or the hole length (int).
     def _cf(chunk):
-        if chunk.meta['allocation'] == CH_DATA:
-            assert len(chunk.data) == chunk.meta['size']
+        if chunk.meta["allocation"] == CH_DATA:
+            assert len(chunk.data) == chunk.meta["size"]
             return bytes(chunk.data)  # make sure we have bytes, not memoryview
-        if chunk.meta['allocation'] in (CH_HOLE, CH_ALLOC):
+        if chunk.meta["allocation"] in (CH_HOLE, CH_ALLOC):
             assert chunk.data is None
-            return chunk.meta['size']
+            return chunk.meta["size"]
         assert False, "unexpected allocation value"
+
     return [_cf(chunk) for chunk in chunks]


 class ChunkerFixedTestCase(BaseTestCase):
-
     def test_chunkify_just_blocks(self):
-        data = b'foobar' * 1500
+        data = b"foobar" * 1500
         chunker = ChunkerFixed(4096)
         parts = cf(chunker.chunkify(BytesIO(data)))
         self.assert_equal(parts, [data[0:4096], data[4096:8192], data[8192:]])

     def test_chunkify_header_and_blocks(self):
-        data = b'foobar' * 1500
+        data = b"foobar" * 1500
         chunker = ChunkerFixed(4096, 123)
         parts = cf(chunker.chunkify(BytesIO(data)))
-        self.assert_equal(parts, [data[0:123], data[123:123+4096], data[123+4096:123+8192], data[123+8192:]])
+        self.assert_equal(
+            parts, [data[0:123], data[123 : 123 + 4096], data[123 + 4096 : 123 + 8192], data[123 + 8192 :]]
+        )
 
     def test_chunkify_just_blocks_fmap_complete(self):
-        data = b'foobar' * 1500
+        data = b"foobar" * 1500
         chunker = ChunkerFixed(4096)
-        fmap = [
-            (0, 4096, True),
-            (4096, 8192, True),
-            (8192, 99999999, True),
-        ]
+        fmap = [(0, 4096, True), (4096, 8192, True), (8192, 99999999, True)]
         parts = cf(chunker.chunkify(BytesIO(data), fmap=fmap))
         self.assert_equal(parts, [data[0:4096], data[4096:8192], data[8192:]])

     def test_chunkify_header_and_blocks_fmap_complete(self):
-        data = b'foobar' * 1500
+        data = b"foobar" * 1500
         chunker = ChunkerFixed(4096, 123)
-        fmap = [
-            (0, 123, True),
-            (123, 4096, True),
-            (123+4096, 4096, True),
-            (123+8192, 4096, True),
-        ]
+        fmap = [(0, 123, True), (123, 4096, True), (123 + 4096, 4096, True), (123 + 8192, 4096, True)]
         parts = cf(chunker.chunkify(BytesIO(data), fmap=fmap))
-        self.assert_equal(parts, [data[0:123], data[123:123+4096], data[123+4096:123+8192], data[123+8192:]])
+        self.assert_equal(
+            parts, [data[0:123], data[123 : 123 + 4096], data[123 + 4096 : 123 + 8192], data[123 + 8192 :]]
+        )
 
     def test_chunkify_header_and_blocks_fmap_zeros(self):
-        data = b'H' * 123 + b'_' * 4096 + b'X' * 4096 + b'_' * 4096
+        data = b"H" * 123 + b"_" * 4096 + b"X" * 4096 + b"_" * 4096
         chunker = ChunkerFixed(4096, 123)
-        fmap = [
-            (0, 123, True),
-            (123, 4096, False),
-            (123+4096, 4096, True),
-            (123+8192, 4096, False),
-        ]
+        fmap = [(0, 123, True), (123, 4096, False), (123 + 4096, 4096, True), (123 + 8192, 4096, False)]
         parts = cf(chunker.chunkify(BytesIO(data), fmap=fmap))
         # because we marked the '_' ranges as holes, we will get hole ranges instead!
-        self.assert_equal(parts, [data[0:123], 4096, data[123+4096:123+8192], 4096])
+        self.assert_equal(parts, [data[0:123], 4096, data[123 + 4096 : 123 + 8192], 4096])
 
     def test_chunkify_header_and_blocks_fmap_partial(self):
-        data = b'H' * 123 + b'_' * 4096 + b'X' * 4096 + b'_' * 4096
+        data = b"H" * 123 + b"_" * 4096 + b"X" * 4096 + b"_" * 4096
         chunker = ChunkerFixed(4096, 123)
         fmap = [
             (0, 123, True),
             # (123, 4096, False),
-            (123+4096, 4096, True),
+            (123 + 4096, 4096, True),
             # (123+8192, 4096, False),
         ]
         parts = cf(chunker.chunkify(BytesIO(data), fmap=fmap))
         # because we left out the '_' ranges from the fmap, we will not get them at all!
-        self.assert_equal(parts, [data[0:123], data[123+4096:123+8192]])
+        self.assert_equal(parts, [data[0:123], data[123 + 4096 : 123 + 8192]])
 
 
 class ChunkerTestCase(BaseTestCase):
-
     def test_chunkify(self):
-        data = b'0' * int(1.5 * (1 << CHUNK_MAX_EXP)) + b'Y'
+        data = b"0" * int(1.5 * (1 << CHUNK_MAX_EXP)) + b"Y"
         parts = cf(Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(data)))
         self.assert_equal(len(parts), 2)
-        self.assert_equal(b''.join(parts), data)
-        self.assert_equal(cf(Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b''))), [])
-        self.assert_equal(cf(Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b'foobarboobaz' * 3))), [b'fooba', b'rboobaz', b'fooba', b'rboobaz', b'fooba', b'rboobaz'])
-        self.assert_equal(cf(Chunker(1, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b'foobarboobaz' * 3))), [b'fo', b'obarb', b'oob', b'azf', b'oobarb', b'oob', b'azf', b'oobarb', b'oobaz'])
-        self.assert_equal(cf(Chunker(2, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b'foobarboobaz' * 3))), [b'foob', b'ar', b'boobazfoob', b'ar', b'boobazfoob', b'ar', b'boobaz'])
-        self.assert_equal(cf(Chunker(0, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))), [b'foobarboobaz' * 3])
-        self.assert_equal(cf(Chunker(1, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))), [b'foobar', b'boobazfo', b'obar', b'boobazfo', b'obar', b'boobaz'])
-        self.assert_equal(cf(Chunker(2, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))), [b'foob', b'arboobaz', b'foob', b'arboobaz', b'foob', b'arboobaz'])
-        self.assert_equal(cf(Chunker(0, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))), [b'foobarboobaz' * 3])
-        self.assert_equal(cf(Chunker(1, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))), [b'foobarbo', b'obazfoobar', b'boobazfo', b'obarboobaz'])
-        self.assert_equal(cf(Chunker(2, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b'foobarboobaz' * 3))), [b'foobarboobaz', b'foobarboobaz', b'foobarboobaz'])
+        self.assert_equal(b"".join(parts), data)
+        self.assert_equal(cf(Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b""))), [])
+        self.assert_equal(
+            cf(Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b"foobarboobaz" * 3))),
+            [b"fooba", b"rboobaz", b"fooba", b"rboobaz", b"fooba", b"rboobaz"],
+        )
+        self.assert_equal(
+            cf(Chunker(1, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b"foobarboobaz" * 3))),
+            [b"fo", b"obarb", b"oob", b"azf", b"oobarb", b"oob", b"azf", b"oobarb", b"oobaz"],
+        )
+        self.assert_equal(
+            cf(Chunker(2, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b"foobarboobaz" * 3))),
+            [b"foob", b"ar", b"boobazfoob", b"ar", b"boobazfoob", b"ar", b"boobaz"],
+        )
+        self.assert_equal(
+            cf(Chunker(0, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))), [b"foobarboobaz" * 3]
+        )
+        self.assert_equal(
+            cf(Chunker(1, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))),
+            [b"foobar", b"boobazfo", b"obar", b"boobazfo", b"obar", b"boobaz"],
+        )
+        self.assert_equal(
+            cf(Chunker(2, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))),
+            [b"foob", b"arboobaz", b"foob", b"arboobaz", b"foob", b"arboobaz"],
+        )
+        self.assert_equal(
+            cf(Chunker(0, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))), [b"foobarboobaz" * 3]
+        )
+        self.assert_equal(
+            cf(Chunker(1, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))),
+            [b"foobarbo", b"obazfoobar", b"boobazfo", b"obarboobaz"],
+        )
+        self.assert_equal(
+            cf(Chunker(2, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))),
+            [b"foobarboobaz", b"foobarboobaz", b"foobarboobaz"],
+        )
 
     def test_buzhash(self):
-        self.assert_equal(buzhash(b'abcdefghijklmnop', 0), 3795437769)
-        self.assert_equal(buzhash(b'abcdefghijklmnop', 1), 3795400502)
-        self.assert_equal(buzhash(b'abcdefghijklmnop', 1), buzhash_update(buzhash(b'Xabcdefghijklmno', 1), ord('X'), ord('p'), 16, 1))
+        self.assert_equal(buzhash(b"abcdefghijklmnop", 0), 3795437769)
+        self.assert_equal(buzhash(b"abcdefghijklmnop", 1), 3795400502)
+        self.assert_equal(
+            buzhash(b"abcdefghijklmnop", 1), buzhash_update(buzhash(b"Xabcdefghijklmno", 1), ord("X"), ord("p"), 16, 1)
+        )
         # Test with more than 31 bytes to make sure our barrel_shift macro works correctly
-        self.assert_equal(buzhash(b'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz', 0), 566521248)
+        self.assert_equal(buzhash(b"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz", 0), 566521248)
 
     def test_small_reads(self):
         class SmallReadFile:
-            input = b'a' * (20 + 1)
+            input = b"a" * (20 + 1)
 
             def read(self, nbytes):
                 self.input = self.input[:-1]
                 return self.input[:1]

         chunker = get_chunker(*CHUNKER_PARAMS, seed=0)
-        reconstructed = b''.join(cf(chunker.chunkify(SmallReadFile())))
-        assert reconstructed == b'a' * 20
+        reconstructed = b"".join(cf(chunker.chunkify(SmallReadFile())))
+        assert reconstructed == b"a" * 20
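
What the ChunkerFixed tests above encode, in miniature: an optional header chunk of header_size bytes comes first, then fixed block_size pieces, with a shorter tail at EOF. A pure-Python model of that expectation (a sketch of the test contract, not of borg's actual chunker implementation):

from io import BytesIO

def fixed_chunks(f, block_size, header_size=0):
    # header first (if any), then fixed-size blocks until EOF
    if header_size:
        yield f.read(header_size)
    while True:
        block = f.read(block_size)
        if not block:
            break
        yield block

data = b"foobar" * 1500  # 9000 bytes, as in test_chunkify_header_and_blocks
parts = list(fixed_chunks(BytesIO(data), 4096, 123))
assert parts == [data[0:123], data[123 : 123 + 4096], data[123 + 4096 : 123 + 8192], data[123 + 8192 :]]
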

+ 40 - 51
src/borg/testsuite/chunker_pytest.py

@@ -12,37 +12,27 @@ BS = 4096  # fs block size
 
 # some sparse files. X = content blocks, _ = sparse blocks.
 # X__XXX____
-map_sparse1 = [
-    (0 * BS, 1 * BS, True),
-    (1 * BS, 2 * BS, False),
-    (3 * BS, 3 * BS, True),
-    (6 * BS, 4 * BS, False),
-]
+map_sparse1 = [(0 * BS, 1 * BS, True), (1 * BS, 2 * BS, False), (3 * BS, 3 * BS, True), (6 * BS, 4 * BS, False)]
 
 # _XX___XXXX
-map_sparse2 = [
-    (0 * BS, 1 * BS, False),
-    (1 * BS, 2 * BS, True),
-    (3 * BS, 3 * BS, False),
-    (6 * BS, 4 * BS, True),
-]
+map_sparse2 = [(0 * BS, 1 * BS, False), (1 * BS, 2 * BS, True), (3 * BS, 3 * BS, False), (6 * BS, 4 * BS, True)]
 
 # XXX
-map_notsparse = [(0 * BS, 3 * BS, True), ]
+map_notsparse = [(0 * BS, 3 * BS, True)]
 
 # ___
-map_onlysparse = [(0 * BS, 3 * BS, False), ]
+map_onlysparse = [(0 * BS, 3 * BS, False)]
 
 
 def make_sparsefile(fname, sparsemap, header_size=0):
-    with open(fname, 'wb') as fd:
+    with open(fname, "wb") as fd:
         total = 0
         if header_size:
-            fd.write(b'H' * header_size)
+            fd.write(b"H" * header_size)
             total += header_size
         for offset, size, is_data in sparsemap:
             if is_data:
-                fd.write(b'X' * size)
+                fd.write(b"X" * size)
             else:
                 fd.seek(size, os.SEEK_CUR)
             total += size
@@ -54,11 +44,11 @@ def make_content(sparsemap, header_size=0):
     result = []
     total = 0
     if header_size:
-        result.append(b'H' * header_size)
+        result.append(b"H" * header_size)
         total += header_size
     for offset, size, is_data in sparsemap:
         if is_data:
-            result.append(b'X' * size)  # bytes!
+            result.append(b"X" * size)  # bytes!
         else:
             result.append(size)  # int!
         total += size
@@ -69,9 +59,9 @@ def fs_supports_sparse():
     if not has_seek_hole:
         return False
     with tempfile.TemporaryDirectory() as tmpdir:
-        fn = os.path.join(tmpdir, 'test_sparse')
+        fn = os.path.join(tmpdir, "test_sparse")
         make_sparsefile(fn, [(0, BS, False), (BS, BS, True)])
-        with open(fn, 'rb') as f:
+        with open(fn, "rb") as f:
             try:
                 offset_hole = f.seek(0, os.SEEK_HOLE)
                 offset_data = f.seek(0, os.SEEK_DATA)
@@ -81,15 +71,12 @@ def fs_supports_sparse():
         return offset_hole == 0 and offset_data == BS
 
 
-@pytest.mark.skipif(not fs_supports_sparse(), reason='fs does not support sparse files')
-@pytest.mark.parametrize("fname, sparse_map", [
-    ('sparse1', map_sparse1),
-    ('sparse2', map_sparse2),
-    ('onlysparse', map_onlysparse),
-    ('notsparse', map_notsparse),
-])
+@pytest.mark.skipif(not fs_supports_sparse(), reason="fs does not support sparse files")
+@pytest.mark.parametrize(
+    "fname, sparse_map",
+    [("sparse1", map_sparse1), ("sparse2", map_sparse2), ("onlysparse", map_onlysparse), ("notsparse", map_notsparse)],
+)
 def test_sparsemap(tmpdir, fname, sparse_map):
-
     def get_sparsemap_fh(fname):
         fh = os.open(fname, flags=os.O_RDONLY)
         try:
@@ -98,7 +85,7 @@ def test_sparsemap(tmpdir, fname, sparse_map):
             os.close(fh)

     def get_sparsemap_fd(fname):
-        with open(fname, 'rb') as fd:
+        with open(fname, "rb") as fd:
             return list(sparsemap(fd=fd))

     fn = str(tmpdir / fname)
@@ -107,30 +94,32 @@ def test_sparsemap(tmpdir, fname, sparse_map):
     assert get_sparsemap_fd(fn) == sparse_map
 
 
-@pytest.mark.skipif(not fs_supports_sparse(), reason='fs does not support sparse files')
-@pytest.mark.parametrize("fname, sparse_map, header_size, sparse", [
-    ('sparse1', map_sparse1, 0, False),
-    ('sparse1', map_sparse1, 0, True),
-    ('sparse1', map_sparse1, BS, False),
-    ('sparse1', map_sparse1, BS, True),
-    ('sparse2', map_sparse2, 0, False),
-    ('sparse2', map_sparse2, 0, True),
-    ('sparse2', map_sparse2, BS, False),
-    ('sparse2', map_sparse2, BS, True),
-    ('onlysparse', map_onlysparse, 0, False),
-    ('onlysparse', map_onlysparse, 0, True),
-    ('onlysparse', map_onlysparse, BS, False),
-    ('onlysparse', map_onlysparse, BS, True),
-    ('notsparse', map_notsparse, 0, False),
-    ('notsparse', map_notsparse, 0, True),
-    ('notsparse', map_notsparse, BS, False),
-    ('notsparse', map_notsparse, BS, True),
-])
+@pytest.mark.skipif(not fs_supports_sparse(), reason="fs does not support sparse files")
+@pytest.mark.parametrize(
+    "fname, sparse_map, header_size, sparse",
+    [
+        ("sparse1", map_sparse1, 0, False),
+        ("sparse1", map_sparse1, 0, True),
+        ("sparse1", map_sparse1, BS, False),
+        ("sparse1", map_sparse1, BS, True),
+        ("sparse2", map_sparse2, 0, False),
+        ("sparse2", map_sparse2, 0, True),
+        ("sparse2", map_sparse2, BS, False),
+        ("sparse2", map_sparse2, BS, True),
+        ("onlysparse", map_onlysparse, 0, False),
+        ("onlysparse", map_onlysparse, 0, True),
+        ("onlysparse", map_onlysparse, BS, False),
+        ("onlysparse", map_onlysparse, BS, True),
+        ("notsparse", map_notsparse, 0, False),
+        ("notsparse", map_notsparse, 0, True),
+        ("notsparse", map_notsparse, BS, False),
+        ("notsparse", map_notsparse, BS, True),
+    ],
+)
 def test_chunkify_sparse(tmpdir, fname, sparse_map, header_size, sparse):
-
     def get_chunks(fname, sparse, header_size):
         chunker = ChunkerFixed(4096, header_size=header_size, sparse=sparse)
-        with open(fname, 'rb') as fd:
+        with open(fname, "rb") as fd:
             return cf(chunker.chunkify(fd))

     fn = str(tmpdir / fname)
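
The (offset, size, is_data) triples used throughout this module describe a file as alternating data and hole ranges. A rough sketch of how such a map can fall out of SEEK_DATA/SEEK_HOLE (borg's real sparsemap, imported by this module, is the authoritative version and handles more corner cases):

import errno, os

def naive_sparsemap(fd):
    # yields (offset, size, is_data) tuples covering the whole file
    size = os.fstat(fd.fileno()).st_size
    pos = 0
    while pos < size:
        try:
            data = fd.seek(pos, os.SEEK_DATA)
        except OSError as e:
            if e.errno == errno.ENXIO:  # nothing but a hole until EOF
                yield (pos, size - pos, False)
                return
            raise
        if data > pos:
            yield (pos, data - pos, False)  # hole before the data range
        hole = fd.seek(data, os.SEEK_HOLE)  # end of the data range
        yield (data, hole - data, True)
        pos = hole
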

+ 3 - 4
src/borg/testsuite/chunker_slow.py

@@ -9,7 +9,6 @@ from . import BaseTestCase
 
 
 class ChunkerRegressionTestCase(BaseTestCase):
-
     def test_chunkpoints_unchanged(self):
         def twist(size):
             x = 1
@@ -31,10 +30,10 @@ class ChunkerRegressionTestCase(BaseTestCase):
                         for seed in (1849058162, 1234567653):
                             fh = BytesIO(data)
                             chunker = Chunker(seed, minexp, maxexp, maskbits, winsize)
-                            chunks = [blake2b_256(b'', c) for c in cf(chunker.chunkify(fh, -1))]
-                            runs.append(blake2b_256(b'', b''.join(chunks)))
+                            chunks = [blake2b_256(b"", c) for c in cf(chunker.chunkify(fh, -1))]
+                            runs.append(blake2b_256(b"", b"".join(chunks)))
 
         # The "correct" hash below matches the existing chunker behavior.
         # The "correct" hash below matches the existing chunker behavior.
         # Future chunker optimisations must not change this, or existing repos will bloat.
         # Future chunker optimisations must not change this, or existing repos will bloat.
-        overall_hash = blake2b_256(b'', b''.join(runs))
+        overall_hash = blake2b_256(b"", b"".join(runs))
         self.assert_equal(overall_hash, unhexlify("b559b0ac8df8daaa221201d018815114241ea5c6609d98913cd2246a702af4e3"))
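
The regression test's trick is worth spelling out: hash every chunk, then hash the concatenation of those hashes, so that any shift in chunk boundaries, however small, changes a single final digest. The same idea with hashlib only (a sketch; the test itself uses borg's keyed blake2b_256 across many parameter combinations):

import hashlib

def chunking_fingerprint(chunks):
    digests = [hashlib.blake2b(c, digest_size=32).digest() for c in chunks]
    return hashlib.blake2b(b"".join(digests), digest_size=32).hexdigest()

# same bytes, different boundaries -> different fingerprint
assert chunking_fingerprint([b"ab", b"cd"]) != chunking_fingerprint([b"a", b"bcd"])
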

+ 50 - 49
src/borg/testsuite/compress.py

@@ -1,5 +1,6 @@
 import os
 import zlib
+
 try:
     import lzma
 except ImportError:
@@ -11,23 +12,23 @@ from ..compress import get_compressor, Compressor, CompressionSpec, CNONE, ZLIB,
 
 
 buffer = bytes(2**16)
-data = b'fooooooooobaaaaaaaar' * 10
-params = dict(name='zlib', level=6)
+data = b"fooooooooobaaaaaaaar" * 10
+params = dict(name="zlib", level=6)
 
 
 def test_get_compressor():
-    c = get_compressor(name='none')
+    c = get_compressor(name="none")
     assert isinstance(c, CNONE)
-    c = get_compressor(name='lz4')
+    c = get_compressor(name="lz4")
     assert isinstance(c, LZ4)
-    c = get_compressor(name='zlib')
+    c = get_compressor(name="zlib")
     assert isinstance(c, ZLIB)
     with pytest.raises(KeyError):
-        get_compressor(name='foobar')
+        get_compressor(name="foobar")
 
 
 def test_cnull():
-    c = get_compressor(name='none')
+    c = get_compressor(name="none")
     cdata = c.compress(data)
     assert len(cdata) > len(data)
     assert data in cdata  # it's not compressed and just in there 1:1
@@ -36,7 +37,7 @@ def test_cnull():
 
 
 def test_lz4():
-    c = get_compressor(name='lz4')
+    c = get_compressor(name="lz4")
     cdata = c.compress(data)
     assert len(cdata) < len(data)
     assert data == c.decompress(cdata)
@@ -45,18 +46,18 @@ def test_lz4():
 
 def test_lz4_buffer_allocation(monkeypatch):
     # disable fallback to no compression on incompressible data
-    monkeypatch.setattr(LZ4, 'decide', lambda always_compress: LZ4)
+    monkeypatch.setattr(LZ4, "decide", lambda always_compress: LZ4)
     # test with a rather huge data object to see if buffer allocation / resizing works
     data = os.urandom(5 * 2**20) * 10  # 50MiB badly compressible data
     assert len(data) == 50 * 2**20
-    c = Compressor('lz4')
+    c = Compressor("lz4")
     cdata = c.compress(data)
     assert len(cdata) > len(data)
     assert data == c.decompress(cdata)


 def test_zlib():
-    c = get_compressor(name='zlib')
+    c = get_compressor(name="zlib")
     cdata = c.compress(data)
     assert len(cdata) < len(data)
     assert data == c.decompress(cdata)
@@ -66,7 +67,7 @@ def test_zlib():
 def test_lzma():
     if lzma is None:
         pytest.skip("No lzma support found.")
-    c = get_compressor(name='lzma')
+    c = get_compressor(name="lzma")
     cdata = c.compress(data)
     assert len(cdata) < len(data)
     assert data == c.decompress(cdata)
@@ -74,7 +75,7 @@ def test_lzma():
 
 
 def test_zstd():
-    c = get_compressor(name='zstd')
+    c = get_compressor(name="zstd")
     cdata = c.compress(data)
     assert len(cdata) < len(data)
     assert data == c.decompress(cdata)
@@ -83,16 +84,16 @@ def test_zstd():
 
 def test_autodetect_invalid():
     with pytest.raises(ValueError):
-        Compressor(**params).decompress(b'\xff\xfftotalcrap')
+        Compressor(**params).decompress(b"\xff\xfftotalcrap")
     with pytest.raises(ValueError):
-        Compressor(**params).decompress(b'\x08\x00notreallyzlib')
+        Compressor(**params).decompress(b"\x08\x00notreallyzlib")
 
 
 def test_zlib_legacy_compat():
     # for compatibility reasons, we do not add an extra header for zlib,
     # nor do we expect one when decompressing / autodetecting
     for level in range(10):
-        c = get_compressor(name='zlib_legacy', level=level)
+        c = get_compressor(name="zlib_legacy", level=level)
         cdata1 = c.compress(data)
         cdata2 = zlib.compress(data, level)
         assert cdata1 == cdata2
@@ -104,19 +105,19 @@ def test_zlib_legacy_compat():
 
 def test_compressor():
     params_list = [
-        dict(name='none'),
-        dict(name='lz4'),
-        dict(name='zstd', level=1),
-        dict(name='zstd', level=3),
+        dict(name="none"),
+        dict(name="lz4"),
+        dict(name="zstd", level=1),
+        dict(name="zstd", level=3),
         # avoiding high zstd levels, memory needs unclear
-        dict(name='zlib', level=0),
-        dict(name='zlib', level=6),
-        dict(name='zlib', level=9),
+        dict(name="zlib", level=0),
+        dict(name="zlib", level=6),
+        dict(name="zlib", level=9),
     ]
     if lzma:
         params_list += [
-            dict(name='lzma', level=0),
-            dict(name='lzma', level=6),
+            dict(name="lzma", level=0),
+            dict(name="lzma", level=6),
             # we do not test lzma on level 9 because of the huge memory needs
         ]
     for params in params_list:
@@ -125,9 +126,9 @@ def test_compressor():
 
 
 def test_auto():
-    compressor_auto_zlib = CompressionSpec('auto,zlib,9').compressor
-    compressor_lz4 = CompressionSpec('lz4').compressor
-    compressor_zlib = CompressionSpec('zlib,9').compressor
+    compressor_auto_zlib = CompressionSpec("auto,zlib,9").compressor
+    compressor_lz4 = CompressionSpec("lz4").compressor
+    compressor_zlib = CompressionSpec("zlib,9").compressor
     data = bytes(500)
     compressed_auto_zlib = compressor_auto_zlib.compress(data)
     compressed_lz4 = compressor_lz4.compress(data)
@@ -135,13 +136,13 @@ def test_auto():
     ratio = len(compressed_zlib) / len(compressed_lz4)
     assert Compressor.detect(compressed_auto_zlib)[0] == ZLIB if ratio < 0.99 else LZ4
 
-    data = b'\x00\xb8\xa3\xa2-O\xe1i\xb6\x12\x03\xc21\xf3\x8a\xf78\\\x01\xa5b\x07\x95\xbeE\xf8\xa3\x9ahm\xb1~'
+    data = b"\x00\xb8\xa3\xa2-O\xe1i\xb6\x12\x03\xc21\xf3\x8a\xf78\\\x01\xa5b\x07\x95\xbeE\xf8\xa3\x9ahm\xb1~"
     compressed = compressor_auto_zlib.compress(data)
     assert Compressor.detect(compressed)[0] == CNONE


 def test_obfuscate():
-    compressor = CompressionSpec('obfuscate,1,none').compressor
+    compressor = CompressionSpec("obfuscate,1,none").compressor
     data = bytes(10000)
     compressed = compressor.compress(data)
     # 2 id bytes compression, 2 id bytes obfuscator. 4 length bytes
@@ -149,7 +150,7 @@ def test_obfuscate():
     # compressing 100 times the same data should give at least 50 different result sizes
     assert len({len(compressor.compress(data)) for i in range(100)}) > 50
 
-    cs = CompressionSpec('obfuscate,2,lz4')
+    cs = CompressionSpec("obfuscate,2,lz4")
     assert isinstance(cs.inner.compressor, LZ4)
     compressor = cs.compressor
     data = bytes(10000)
@@ -160,7 +161,7 @@ def test_obfuscate():
     # compressing 100 times the same data should give multiple different result sizes
     assert len({len(compressor.compress(data)) for i in range(100)}) > 10
 
-    cs = CompressionSpec('obfuscate,6,zstd,3')
+    cs = CompressionSpec("obfuscate,6,zstd,3")
     assert isinstance(cs.inner.compressor, ZSTD)
     compressor = cs.compressor
     data = bytes(10000)
@@ -171,7 +172,7 @@ def test_obfuscate():
     # compressing 100 times the same data should give multiple different result sizes
     assert len({len(compressor.compress(data)) for i in range(100)}) > 90
 
-    cs = CompressionSpec('obfuscate,2,auto,zstd,10')
+    cs = CompressionSpec("obfuscate,2,auto,zstd,10")
     assert isinstance(cs.inner.compressor, Auto)
     compressor = cs.compressor
     data = bytes(10000)
@@ -182,7 +183,7 @@ def test_obfuscate():
     # compressing 100 times the same data should give multiple different result sizes
     assert len({len(compressor.compress(data)) for i in range(100)}) > 10
 
-    cs = CompressionSpec('obfuscate,110,none')
+    cs = CompressionSpec("obfuscate,110,none")
     assert isinstance(cs.inner.compressor, CNONE)
     compressor = cs.compressor
     data = bytes(1000)
@@ -199,44 +200,44 @@ def test_obfuscate():
 def test_compression_specs():
 def test_compression_specs():
     with pytest.raises(ValueError):
     with pytest.raises(ValueError):
-        CompressionSpec('')
+        CompressionSpec("")
 
 
-    assert isinstance(CompressionSpec('lz4').compressor, LZ4)
+    assert isinstance(CompressionSpec("none").compressor, CNONE)
+    assert isinstance(CompressionSpec("lz4").compressor, LZ4)
 
 
+    zlib = CompressionSpec("zlib").compressor
     assert isinstance(zlib, ZLIB)
     assert isinstance(zlib, ZLIB)
     assert zlib.level == 6
     assert zlib.level == 6
-    zlib = CompressionSpec('zlib,0').compressor
+    zlib = CompressionSpec("zlib,0").compressor
     assert isinstance(zlib, ZLIB)
     assert isinstance(zlib, ZLIB)
     assert zlib.level == 0
     assert zlib.level == 0
-    zlib = CompressionSpec('zlib,9').compressor
+    zlib = CompressionSpec("zlib,9").compressor
     assert isinstance(zlib, ZLIB)
     assert isinstance(zlib, ZLIB)
     assert zlib.level == 9
     assert zlib.level == 9
     with pytest.raises(ValueError):
     with pytest.raises(ValueError):
-        CompressionSpec('zlib,9,invalid')
+        CompressionSpec("zlib,9,invalid")
 
 
+    lzma = CompressionSpec("lzma").compressor
     assert isinstance(lzma, LZMA)
     assert isinstance(lzma, LZMA)
     assert lzma.level == 6
     assert lzma.level == 6
-    lzma = CompressionSpec('lzma,0').compressor
+    lzma = CompressionSpec("lzma,0").compressor
     assert isinstance(lzma, LZMA)
     assert isinstance(lzma, LZMA)
     assert lzma.level == 0
     assert lzma.level == 0
-    lzma = CompressionSpec('lzma,9').compressor
+    lzma = CompressionSpec("lzma,9").compressor
     assert isinstance(lzma, LZMA)
     assert isinstance(lzma, LZMA)
     assert lzma.level == 9
     assert lzma.level == 9
 
 
+    zstd = CompressionSpec("zstd").compressor
     assert isinstance(zstd, ZSTD)
     assert isinstance(zstd, ZSTD)
     assert zstd.level == 3
     assert zstd.level == 3
-    zstd = CompressionSpec('zstd,1').compressor
+    zstd = CompressionSpec("zstd,1").compressor
     assert isinstance(zstd, ZSTD)
     assert isinstance(zstd, ZSTD)
     assert zstd.level == 1
     assert zstd.level == 1
-    zstd = CompressionSpec('zstd,22').compressor
+    zstd = CompressionSpec("zstd,22").compressor
     assert isinstance(zstd, ZSTD)
     assert isinstance(zstd, ZSTD)
     assert zstd.level == 22
     assert zstd.level == 22
 
 
     with pytest.raises(ValueError):
     with pytest.raises(ValueError):
-        CompressionSpec('lzma,9,invalid')
+        CompressionSpec("lzma,9,invalid")
     with pytest.raises(ValueError):
     with pytest.raises(ValueError):
-        CompressionSpec('invalid')
+        CompressionSpec("invalid")

+ 128 - 113
src/borg/testsuite/crypto.py

@@ -16,18 +16,17 @@ from . import BaseTestCase
 
 
 class CryptoTestCase(BaseTestCase):
-
     def test_bytes_to_int(self):
-        self.assert_equal(bytes_to_int(b'\0\0\0\1'), 1)
+        self.assert_equal(bytes_to_int(b"\0\0\0\1"), 1)
 
     def test_bytes_to_long(self):
-        self.assert_equal(bytes_to_long(b'\0\0\0\0\0\0\0\1'), 1)
-        self.assert_equal(long_to_bytes(1), b'\0\0\0\0\0\0\0\1')
+        self.assert_equal(bytes_to_long(b"\0\0\0\0\0\0\0\1"), 1)
+        self.assert_equal(long_to_bytes(1), b"\0\0\0\0\0\0\0\1")
 
     def test_UNENCRYPTED(self):
-        iv = b''  # any IV is ok, it just must be set and not None
-        data = b'data'
-        header = b'header'
+        iv = b""  # any IV is ok, it just must be set and not None
+        data = b"data"
+        header = b"header"
         cs = UNENCRYPTED(None, None, iv, header_len=6)
         envelope = cs.encrypt(data, header=header)
         self.assert_equal(envelope, header + data)
@@ -36,11 +35,11 @@ class CryptoTestCase(BaseTestCase):
 
     def test_AES256_CTR_HMAC_SHA256(self):
         # this tests the layout as in attic / borg < 1.2 (1 type byte, no aad)
-        mac_key = b'Y' * 32
-        enc_key = b'X' * 32
+        mac_key = b"Y" * 32
+        enc_key = b"X" * 32
         iv = 0
-        data = b'foo' * 10
-        header = b'\x42'
+        data = b"foo" * 10
+        header = b"\x42"
         # encrypt-then-mac
         cs = AES256_CTR_HMAC_SHA256(mac_key, enc_key, iv, header_len=1, aad_offset=1)
         hdr_mac_iv_cdata = cs.encrypt(data, header=header)
@@ -48,10 +47,10 @@ class CryptoTestCase(BaseTestCase):
         mac = hdr_mac_iv_cdata[1:33]
         iv = hdr_mac_iv_cdata[33:41]
         cdata = hdr_mac_iv_cdata[41:]
-        self.assert_equal(hexlify(hdr), b'42')
-        self.assert_equal(hexlify(mac), b'af90b488b0cc4a8f768fe2d6814fa65aec66b148135e54f7d4d29a27f22f57a8')
-        self.assert_equal(hexlify(iv), b'0000000000000000')
-        self.assert_equal(hexlify(cdata), b'c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466')
+        self.assert_equal(hexlify(hdr), b"42")
+        self.assert_equal(hexlify(mac), b"af90b488b0cc4a8f768fe2d6814fa65aec66b148135e54f7d4d29a27f22f57a8")
+        self.assert_equal(hexlify(iv), b"0000000000000000")
+        self.assert_equal(hexlify(cdata), b"c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466")
         self.assert_equal(cs.next_iv(), 2)
         # auth-then-decrypt
         cs = AES256_CTR_HMAC_SHA256(mac_key, enc_key, header_len=len(header), aad_offset=1)
@@ -60,16 +59,15 @@ class CryptoTestCase(BaseTestCase):
         self.assert_equal(cs.next_iv(), 2)
         # auth-failure due to corruption (corrupted data)
         cs = AES256_CTR_HMAC_SHA256(mac_key, enc_key, header_len=len(header), aad_offset=1)
-        hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:41] + b'\0' + hdr_mac_iv_cdata[42:]
-        self.assert_raises(IntegrityError,
-                           lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted))
+        hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:41] + b"\0" + hdr_mac_iv_cdata[42:]
+        self.assert_raises(IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted))
 
     def test_AES256_CTR_HMAC_SHA256_aad(self):
-        mac_key = b'Y' * 32
-        enc_key = b'X' * 32
+        mac_key = b"Y" * 32
+        enc_key = b"X" * 32
         iv = 0
-        data = b'foo' * 10
-        header = b'\x12\x34\x56'
+        data = b"foo" * 10
+        header = b"\x12\x34\x56"
         # encrypt-then-mac
         cs = AES256_CTR_HMAC_SHA256(mac_key, enc_key, iv, header_len=3, aad_offset=1)
         hdr_mac_iv_cdata = cs.encrypt(data, header=header)
@@ -77,10 +75,10 @@ class CryptoTestCase(BaseTestCase):
         mac = hdr_mac_iv_cdata[3:35]
         iv = hdr_mac_iv_cdata[35:43]
         cdata = hdr_mac_iv_cdata[43:]
-        self.assert_equal(hexlify(hdr), b'123456')
-        self.assert_equal(hexlify(mac), b'7659a915d9927072ef130258052351a17ef882692893c3850dd798c03d2dd138')
-        self.assert_equal(hexlify(iv), b'0000000000000000')
-        self.assert_equal(hexlify(cdata), b'c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466')
+        self.assert_equal(hexlify(hdr), b"123456")
+        self.assert_equal(hexlify(mac), b"7659a915d9927072ef130258052351a17ef882692893c3850dd798c03d2dd138")
+        self.assert_equal(hexlify(iv), b"0000000000000000")
+        self.assert_equal(hexlify(cdata), b"c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466")
         self.assert_equal(cs.next_iv(), 2)
         # auth-then-decrypt
         cs = AES256_CTR_HMAC_SHA256(mac_key, enc_key, header_len=len(header), aad_offset=1)
@@ -89,24 +87,27 @@ class CryptoTestCase(BaseTestCase):
         self.assert_equal(cs.next_iv(), 2)
         # auth-failure due to corruption (corrupted aad)
         cs = AES256_CTR_HMAC_SHA256(mac_key, enc_key, header_len=len(header), aad_offset=1)
-        hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:1] + b'\0' + hdr_mac_iv_cdata[2:]
-        self.assert_raises(IntegrityError,
-                           lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted))
+        hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:1] + b"\0" + hdr_mac_iv_cdata[2:]
+        self.assert_raises(IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted))
 
     def test_AE(self):
         # used in legacy-like layout (1 type byte, no aad)
-        key = b'X' * 32
+        key = b"X" * 32
         iv_int = 0
-        data = b'foo' * 10
-        header = b'\x23' + iv_int.to_bytes(12, 'big')
+        data = b"foo" * 10
+        header = b"\x23" + iv_int.to_bytes(12, "big")
         tests = [
             # (ciphersuite class, exp_mac, exp_cdata)
-            (AES256_OCB,
-             b'b6909c23c9aaebd9abbe1ff42097652d',
-             b'877ce46d2f62dee54699cebc3ba41d9ab613f7c486778c1b3636664b1493', ),
-            (CHACHA20_POLY1305,
-             b'fd08594796e0706cde1e8b461e3e0555',
-             b'a093e4b0387526f085d3c40cca84a35230a5c0dd766453b77ba38bcff775', )
+            (
+                AES256_OCB,
+                b"b6909c23c9aaebd9abbe1ff42097652d",
+                b"877ce46d2f62dee54699cebc3ba41d9ab613f7c486778c1b3636664b1493",
+            ),
+            (
+                CHACHA20_POLY1305,
+                b"fd08594796e0706cde1e8b461e3e0555",
+                b"a093e4b0387526f085d3c40cca84a35230a5c0dd766453b77ba38bcff775",
+            ),
         ]
         for cs_cls, exp_mac, exp_cdata in tests:
             # print(repr(cs_cls))
@@ -117,9 +118,9 @@ class CryptoTestCase(BaseTestCase):
             iv = hdr_mac_iv_cdata[1:13]
             mac = hdr_mac_iv_cdata[13:29]
             cdata = hdr_mac_iv_cdata[29:]
-            self.assert_equal(hexlify(hdr), b'23')
+            self.assert_equal(hexlify(hdr), b"23")
             self.assert_equal(hexlify(mac), exp_mac)
-            self.assert_equal(hexlify(iv), b'000000000000000000000000')
+            self.assert_equal(hexlify(iv), b"000000000000000000000000")
             self.assert_equal(hexlify(cdata), exp_cdata)
             self.assert_equal(cs.next_iv(), 1)
             # auth/decrypt
@@ -129,24 +130,27 @@ class CryptoTestCase(BaseTestCase):
             self.assert_equal(cs.next_iv(), 1)
             # auth-failure due to corruption (corrupted data)
             cs = cs_cls(key, iv_int, header_len=len(header), aad_offset=1)
-            hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:29] + b'\0' + hdr_mac_iv_cdata[30:]
-            self.assert_raises(IntegrityError,
-                               lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted))
+            hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:29] + b"\0" + hdr_mac_iv_cdata[30:]
+            self.assert_raises(IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted))

     def test_AEAD(self):
         # test with aad
-        key = b'X' * 32
+        key = b"X" * 32
         iv_int = 0
-        data = b'foo' * 10
-        header = b'\x12\x34\x56' + iv_int.to_bytes(12, 'big')
+        data = b"foo" * 10
+        header = b"\x12\x34\x56" + iv_int.to_bytes(12, "big")
         tests = [
             # (ciphersuite class, exp_mac, exp_cdata)
-            (AES256_OCB,
-             b'f2748c412af1c7ead81863a18c2c1893',
-             b'877ce46d2f62dee54699cebc3ba41d9ab613f7c486778c1b3636664b1493', ),
-            (CHACHA20_POLY1305,
-             b'b7e7c9a79f2404e14f9aad156bf091dd',
-             b'a093e4b0387526f085d3c40cca84a35230a5c0dd766453b77ba38bcff775', )
+            (
+                AES256_OCB,
+                b"f2748c412af1c7ead81863a18c2c1893",
+                b"877ce46d2f62dee54699cebc3ba41d9ab613f7c486778c1b3636664b1493",
+            ),
+            (
+                CHACHA20_POLY1305,
+                b"b7e7c9a79f2404e14f9aad156bf091dd",
+                b"a093e4b0387526f085d3c40cca84a35230a5c0dd766453b77ba38bcff775",
+            ),
         ]
         for cs_cls, exp_mac, exp_cdata in tests:
             # print(repr(cs_cls))
@@ -157,9 +161,9 @@ class CryptoTestCase(BaseTestCase):
             iv = hdr_mac_iv_cdata[3:15]
             mac = hdr_mac_iv_cdata[15:31]
             cdata = hdr_mac_iv_cdata[31:]
-            self.assert_equal(hexlify(hdr), b'123456')
+            self.assert_equal(hexlify(hdr), b"123456")
             self.assert_equal(hexlify(mac), exp_mac)
-            self.assert_equal(hexlify(iv), b'000000000000000000000000')
+            self.assert_equal(hexlify(iv), b"000000000000000000000000")
             self.assert_equal(hexlify(cdata), exp_cdata)
             self.assert_equal(cs.next_iv(), 1)
             # auth/decrypt
@@ -169,101 +173,117 @@ class CryptoTestCase(BaseTestCase):
             self.assert_equal(cs.next_iv(), 1)
             # auth-failure due to corruption (corrupted aad)
             cs = cs_cls(key, iv_int, header_len=len(header), aad_offset=1)
-            hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:1] + b'\0' + hdr_mac_iv_cdata[2:]
-            self.assert_raises(IntegrityError,
-                               lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted))
+            hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:1] + b"\0" + hdr_mac_iv_cdata[2:]
+            self.assert_raises(IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted))

     def test_AEAD_with_more_AAD(self):
         # test giving extra aad to the .encrypt() and .decrypt() calls
-        key = b'X' * 32
+        key = b"X" * 32
         iv_int = 0
-        data = b'foo' * 10
-        header = b'\x12\x34'
+        data = b"foo" * 10
+        header = b"\x12\x34"
         tests = [AES256_OCB, CHACHA20_POLY1305]
         for cs_cls in tests:
             # encrypt/mac
             cs = cs_cls(key, iv_int, header_len=len(header), aad_offset=0)
-            hdr_mac_iv_cdata = cs.encrypt(data, header=header, aad=b'correct_chunkid')
+            hdr_mac_iv_cdata = cs.encrypt(data, header=header, aad=b"correct_chunkid")
             # successful auth/decrypt (correct aad)
             cs = cs_cls(key, iv_int, header_len=len(header), aad_offset=0)
-            pdata = cs.decrypt(hdr_mac_iv_cdata, aad=b'correct_chunkid')
+            pdata = cs.decrypt(hdr_mac_iv_cdata, aad=b"correct_chunkid")
             self.assert_equal(data, pdata)
             # unsuccessful auth (incorrect aad)
             cs = cs_cls(key, iv_int, header_len=len(header), aad_offset=0)
-            self.assert_raises(IntegrityError,
-                               lambda: cs.decrypt(hdr_mac_iv_cdata, aad=b'incorrect_chunkid'))
+            self.assert_raises(IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata, aad=b"incorrect_chunkid"))
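What these assertions pin down is the defining AEAD property: the additional authenticated data is bound into the tag, so decryption under different AAD must fail authentication. A minimal standalone sketch of the same property (pyca/cryptography, not borg's ciphersuite classes):

from cryptography.exceptions import InvalidTag
from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305

cipher = ChaCha20Poly1305(b"X" * 32)
nonce = b"\0" * 12  # 96-bit nonce, analogous to the zero IV used above
cdata = cipher.encrypt(nonce, b"foo" * 10, b"correct_chunkid")
assert cipher.decrypt(nonce, cdata, b"correct_chunkid") == b"foo" * 10
try:
    cipher.decrypt(nonce, cdata, b"incorrect_chunkid")  # wrong AAD
except InvalidTag:
    pass  # authentication fails, as the tests expect of borg's suites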

     # These test vectors come from https://www.kullo.net/blog/hkdf-sha-512-test-vectors/
     # who claims to have verified these against independent Python and C++ implementations.

     def test_hkdf_hmac_sha512(self):
-        ikm = b'\x0b' * 22
-        salt = bytes.fromhex('000102030405060708090a0b0c')
-        info = bytes.fromhex('f0f1f2f3f4f5f6f7f8f9')
+        ikm = b"\x0b" * 22
+        salt = bytes.fromhex("000102030405060708090a0b0c")
+        info = bytes.fromhex("f0f1f2f3f4f5f6f7f8f9")
         l = 42

         okm = hkdf_hmac_sha512(ikm, salt, info, l)
-        assert okm == bytes.fromhex('832390086cda71fb47625bb5ceb168e4c8e26a1a16ed34d9fc7fe92c1481579338da362cb8d9f925d7cb')
+        assert okm == bytes.fromhex(
+            "832390086cda71fb47625bb5ceb168e4c8e26a1a16ed34d9fc7fe92c1481579338da362cb8d9f925d7cb"
+        )

     def test_hkdf_hmac_sha512_2(self):
-        ikm = bytes.fromhex('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f2021222324252627'
-                            '28292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f')
-        salt = bytes.fromhex('606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868'
-                             '788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf')
-        info = bytes.fromhex('b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7'
-                             'd8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff')
+        ikm = bytes.fromhex(
+            "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f2021222324252627"
+            "28292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f"
+        )
+        salt = bytes.fromhex(
+            "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868"
+            "788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
+        )
+        info = bytes.fromhex(
+            "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7"
+            "d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff"
+        )
         l = 82

         okm = hkdf_hmac_sha512(ikm, salt, info, l)
-        assert okm == bytes.fromhex('ce6c97192805b346e6161e821ed165673b84f400a2b514b2fe23d84cd189ddf1b695b48cbd1c838844'
-                                    '1137b3ce28f16aa64ba33ba466b24df6cfcb021ecff235f6a2056ce3af1de44d572097a8505d9e7a93')
+        assert okm == bytes.fromhex(
+            "ce6c97192805b346e6161e821ed165673b84f400a2b514b2fe23d84cd189ddf1b695b48cbd1c838844"
+            "1137b3ce28f16aa64ba33ba466b24df6cfcb021ecff235f6a2056ce3af1de44d572097a8505d9e7a93"
+        )

     def test_hkdf_hmac_sha512_3(self):
-        ikm = bytes.fromhex('0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b')
+        ikm = bytes.fromhex("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b")
         salt = None
-        info = b''
+        info = b""
         l = 42

         okm = hkdf_hmac_sha512(ikm, salt, info, l)
-        assert okm == bytes.fromhex('f5fa02b18298a72a8c23898a8703472c6eb179dc204c03425c970e3b164bf90fff22d04836d0e2343bac')
+        assert okm == bytes.fromhex(
+            "f5fa02b18298a72a8c23898a8703472c6eb179dc204c03425c970e3b164bf90fff22d04836d0e2343bac"
+        )

     def test_hkdf_hmac_sha512_4(self):
-        ikm = bytes.fromhex('0b0b0b0b0b0b0b0b0b0b0b')
-        salt = bytes.fromhex('000102030405060708090a0b0c')
-        info = bytes.fromhex('f0f1f2f3f4f5f6f7f8f9')
+        ikm = bytes.fromhex("0b0b0b0b0b0b0b0b0b0b0b")
+        salt = bytes.fromhex("000102030405060708090a0b0c")
+        info = bytes.fromhex("f0f1f2f3f4f5f6f7f8f9")
         l = 42

         okm = hkdf_hmac_sha512(ikm, salt, info, l)
-        assert okm == bytes.fromhex('7413e8997e020610fbf6823f2ce14bff01875db1ca55f68cfcf3954dc8aff53559bd5e3028b080f7c068')
+        assert okm == bytes.fromhex(
+            "7413e8997e020610fbf6823f2ce14bff01875db1ca55f68cfcf3954dc8aff53559bd5e3028b080f7c068"
+        )

     def test_hkdf_hmac_sha512_5(self):
-        ikm = bytes.fromhex('0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c')
+        ikm = bytes.fromhex("0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c")
         salt = None
-        info = b''
+        info = b""
         l = 42

         okm = hkdf_hmac_sha512(ikm, salt, info, l)
-        assert okm == bytes.fromhex('1407d46013d98bc6decefcfee55f0f90b0c7f63d68eb1a80eaf07e953cfc0a3a5240a155d6e4daa965bb')
+        assert okm == bytes.fromhex(
+            "1407d46013d98bc6decefcfee55f0f90b0c7f63d68eb1a80eaf07e953cfc0a3a5240a155d6e4daa965bb"
+        )
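HKDF (RFC 5869) is short enough to sketch against these vectors with nothing but the standard library; this reproduces the first vector above (a sketch, not borg's hkdf_hmac_sha512):

import hashlib
import hmac

def hkdf_sha512(ikm, salt, info, length):
    hash_len = 64  # SHA-512 digest size
    if salt is None:
        salt = b"\0" * hash_len
    prk = hmac.new(salt, ikm, hashlib.sha512).digest()  # extract step
    okm, t = b"", b""
    for i in range((length + hash_len - 1) // hash_len):  # expand step
        t = hmac.new(prk, t + info + bytes([i + 1]), hashlib.sha512).digest()
        okm += t
    return okm[:length]

okm = hkdf_sha512(
    b"\x0b" * 22, bytes.fromhex("000102030405060708090a0b0c"), bytes.fromhex("f0f1f2f3f4f5f6f7f8f9"), 42
)
assert okm == bytes.fromhex(
    "832390086cda71fb47625bb5ceb168e4c8e26a1a16ed34d9fc7fe92c1481579338da362cb8d9f925d7cb"
)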


 def test_decrypt_key_file_argon2_chacha20_poly1305():
-    plain = b'hello'
+    plain = b"hello"
     # echo -n "hello, pass phrase" | argon2 saltsaltsaltsalt -id -t 1 -k 8 -p 1 -l 32 -r
-    key = bytes.fromhex('a1b0cba145c154fbd8960996c5ce3428e9920cfe53c84ef08b4102a70832bcec')
+    key = bytes.fromhex("a1b0cba145c154fbd8960996c5ce3428e9920cfe53c84ef08b4102a70832bcec")
     ae_cipher = CHACHA20_POLY1305(key=key, iv=0, header_len=0, aad_offset=0)

     envelope = ae_cipher.encrypt(plain)

-    encrypted = msgpack.packb({
-        'version': 1,
-        'salt': b'salt'*4,
-        'argon2_time_cost': 1,
-        'argon2_memory_cost': 8,
-        'argon2_parallelism': 1,
-        'argon2_type': b'id',
-        'algorithm': 'argon2 chacha20-poly1305',
-        'data': envelope,
-    })
+    encrypted = msgpack.packb(
+        {
+            "version": 1,
+            "salt": b"salt" * 4,
+            "argon2_time_cost": 1,
+            "argon2_memory_cost": 8,
+            "argon2_parallelism": 1,
+            "argon2_type": b"id",
+            "algorithm": "argon2 chacha20-poly1305",
+            "data": envelope,
+        }
+    )
     key = CHPOKeyfileKey(None)

     decrypted = key.decrypt_key_file(encrypted, "hello, pass phrase")
@@ -272,20 +292,15 @@ def test_decrypt_key_file_argon2_chacha20_poly1305():


 def test_decrypt_key_file_pbkdf2_sha256_aes256_ctr_hmac_sha256():
-    plain = b'hello'
-    salt = b'salt'*4
+    plain = b"hello"
+    salt = b"salt" * 4
     passphrase = "hello, pass phrase"
     key = FlexiKey.pbkdf2(passphrase, salt, 1, 32)
     hash = hmac_sha256(key, plain)
-    data = AES(key, b'\0'*16).encrypt(plain)
-    encrypted = msgpack.packb({
-        'version': 1,
-        'algorithm': 'sha256',
-        'iterations': 1,
-        'salt': salt,
-        'data': data,
-        'hash': hash,
-    })
+    data = AES(key, b"\0" * 16).encrypt(plain)
+    encrypted = msgpack.packb(
+        {"version": 1, "algorithm": "sha256", "iterations": 1, "salt": salt, "data": data, "hash": hash}
+    )
     key = CHPOKeyfileKey(None)

     decrypted = key.decrypt_key_file(encrypted, passphrase)
@@ -293,7 +308,7 @@ def test_decrypt_key_file_pbkdf2_sha256_aes256_ctr_hmac_sha256():
     assert decrypted == plain


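Both key-file tests hinge on a KDF stretching the passphrase into the 32-byte file key. Standalone sketches of the two derivations used above (the Argon2 one assumes the argon2-cffi package; borg's own wrapper may differ):

import hashlib

from argon2.low_level import Type, hash_secret_raw

# argon2id, mirroring the CLI call quoted in the argon2 test above
argon2_key = hash_secret_raw(
    secret=b"hello, pass phrase",
    salt=b"salt" * 4,
    time_cost=1,
    memory_cost=8,  # KiB
    parallelism=1,
    hash_len=32,
    type=Type.ID,
)  # expected per the vector above: a1b0cba145c154fb...

# PBKDF2-HMAC-SHA256 with a single iteration, as in the pbkdf2 test
pbkdf2_key = hashlib.pbkdf2_hmac("sha256", b"hello, pass phrase", b"salt" * 4, 1, dklen=32)
assert len(argon2_key) == len(pbkdf2_key) == 32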
-@unittest.mock.patch('getpass.getpass')
+@unittest.mock.patch("getpass.getpass")
 def test_repo_key_detect_does_not_raise_integrity_error(getpass, monkeypatch):
     """https://github.com/borgbackup/borg/pull/6469#discussion_r832670411

@@ -322,10 +337,10 @@ def test_repo_key_detect_does_not_raise_integrity_error(getpass, monkeypatch):
     2. FlexiKey.detect() relies on that interface - it tries an empty passphrase before prompting the user
     3. my initial implementation of decrypt_key_file_argon2() was simply passing through the IntegrityError() from AES256_CTR_BASE.decrypt()
     """
-    repository = MagicMock(id=b'repository_id')
+    repository = MagicMock(id=b"repository_id")
     getpass.return_value = "hello, pass phrase"
-    monkeypatch.setenv('BORG_DISPLAY_PASSPHRASE', 'no')
-    AESOCBRepoKey.create(repository, args=MagicMock(key_algorithm='argon2'))
+    monkeypatch.setenv("BORG_DISPLAY_PASSPHRASE", "no")
+    AESOCBRepoKey.create(repository, args=MagicMock(key_algorithm="argon2"))
     repository.load_key.return_value = repository.save_key.call_args.args[0]

     AESOCBRepoKey.detect(repository, manifest_data=None)

+ 12 - 12
src/borg/testsuite/efficient_collection_queue.py

@@ -6,13 +6,13 @@ from ..helpers.datastruct import EfficientCollectionQueue
 class TestEfficientQueue:
     def test_base_usage(self):
         queue = EfficientCollectionQueue(100, bytes)
-        assert queue.peek_front() == b''
-        queue.push_back(b'1234')
-        assert queue.peek_front() == b'1234'
+        assert queue.peek_front() == b""
+        queue.push_back(b"1234")
+        assert queue.peek_front() == b"1234"
         assert len(queue) == 4
         assert queue
         queue.pop_front(4)
-        assert queue.peek_front() == b''
+        assert queue.peek_front() == b""
         assert len(queue) == 0
         assert not queue

@@ -30,22 +30,22 @@ class TestEfficientQueue:

     def test_chunking(self):
         queue = EfficientCollectionQueue(2, bytes)
-        queue.push_back(b'1')
-        queue.push_back(b'23')
-        queue.push_back(b'4567')
+        queue.push_back(b"1")
+        queue.push_back(b"23")
+        queue.push_back(b"4567")
         assert len(queue) == 7
-        assert queue.peek_front() == b'12'
+        assert queue.peek_front() == b"12"
         queue.pop_front(3)
-        assert queue.peek_front() == b'4'
+        assert queue.peek_front() == b"4"
         queue.pop_front(1)
-        assert queue.peek_front() == b'56'
+        assert queue.peek_front() == b"56"
         queue.pop_front(2)
         assert len(queue) == 1
         assert queue
         with pytest.raises(EfficientCollectionQueue.SizeUnderflow):
             queue.pop_front(2)
-        assert queue.peek_front() == b'7'
+        assert queue.peek_front() == b"7"
         queue.pop_front(1)
-        assert queue.peek_front() == b''
+        assert queue.peek_front() == b""
         assert len(queue) == 0
         assert not queue
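To make the chunking semantics above concrete: pushes are coalesced into fixed-size chunks, and peek_front() exposes at most the remainder of the current front chunk. A simplified behavioral sketch (bytes-only, plain Python; not borg's implementation, which is generic over the collection type and avoids the copying done here):

class TinyChunkQueue:
    class SizeUnderflow(Exception):
        pass

    def __init__(self, chunk_size):
        self.chunk_size = chunk_size
        self.chunks = []  # bytes objects, each at most chunk_size long
        self.offset = 0   # read position inside chunks[0]
        self.size = 0

    def __len__(self):
        return self.size

    def push_back(self, data):
        self.size += len(data)
        while data:
            if self.chunks and len(self.chunks[-1]) < self.chunk_size:
                room = self.chunk_size - len(self.chunks[-1])
                self.chunks[-1] += data[:room]  # top up the last, partial chunk
                data = data[room:]
            else:
                self.chunks.append(data[: self.chunk_size])
                data = data[self.chunk_size :]

    def peek_front(self):
        return self.chunks[0][self.offset :] if self.chunks else b""

    def pop_front(self, size):
        if size > self.size:
            raise self.SizeUnderflow
        self.size -= size
        while size:
            take = min(size, len(self.chunks[0]) - self.offset)
            self.offset += take
            size -= take
            if self.offset == len(self.chunks[0]):
                self.chunks.pop(0)
                self.offset = 0

q = TinyChunkQueue(2)
q.push_back(b"1"); q.push_back(b"23"); q.push_back(b"4567")
assert q.peek_front() == b"12" and len(q) == 7
q.pop_front(3)
assert q.peek_front() == b"4"  # the partially consumed front chunk, as in the test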

+ 51 - 58
src/borg/testsuite/file_integrity.py

@@ -5,34 +5,30 @@ from ..crypto.file_integrity import IntegrityCheckedFile, DetachedIntegrityCheck

 class TestReadIntegrityFile:
     def test_no_integrity(self, tmpdir):
-        protected_file = tmpdir.join('file')
-        protected_file.write('1234')
+        protected_file = tmpdir.join("file")
+        protected_file.write("1234")
         assert DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file)) is None

     def test_truncated_integrity(self, tmpdir):
-        protected_file = tmpdir.join('file')
-        protected_file.write('1234')
-        tmpdir.join('file.integrity').write('')
+        protected_file = tmpdir.join("file")
+        protected_file.write("1234")
+        tmpdir.join("file.integrity").write("")
         with pytest.raises(FileIntegrityError):
             DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file))

     def test_unknown_algorithm(self, tmpdir):
-        protected_file = tmpdir.join('file')
-        protected_file.write('1234')
-        tmpdir.join('file.integrity').write('{"algorithm": "HMAC_SERIOUSHASH", "digests": "1234"}')
+        protected_file = tmpdir.join("file")
+        protected_file.write("1234")
+        tmpdir.join("file.integrity").write('{"algorithm": "HMAC_SERIOUSHASH", "digests": "1234"}')
         assert DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file)) is None

-    @pytest.mark.parametrize('json', (
-        '{"ALGORITHM": "HMAC_SERIOUSHASH", "digests": "1234"}',
-        '[]',
-        '1234.5',
-        '"A string"',
-        'Invalid JSON',
-    ))
+    @pytest.mark.parametrize(
+        "json", ('{"ALGORITHM": "HMAC_SERIOUSHASH", "digests": "1234"}', "[]", "1234.5", '"A string"', "Invalid JSON")
+    )
     def test_malformed(self, tmpdir, json):
-        protected_file = tmpdir.join('file')
-        protected_file.write('1234')
-        tmpdir.join('file.integrity').write(json)
+        protected_file = tmpdir.join("file")
+        protected_file.write("1234")
+        tmpdir.join("file.integrity").write(json)
         with pytest.raises(FileIntegrityError):
             DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file))

@@ -40,74 +36,71 @@ class TestReadIntegrityFile:
 class TestDetachedIntegrityCheckedFile:
     @pytest.fixture
     def integrity_protected_file(self, tmpdir):
-        path = str(tmpdir.join('file'))
+        path = str(tmpdir.join("file"))
         with DetachedIntegrityCheckedFile(path, write=True) as fd:
-            fd.write(b'foo and bar')
+            fd.write(b"foo and bar")
         return path

     def test_simple(self, tmpdir, integrity_protected_file):
-        assert tmpdir.join('file').check(file=True)
-        assert tmpdir.join('file.integrity').check(file=True)
+        assert tmpdir.join("file").check(file=True)
+        assert tmpdir.join("file.integrity").check(file=True)
         with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd:
-            assert fd.read() == b'foo and bar'
+            assert fd.read() == b"foo and bar"

     def test_corrupted_file(self, integrity_protected_file):
-        with open(integrity_protected_file, 'ab') as fd:
-            fd.write(b' extra data')
+        with open(integrity_protected_file, "ab") as fd:
+            fd.write(b" extra data")
         with pytest.raises(FileIntegrityError):
             with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd:
-                assert fd.read() == b'foo and bar extra data'
+                assert fd.read() == b"foo and bar extra data"

     def test_corrupted_file_partial_read(self, integrity_protected_file):
-        with open(integrity_protected_file, 'ab') as fd:
-            fd.write(b' extra data')
+        with open(integrity_protected_file, "ab") as fd:
+            fd.write(b" extra data")
         with pytest.raises(FileIntegrityError):
             with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd:
-                data = b'foo and bar'
+                data = b"foo and bar"
                 assert fd.read(len(data)) == data

-    @pytest.mark.parametrize('new_name', (
-        'different_file',
-        'different_file.different_ext',
-    ))
+    @pytest.mark.parametrize("new_name", ("different_file", "different_file.different_ext"))
     def test_renamed_file(self, tmpdir, integrity_protected_file, new_name):
         new_path = tmpdir.join(new_name)
-        tmpdir.join('file').move(new_path)
-        tmpdir.join('file.integrity').move(new_path + '.integrity')
+        tmpdir.join("file").move(new_path)
+        tmpdir.join("file.integrity").move(new_path + ".integrity")
         with pytest.raises(FileIntegrityError):
             with DetachedIntegrityCheckedFile(str(new_path), write=False) as fd:
-                assert fd.read() == b'foo and bar'
+                assert fd.read() == b"foo and bar"

     def test_moved_file(self, tmpdir, integrity_protected_file):
-        new_dir = tmpdir.mkdir('another_directory')
-        tmpdir.join('file').move(new_dir.join('file'))
-        tmpdir.join('file.integrity').move(new_dir.join('file.integrity'))
-        new_path = str(new_dir.join('file'))
+        new_dir = tmpdir.mkdir("another_directory")
+        tmpdir.join("file").move(new_dir.join("file"))
+        tmpdir.join("file.integrity").move(new_dir.join("file.integrity"))
+        new_path = str(new_dir.join("file"))
         with DetachedIntegrityCheckedFile(new_path, write=False) as fd:
-            assert fd.read() == b'foo and bar'
+            assert fd.read() == b"foo and bar"

     def test_no_integrity(self, tmpdir, integrity_protected_file):
-        tmpdir.join('file.integrity').remove()
+        tmpdir.join("file.integrity").remove()
         with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd:
-            assert fd.read() == b'foo and bar'
+            assert fd.read() == b"foo and bar"


 class TestDetachedIntegrityCheckedFileParts:
     @pytest.fixture
     def integrity_protected_file(self, tmpdir):
-        path = str(tmpdir.join('file'))
+        path = str(tmpdir.join("file"))
         with DetachedIntegrityCheckedFile(path, write=True) as fd:
-            fd.write(b'foo and bar')
-            fd.hash_part('foopart')
-            fd.write(b' other data')
+            fd.write(b"foo and bar")
+            fd.hash_part("foopart")
+            fd.write(b" other data")
         return path

     def test_simple(self, integrity_protected_file):
         with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd:
-            data1 = b'foo and bar'
+            data1 = b"foo and bar"
             assert fd.read(len(data1)) == data1
-            fd.hash_part('foopart')
-            assert fd.read() == b' other data'
+            fd.hash_part("foopart")
+            assert fd.read() == b" other data"

     def test_wrong_part_name(self, integrity_protected_file):
         with pytest.raises(FileIntegrityError):
@@ -115,25 +108,25 @@ class TestDetachedIntegrityCheckedFileParts:
             # the failing hash_part. This is intentional: (1) it makes the code simpler (2) it's a good fail-safe
             # against overly broad exception handling.
             with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd:
-                data1 = b'foo and bar'
+                data1 = b"foo and bar"
                 assert fd.read(len(data1)) == data1
                 with pytest.raises(FileIntegrityError):
                     # This specific bit raises it directly
-                    fd.hash_part('barpart')
+                    fd.hash_part("barpart")
                 # Still explodes in the end.

-    @pytest.mark.parametrize('partial_read', (False, True))
+    @pytest.mark.parametrize("partial_read", (False, True))
     def test_part_independence(self, integrity_protected_file, partial_read):
-        with open(integrity_protected_file, 'ab') as fd:
-            fd.write(b'some extra stuff that does not belong')
+        with open(integrity_protected_file, "ab") as fd:
+            fd.write(b"some extra stuff that does not belong")
         with pytest.raises(FileIntegrityError):
             with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd:
-                data1 = b'foo and bar'
+                data1 = b"foo and bar"
                 try:
                     assert fd.read(len(data1)) == data1
-                    fd.hash_part('foopart')
+                    fd.hash_part("foopart")
                 except FileIntegrityError:
-                    assert False, 'This part must not raise, since this part is still valid.'
+                    assert False, "This part must not raise, since this part is still valid."
                 if not partial_read:
                     fd.read()
                 # But overall it explodes with the final digest. Neat, eh?

+ 36 - 32
src/borg/testsuite/hashindex.py

@@ -15,7 +15,7 @@ from . import BaseTestCase, unopened_tempfile

 def H(x):
     # make some 32byte long thing that depends on x
-    return bytes('%-0.32d' % x, 'ascii')
+    return bytes("%-0.32d" % x, "ascii")
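A note on what this helper yields, since the format string is easy to misread: the .32 precision zero-pads the decimal to 32 digits, so every key is exactly 32 bytes long, e.g.:

assert bytes("%-0.32d" % 1, "ascii") == b"0" * 31 + b"1"  # 32 bytes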


 def H2(x):
@@ -24,7 +24,6 @@ def H2(x):


 class HashIndexTestCase(BaseTestCase):
-
     def _generic_test(self, cls, make_value, sha):
         idx = cls()
         self.assert_equal(len(idx), 0)
@@ -57,7 +56,7 @@ class HashIndexTestCase(BaseTestCase):
             idx.write(filepath)
             del idx
             # Verify file contents
-            with open(filepath, 'rb') as fd:
+            with open(filepath, "rb") as fd:
                 self.assert_equal(hashlib.sha256(fd.read()).hexdigest(), sha)
             # Make sure we can open the file
             idx = cls.read(filepath)
@@ -86,12 +85,14 @@ class HashIndexTestCase(BaseTestCase):
         del idx

     def test_nsindex(self):
-        self._generic_test(NSIndex, lambda x: (x, x, x),
-                           '7d70671d0b7e9d2f51b2691ecf35184b9f8ecc1202cceb2748c905c8fc04c256')
+        self._generic_test(
+            NSIndex, lambda x: (x, x, x), "7d70671d0b7e9d2f51b2691ecf35184b9f8ecc1202cceb2748c905c8fc04c256"
+        )

     def test_chunkindex(self):
-        self._generic_test(ChunkIndex, lambda x: (x, x),
-                           '85f72b036c692c8266e4f51ccf0cff2147204282b5e316ae508d30a448d88fef')
+        self._generic_test(
+            ChunkIndex, lambda x: (x, x), "85f72b036c692c8266e4f51ccf0cff2147204282b5e316ae508d30a448d88fef"
+        )

     def test_resize(self):
         n = 2000  # Must be >= MIN_BUCKETS
@@ -218,8 +219,8 @@ class HashIndexTestCase(BaseTestCase):


 class HashIndexExtraTestCase(BaseTestCase):
-    """These tests are separate because they should not become part of the selftest.
-    """
+    """These tests are separate because they should not become part of the selftest."""
+
     def test_chunk_indexer(self):
         # see _hashindex.c hash_sizes, we want to be close to the max. load
         # because interesting errors happen there.
@@ -227,7 +228,7 @@ class HashIndexExtraTestCase(BaseTestCase):
         index = ChunkIndex(key_count)
         all_keys = [hashlib.sha256(H(k)).digest() for k in range(key_count)]
         # we're gonna delete 1/3 of all_keys, so let's split them 2/3 and 1/3:
-        keys, to_delete_keys = all_keys[0:(2*key_count//3)], all_keys[(2*key_count//3):]
+        keys, to_delete_keys = all_keys[0 : (2 * key_count // 3)], all_keys[(2 * key_count // 3) :]

         for i, key in enumerate(keys):
             index[key] = (i, i)
@@ -286,6 +287,7 @@ class HashIndexRefcountingTestCase(BaseTestCase):
             idx1.merge(idx2)
             refcount, *_ = idx1[H(1)]
             return refcount
+
         result = merge(refcounta, refcountb)
         # check for commutativity
         assert result == merge(refcountb, refcounta)
@@ -367,22 +369,24 @@ class HashIndexRefcountingTestCase(BaseTestCase):

 class HashIndexDataTestCase(BaseTestCase):
     # This bytestring was created with borg2-pre 2022-06-10
-    HASHINDEX = b'eJzt0LEJg1AYhdE/JqBjOEJMNhBBrAQrO9ewc+HsoG+CPMsEz1cfbnHbceqXoZvvEVE+IuoqMu2pnOE4' \
-                b'juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4' \
-                b'juM4juM4juM4jruie36vuSVT5N0rzW0n9t7r5z9+4TiO4ziO4ziO4ziO4ziO4ziO4ziO4ziO4ziO4ziO' \
-                b'4ziO4ziO4ziO4ziO4ziO437LHbSVHGw='
+    HASHINDEX = (
+        b"eJzt0LEJg1AYhdE/JqBjOEJMNhBBrAQrO9ewc+HsoG+CPMsEz1cfbnHbceqXoZvvEVE+IuoqMu2pnOE4"
+        b"juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4juM4"
+        b"juM4juM4juM4jruie36vuSVT5N0rzW0n9t7r5z9+4TiO4ziO4ziO4ziO4ziO4ziO4ziO4ziO4ziO4ziO"
+        b"4ziO4ziO4ziO4ziO4ziO437LHbSVHGw="
+    )

     def _serialize_hashindex(self, idx):
         with tempfile.TemporaryDirectory() as tempdir:
-            file = os.path.join(tempdir, 'idx')
+            file = os.path.join(tempdir, "idx")
             idx.write(file)
-            with open(file, 'rb') as f:
+            with open(file, "rb") as f:
                 return self._pack(f.read())

     def _deserialize_hashindex(self, bytestring):
         with tempfile.TemporaryDirectory() as tempdir:
-            file = os.path.join(tempdir, 'idx')
-            with open(file, 'wb') as f:
+            file = os.path.join(tempdir, "idx")
+            with open(file, "wb") as f:
                 f.write(self._unpack(bytestring))
             return ChunkIndex.read(file)

@@ -416,19 +420,19 @@ class HashIndexDataTestCase(BaseTestCase):
 class HashIndexIntegrityTestCase(HashIndexDataTestCase):
     def write_integrity_checked_index(self, tempdir):
         idx = self._deserialize_hashindex(self.HASHINDEX)
-        file = os.path.join(tempdir, 'idx')
+        file = os.path.join(tempdir, "idx")
         with IntegrityCheckedFile(path=file, write=True) as fd:
             idx.write(fd)
         integrity_data = fd.integrity_data
-        assert 'final' in integrity_data
-        assert 'HashHeader' in integrity_data
+        assert "final" in integrity_data
+        assert "HashHeader" in integrity_data
         return file, integrity_data

     def test_integrity_checked_file(self):
         with tempfile.TemporaryDirectory() as tempdir:
             file, integrity_data = self.write_integrity_checked_index(tempdir)
-            with open(file, 'r+b') as fd:
-                fd.write(b'Foo')
+            with open(file, "r+b") as fd:
+                fd.write(b"Foo")
             with self.assert_raises(FileIntegrityError):
                 with IntegrityCheckedFile(path=file, write=False, integrity_data=integrity_data) as fd:
                     ChunkIndex.read(fd)
@@ -437,15 +441,15 @@ class HashIndexIntegrityTestCase(HashIndexDataTestCase):
 class HashIndexCompactTestCase(HashIndexDataTestCase):
     def index(self, num_entries, num_buckets):
         index_data = io.BytesIO()
-        index_data.write(b'BORG_IDX')
+        index_data.write(b"BORG_IDX")
         # num_entries
-        index_data.write(num_entries.to_bytes(4, 'little'))
+        index_data.write(num_entries.to_bytes(4, "little"))
         # num_buckets
-        index_data.write(num_buckets.to_bytes(4, 'little'))
+        index_data.write(num_buckets.to_bytes(4, "little"))
         # key_size
-        index_data.write((32).to_bytes(1, 'little'))
+        index_data.write((32).to_bytes(1, "little"))
         # value_size
-        index_data.write((3 * 4).to_bytes(1, 'little'))
+        index_data.write((3 * 4).to_bytes(1, "little"))

         self.index_data = index_data

@@ -468,13 +472,13 @@ class HashIndexCompactTestCase(HashIndexDataTestCase):
     def write_entry(self, key, *values):
         self.index_data.write(key)
         for value in values:
-            self.index_data.write(value.to_bytes(4, 'little'))
+            self.index_data.write(value.to_bytes(4, "little"))

     def write_empty(self, key):
-        self.write_entry(key, 0xffffffff, 0, 0)
+        self.write_entry(key, 0xFFFFFFFF, 0, 0)

     def write_deleted(self, key):
-        self.write_entry(key, 0xfffffffe, 0, 0)
+        self.write_entry(key, 0xFFFFFFFE, 0, 0)

     def test_simple(self):
         self.index(num_entries=3, num_buckets=6)
@@ -600,7 +604,7 @@ class IndexCorruptionTestCase(BaseTestCase):
             # first 4 bytes. giving a specific x targets bucket index x.
             # y is to create different keys and does not go into the bucket index calculation.
             # so, same x + different y --> collision
-            return pack('<IIIIIIII', x, y, z, 0, 0, 0, 0, 0)  # 8 * 4 == 32
+            return pack("<IIIIIIII", x, y, z, 0, 0, 0, 0, 0)  # 8 * 4 == 32

         idx = NSIndex()

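The index() fixture above fabricates the on-disk header byte by byte; reading it back is a one-liner with struct (a sketch inferred from the test code, not from the C implementation):

import struct

def parse_header(buf):
    magic, num_entries, num_buckets, key_size, value_size = struct.unpack_from("<8sIIBB", buf)
    assert magic == b"BORG_IDX"
    return num_entries, num_buckets, key_size, value_size

hdr = b"BORG_IDX" + (3).to_bytes(4, "little") + (6).to_bytes(4, "little") + bytes([32]) + bytes([12])
assert parse_header(hdr) == (3, 6, 32, 12)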

File diff suppressed because it is too large
+ 395 - 340
src/borg/testsuite/helpers.py


+ 36 - 39
src/borg/testsuite/item.py

@@ -11,20 +11,20 @@ def test_item_empty():

     assert item.as_dict() == {}

-    assert 'path' not in item
+    assert "path" not in item
     with pytest.raises(ValueError):
-        'invalid-key' in item
+        "invalid-key" in item
     with pytest.raises(TypeError):
-        b'path' in item
+        b"path" in item
     with pytest.raises(TypeError):
         42 in item

-    assert item.get('mode') is None
-    assert item.get('mode', 0o666) == 0o666
+    assert item.get("mode") is None
+    assert item.get("mode", 0o666) == 0o666
     with pytest.raises(ValueError):
-        item.get('invalid-key')
+        item.get("invalid-key")
     with pytest.raises(TypeError):
-        item.get(b'mode')
+        item.get(b"mode")
     with pytest.raises(TypeError):
         item.get(42)

@@ -37,16 +37,16 @@ def test_item_empty():

 def test_item_from_dict():
     # does not matter whether we get str or bytes keys
-    item = Item({b'path': '/a/b/c', b'mode': 0o666})
-    assert item.path == '/a/b/c'
+    item = Item({b"path": "/a/b/c", b"mode": 0o666})
+    assert item.path == "/a/b/c"
     assert item.mode == 0o666
-    assert 'path' in item
+    assert "path" in item

     # does not matter whether we get str or bytes keys
-    item = Item({'path': '/a/b/c', 'mode': 0o666})
-    assert item.path == '/a/b/c'
+    item = Item({"path": "/a/b/c", "mode": 0o666})
+    assert item.path == "/a/b/c"
     assert item.mode == 0o666
-    assert 'mode' in item
+    assert "mode" in item

     # invalid - no dict
     with pytest.raises(TypeError):
@@ -58,12 +58,12 @@ def test_item_from_dict():

     # invalid - unknown key
     with pytest.raises(ValueError):
-        Item({'foobar': 'baz'})
+        Item({"foobar": "baz"})


 def test_item_from_kw():
-    item = Item(path='/a/b/c', mode=0o666)
-    assert item.path == '/a/b/c'
+    item = Item(path="/a/b/c", mode=0o666)
+    assert item.path == "/a/b/c"
     assert item.mode == 0o666


@@ -71,7 +71,7 @@ def test_item_int_property():
     item = Item()
     item.mode = 0o666
     assert item.mode == 0o666
-    assert item.as_dict() == {'mode': 0o666}
+    assert item.as_dict() == {"mode": 0o666}
     del item.mode
     assert item.as_dict() == {}
     with pytest.raises(TypeError):
@@ -80,34 +80,34 @@ def test_item_int_property():

 def test_item_mptimestamp_property():
     item = Item()
-    small, big = 42, 2 ** 65
+    small, big = 42, 2**65
     item.atime = small
     assert item.atime == small
-    assert item.as_dict() == {'atime': Timestamp.from_unix_nano(small)}
+    assert item.as_dict() == {"atime": Timestamp.from_unix_nano(small)}
     item.atime = big
     assert item.atime == big
-    assert item.as_dict() == {'atime': Timestamp.from_unix_nano(big)}
+    assert item.as_dict() == {"atime": Timestamp.from_unix_nano(big)}


 def test_item_se_str_property():
     # start simple
     item = Item()
-    item.path = '/a/b/c'
-    assert item.path == '/a/b/c'
-    assert item.as_dict() == {'path': '/a/b/c'}
+    item.path = "/a/b/c"
+    assert item.path == "/a/b/c"
+    assert item.as_dict() == {"path": "/a/b/c"}
     del item.path
     assert item.as_dict() == {}
     with pytest.raises(TypeError):
         item.path = 42

     # non-utf-8 path, needing surrogate-escaping for latin-1 u-umlaut
-    item = Item(internal_dict={'path': b'/a/\xfc/c'})
-    assert item.path == '/a/\udcfc/c'  # getting a surrogate-escaped representation
-    assert item.as_dict() == {'path': '/a/\udcfc/c'}
+    item = Item(internal_dict={"path": b"/a/\xfc/c"})
+    assert item.path == "/a/\udcfc/c"  # getting a surrogate-escaped representation
+    assert item.as_dict() == {"path": "/a/\udcfc/c"}
     del item.path
-    assert 'path' not in item
-    item.path = '/a/\udcfc/c'  # setting using a surrogate-escaped representation
-    assert item.as_dict() == {'path': '/a/\udcfc/c'}
+    assert "path" not in item
+    item.path = "/a/\udcfc/c"  # setting using a surrogate-escaped representation
+    assert item.as_dict() == {"path": "/a/\udcfc/c"}
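The surrogate-escape round-trip relied on here is plain Python codec behavior and can be checked standalone: undecodable bytes map to lone surrogates and back losslessly.

raw = b"/a/\xfc/c"  # latin-1 u-umlaut, not valid UTF-8
text = raw.decode("utf-8", "surrogateescape")
assert text == "/a/\udcfc/c"
assert text.encode("utf-8", "surrogateescape") == raw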


 def test_item_list_property():
@@ -118,18 +118,18 @@ def test_item_list_property():
     assert item.chunks == [0]
     item.chunks.append(1)
     assert item.chunks == [0, 1]
-    assert item.as_dict() == {'chunks': [0, 1]}
+    assert item.as_dict() == {"chunks": [0, 1]}


 def test_item_dict_property():
     item = Item()
     item.xattrs = StableDict()
     assert item.xattrs == StableDict()
-    item.xattrs['foo'] = 'bar'
-    assert item.xattrs['foo'] == 'bar'
-    item.xattrs['bar'] = 'baz'
-    assert item.xattrs == StableDict({'foo': 'bar', 'bar': 'baz'})
-    assert item.as_dict() == {'xattrs': {'foo': 'bar', 'bar': 'baz'}}
+    item.xattrs["foo"] = "bar"
+    assert item.xattrs["foo"] == "bar"
+    item.xattrs["bar"] = "baz"
+    assert item.xattrs == StableDict({"foo": "bar", "bar": "baz"})
+    assert item.as_dict() == {"xattrs": {"foo": "bar", "bar": "baz"}}


 def test_unknown_property():
@@ -142,10 +142,7 @@ def test_unknown_property():


 def test_item_file_size():
-    item = Item(mode=0o100666, chunks=[
-        ChunkListEntry(size=1000, id=None),
-        ChunkListEntry(size=2000, id=None),
-    ])
+    item = Item(mode=0o100666, chunks=[ChunkListEntry(size=1000, id=None), ChunkListEntry(size=2000, id=None)])
     assert item.get_size() == 3000
     item.get_size(memorize=True)
     assert item.size == 3000

+ 116 - 117
src/borg/testsuite/key.py

@@ -11,7 +11,13 @@ from ..crypto.key import RepoKey, KeyfileKey, Blake2RepoKey, Blake2KeyfileKey
 from ..crypto.key import AESOCBRepoKey, AESOCBKeyfileKey, CHPORepoKey, CHPOKeyfileKey
 from ..crypto.key import Blake2AESOCBRepoKey, Blake2AESOCBKeyfileKey, Blake2CHPORepoKey, Blake2CHPOKeyfileKey
 from ..crypto.key import ID_HMAC_SHA_256, ID_BLAKE2b_256
-from ..crypto.key import TAMRequiredError, TAMInvalid, TAMUnsupportedSuiteError, UnsupportedManifestError, UnsupportedKeyFormatError
+from ..crypto.key import (
+    TAMRequiredError,
+    TAMInvalid,
+    TAMUnsupportedSuiteError,
+    UnsupportedManifestError,
+    UnsupportedKeyFormatError,
+)
 from ..crypto.key import identify_key
 from ..crypto.low_level import IntegrityError as IntegrityErrorBase
 from ..helpers import IntegrityError
@@ -36,11 +42,17 @@ class TestKey:
         F84MsMMiqpbz4KVICeBZhfAaTPs4W7BC63qml0ZXJhdGlvbnPOAAGGoKRzYWx02gAgLENQ
         2uVCoR7EnAoiRzn8J+orbojKtJlNCnQ31SSC8rendmVyc2lvbgE=""".strip()
-    keyfile2_cdata = unhexlify(re.sub(r'\W', '', """
+    keyfile2_cdata = unhexlify(
+        re.sub(
+            r"\W",
+            "",
+            """
         0055f161493fcfc16276e8c31493c4641e1eb19a79d0326fad0291e5a9c98e5933
         00000000000003e8d21eaf9b86c297a8cd56432e1915bb
-        """))
-    keyfile2_id = unhexlify('c3fbf14bc001ebcc3cd86e696c13482ed071740927cd7cbe1b01b4bfcee49314')
+        """,
+        )
+    )
+    keyfile2_id = unhexlify("c3fbf14bc001ebcc3cd86e696c13482ed071740927cd7cbe1b01b4bfcee49314")

     keyfile_blake2_key_file = """
         BORG_KEY 0000000000000000000000000000000000000000000000000000000000000000
@@ -56,8 +68,9 @@ class TestKey:
         UTHFJg343jqml0ZXJhdGlvbnPOAAGGoKRzYWx02gAgz3YaUZZ/s+UWywj97EY5b4KhtJYi
         qkPqtDDxs2j/T7+ndmVyc2lvbgE=""".strip()

-    keyfile_blake2_cdata = bytes.fromhex('04fdf9475cf2323c0ba7a99ddc011064f2e7d039f539f2e448'
-                                         '0e6f5fc6ff9993d604040404040404098c8cee1c6db8c28947')
+    keyfile_blake2_cdata = bytes.fromhex(
+        "04fdf9475cf2323c0ba7a99ddc011064f2e7d039f539f2e448" "0e6f5fc6ff9993d604040404040404098c8cee1c6db8c28947"
+    )
     # Verified against b2sum. Entire string passed to BLAKE2, including the padded 64 byte key contained in
     # keyfile_blake2_key_file above is
     # 19280471de95185ec27ecb6fc9edbb4f4db26974c315ede1cd505fab4250ce7cd0d081ea66946c
@@ -65,33 +78,42 @@ class TestKey:
     # 000000000000000000000000000000000000000000000000000000000000000000000000000000
     # 00000000000000000000007061796c6f6164
     #                       p a y l o a d
-    keyfile_blake2_id = bytes.fromhex('d8bc68e961c79f99be39061589e5179b2113cd9226e07b08ddd4a1fef7ce93fb')
+    keyfile_blake2_id = bytes.fromhex("d8bc68e961c79f99be39061589e5179b2113cd9226e07b08ddd4a1fef7ce93fb")

     @pytest.fixture
     def keys_dir(self, request, monkeypatch, tmpdir):
-        monkeypatch.setenv('BORG_KEYS_DIR', str(tmpdir))
+        monkeypatch.setenv("BORG_KEYS_DIR", str(tmpdir))
         return tmpdir

-    @pytest.fixture(params=(
-        # not encrypted
-        PlaintextKey,
-        AuthenticatedKey, Blake2AuthenticatedKey,
-        # legacy crypto
-        KeyfileKey, Blake2KeyfileKey,
-        RepoKey, Blake2RepoKey,
-        # new crypto
-        AESOCBKeyfileKey, AESOCBRepoKey,
-        Blake2AESOCBKeyfileKey, Blake2AESOCBRepoKey,
-        CHPOKeyfileKey, CHPORepoKey,
-        Blake2CHPOKeyfileKey, Blake2CHPORepoKey,
-    ))
+    @pytest.fixture(
+        params=(
+            # not encrypted
+            PlaintextKey,
+            AuthenticatedKey,
+            Blake2AuthenticatedKey,
+            # legacy crypto
+            KeyfileKey,
+            Blake2KeyfileKey,
+            RepoKey,
+            Blake2RepoKey,
+            # new crypto
+            AESOCBKeyfileKey,
+            AESOCBRepoKey,
+            Blake2AESOCBKeyfileKey,
+            Blake2AESOCBRepoKey,
+            CHPOKeyfileKey,
+            CHPORepoKey,
+            Blake2CHPOKeyfileKey,
+            Blake2CHPORepoKey,
+        )
+    )
     def key(self, request, monkeypatch):
-        monkeypatch.setenv('BORG_PASSPHRASE', 'test')
+        monkeypatch.setenv("BORG_PASSPHRASE", "test")
         return request.param.create(self.MockRepository(), self.MockArgs())

     class MockRepository:
         class _Location:
-            raw = processed = '/some/place'
+            raw = processed = "/some/place"

             def canonical_path(self):
                 return self.processed
@@ -114,16 +136,16 @@ class TestKey:

     def test_plaintext(self):
         key = PlaintextKey.create(None, None)
-        chunk = b'foo'
+        chunk = b"foo"
         id = key.id_hash(chunk)
-        assert hexlify(id) == b'2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae'
+        assert hexlify(id) == b"2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"
         assert chunk == key.decrypt(id, key.encrypt(id, chunk))

     def test_keyfile(self, monkeypatch, keys_dir):
-        monkeypatch.setenv('BORG_PASSPHRASE', 'test')
+        monkeypatch.setenv("BORG_PASSPHRASE", "test")
         key = KeyfileKey.create(self.MockRepository(), self.MockArgs())
         assert key.cipher.next_iv() == 0
-        chunk = b'ABC'
+        chunk = b"ABC"
         id = key.id_hash(chunk)
         manifest = key.encrypt(id, chunk)
         assert key.cipher.extract_iv(manifest) == 0
@@ -137,18 +159,18 @@ class TestKey:
         # Key data sanity check
         assert len({key2.id_key, key2.enc_key, key2.enc_hmac_key}) == 3
         assert key2.chunk_seed != 0
-        chunk = b'foo'
+        chunk = b"foo"
         id = key.id_hash(chunk)
         assert chunk == key2.decrypt(id, key.encrypt(id, chunk))

     def test_keyfile_kfenv(self, tmpdir, monkeypatch):
-        keyfile = tmpdir.join('keyfile')
-        monkeypatch.setenv('BORG_KEY_FILE', str(keyfile))
-        monkeypatch.setenv('BORG_PASSPHRASE', 'testkf')
+        keyfile = tmpdir.join("keyfile")
+        monkeypatch.setenv("BORG_KEY_FILE", str(keyfile))
+        monkeypatch.setenv("BORG_PASSPHRASE", "testkf")
         assert not keyfile.exists()
         key = CHPOKeyfileKey.create(self.MockRepository(), self.MockArgs())
         assert keyfile.exists()
-        chunk = b'ABC'
+        chunk = b"ABC"
         chunk_id = key.id_hash(chunk)
         chunk_cdata = key.encrypt(chunk_id, chunk)
         key = CHPOKeyfileKey.detect(self.MockRepository(), chunk_cdata)
@@ -158,27 +180,27 @@ class TestKey:
             CHPOKeyfileKey.detect(self.MockRepository(), chunk_cdata)

     def test_keyfile2(self, monkeypatch, keys_dir):
-        with keys_dir.join('keyfile').open('w') as fd:
+        with keys_dir.join("keyfile").open("w") as fd:
             fd.write(self.keyfile2_key_file)
-        monkeypatch.setenv('BORG_PASSPHRASE', 'passphrase')
+        monkeypatch.setenv("BORG_PASSPHRASE", "passphrase")
         key = KeyfileKey.detect(self.MockRepository(), self.keyfile2_cdata)
-        assert key.decrypt(self.keyfile2_id, self.keyfile2_cdata) == b'payload'
+        assert key.decrypt(self.keyfile2_id, self.keyfile2_cdata) == b"payload"

     def test_keyfile2_kfenv(self, tmpdir, monkeypatch):
-        keyfile = tmpdir.join('keyfile')
-        with keyfile.open('w') as fd:
+        keyfile = tmpdir.join("keyfile")
+        with keyfile.open("w") as fd:
             fd.write(self.keyfile2_key_file)
-        monkeypatch.setenv('BORG_KEY_FILE', str(keyfile))
-        monkeypatch.setenv('BORG_PASSPHRASE', 'passphrase')
+        monkeypatch.setenv("BORG_KEY_FILE", str(keyfile))
+        monkeypatch.setenv("BORG_PASSPHRASE", "passphrase")
         key = KeyfileKey.detect(self.MockRepository(), self.keyfile2_cdata)
-        assert key.decrypt(self.keyfile2_id, self.keyfile2_cdata) == b'payload'
+        assert key.decrypt(self.keyfile2_id, self.keyfile2_cdata) == b"payload"

     def test_keyfile_blake2(self, monkeypatch, keys_dir):
-        with keys_dir.join('keyfile').open('w') as fd:
+        with keys_dir.join("keyfile").open("w") as fd:
             fd.write(self.keyfile_blake2_key_file)
-        monkeypatch.setenv('BORG_PASSPHRASE', 'passphrase')
+        monkeypatch.setenv("BORG_PASSPHRASE", "passphrase")
         key = Blake2KeyfileKey.detect(self.MockRepository(), self.keyfile_blake2_cdata)
-        assert key.decrypt(self.keyfile_blake2_id, self.keyfile_blake2_cdata) == b'payload'
+        assert key.decrypt(self.keyfile_blake2_id, self.keyfile_blake2_cdata) == b"payload"

     def _corrupt_byte(self, key, data, offset):
         data = bytearray(data)
@@ -186,12 +208,12 @@ class TestKey:
         # will trigger an IntegrityError (does not happen while we stay within TYPES_ACCEPTABLE).
         data[offset] ^= 64
         with pytest.raises(IntegrityErrorBase):
-            key.decrypt(b'', data)
+            key.decrypt(b"", data)

     def test_decrypt_integrity(self, monkeypatch, keys_dir):
-        with keys_dir.join('keyfile').open('w') as fd:
+        with keys_dir.join("keyfile").open("w") as fd:
             fd.write(self.keyfile2_key_file)
-        monkeypatch.setenv('BORG_PASSPHRASE', 'passphrase')
+        monkeypatch.setenv("BORG_PASSPHRASE", "passphrase")
         key = KeyfileKey.detect(self.MockRepository(), self.keyfile2_cdata)

         data = self.keyfile2_cdata
@@ -206,7 +228,7 @@ class TestKey:

     def test_roundtrip(self, key):
         repository = key.repository
-        plaintext = b'foo'
+        plaintext = b"foo"
         id = key.id_hash(plaintext)
         encrypted = key.encrypt(id, plaintext)
         identified_key_class = identify_key(encrypted)
@@ -216,59 +238,59 @@ class TestKey:
         assert decrypted == plaintext

     def test_decrypt_decompress(self, key):
-        plaintext = b'123456789'
+        plaintext = b"123456789"
         id = key.id_hash(plaintext)
         encrypted = key.encrypt(id, plaintext)
         assert key.decrypt(id, encrypted, decompress=False) != plaintext
         assert key.decrypt(id, encrypted) == plaintext

     def test_assert_id(self, key):
-        plaintext = b'123456789'
+        plaintext = b"123456789"
         id = key.id_hash(plaintext)
         key.assert_id(id, plaintext)
         id_changed = bytearray(id)
         id_changed[0] ^= 1
         with pytest.raises(IntegrityError):
             key.assert_id(id_changed, plaintext)
-        plaintext_changed = plaintext + b'1'
+        plaintext_changed = plaintext + b"1"
         with pytest.raises(IntegrityError):
             key.assert_id(id, plaintext_changed)

     def test_authenticated_encrypt(self, monkeypatch):
-        monkeypatch.setenv('BORG_PASSPHRASE', 'test')
+        monkeypatch.setenv("BORG_PASSPHRASE", "test")
         key = AuthenticatedKey.create(self.MockRepository(), self.MockArgs())
         assert AuthenticatedKey.id_hash is ID_HMAC_SHA_256.id_hash
         assert len(key.id_key) == 32
-        plaintext = b'123456789'
+        plaintext = b"123456789"
         id = key.id_hash(plaintext)
         authenticated = key.encrypt(id, plaintext)
         # 0x07 is the key TYPE, \x00ff identifies no compression / unknown level.
-        assert authenticated == b'\x07\x00\xff' + plaintext
+        assert authenticated == b"\x07\x00\xff" + plaintext

     def test_blake2_authenticated_encrypt(self, monkeypatch):
-        monkeypatch.setenv('BORG_PASSPHRASE', 'test')
+        monkeypatch.setenv("BORG_PASSPHRASE", "test")
         key = Blake2AuthenticatedKey.create(self.MockRepository(), self.MockArgs())
         assert Blake2AuthenticatedKey.id_hash is ID_BLAKE2b_256.id_hash
         assert len(key.id_key) == 128
-        plaintext = b'123456789'
+        plaintext = b"123456789"
         id = key.id_hash(plaintext)
         authenticated = key.encrypt(id, plaintext)
         # 0x06 is the key TYPE, 0x00ff identifies no compression / unknown level.
-        assert authenticated == b'\x06\x00\xff' + plaintext
+        assert authenticated == b"\x06\x00\xff" + plaintext


 class TestTAM:
     @pytest.fixture
     def key(self, monkeypatch):
-        monkeypatch.setenv('BORG_PASSPHRASE', 'test')
+        monkeypatch.setenv("BORG_PASSPHRASE", "test")
         return CHPOKeyfileKey.create(TestKey.MockRepository(), TestKey.MockArgs())

     def test_unpack_future(self, key):
-        blob = b'\xc1\xc1\xc1\xc1foobar'
+        blob = b"\xc1\xc1\xc1\xc1foobar"
         with pytest.raises(UnsupportedManifestError):
             key.unpack_and_verify_manifest(blob)

-        blob = b'\xc1\xc1\xc1'
+        blob = b"\xc1\xc1\xc1"
         with pytest.raises(msgpack.UnpackException):
             key.unpack_and_verify_manifest(blob)

@@ -285,84 +307,66 @@ class TestTAM:
         assert not verified

     def test_unknown_type_when_required(self, key):
-        blob = msgpack.packb({
-            'tam': {
-                'type': 'HMAC_VOLLBIT',
-            },
-        })
+        blob = msgpack.packb({"tam": {"type": "HMAC_VOLLBIT"}})
         with pytest.raises(TAMUnsupportedSuiteError):
             key.unpack_and_verify_manifest(blob)

     def test_unknown_type(self, key):
-        blob = msgpack.packb({
-            'tam': {
-                'type': 'HMAC_VOLLBIT',
-            },
-        })
+        blob = msgpack.packb({"tam": {"type": "HMAC_VOLLBIT"}})
         key.tam_required = False
         unpacked, verified = key.unpack_and_verify_manifest(blob)
         assert unpacked == {}
         assert not verified

-    @pytest.mark.parametrize('tam, exc', (
-        ({}, TAMUnsupportedSuiteError),
-        ({'type': b'\xff'}, TAMUnsupportedSuiteError),
-        (None, TAMInvalid),
-        (1234, TAMInvalid),
-    ))
+    @pytest.mark.parametrize(
+        "tam, exc",
+        (
+            ({}, TAMUnsupportedSuiteError),
+            ({"type": b"\xff"}, TAMUnsupportedSuiteError),
+            (None, TAMInvalid),
+            (1234, TAMInvalid),
+        ),
+    )
     def test_invalid(self, key, tam, exc):
-        blob = msgpack.packb({
-            'tam': tam,
-        })
+        blob = msgpack.packb({"tam": tam})
         with pytest.raises(exc):
             key.unpack_and_verify_manifest(blob)

-    @pytest.mark.parametrize('hmac, salt', (
-        ({}, bytes(64)),
-        (bytes(64), {}),
-        (None, bytes(64)),
-        (bytes(64), None),
-    ))
+    @pytest.mark.parametrize("hmac, salt", (({}, bytes(64)), (bytes(64), {}), (None, bytes(64)), (bytes(64), None)))
     def test_wrong_types(self, key, hmac, salt):
-        data = {
-            'tam': {
-                'type': 'HKDF_HMAC_SHA512',
-                'hmac': hmac,
-                'salt': salt
-            },
-        }
-        tam = data['tam']
+        data = {"tam": {"type": "HKDF_HMAC_SHA512", "hmac": hmac, "salt": salt}}
+        tam = data["tam"]
         if hmac is None:
-            del tam['hmac']
+            del tam["hmac"]
         if salt is None:
-            del tam['salt']
+            del tam["salt"]
         blob = msgpack.packb(data)
         with pytest.raises(TAMInvalid):
             key.unpack_and_verify_manifest(blob)

     def test_round_trip(self, key):
-        data = {'foo': 'bar'}
+        data = {"foo": "bar"}
         blob = key.pack_and_authenticate_metadata(data)
-        assert blob.startswith(b'\x82')
+        assert blob.startswith(b"\x82")

         unpacked = msgpack.unpackb(blob)
-        assert unpacked['tam']['type'] == 'HKDF_HMAC_SHA512'
+        assert unpacked["tam"]["type"] == "HKDF_HMAC_SHA512"

         unpacked, verified = key.unpack_and_verify_manifest(blob)
         assert verified
-        assert unpacked['foo'] == 'bar'
-        assert 'tam' not in unpacked
+        assert unpacked["foo"] == "bar"
+        assert "tam" not in unpacked

-    @pytest.mark.parametrize('which', ('hmac', 'salt'))
+    @pytest.mark.parametrize("which", ("hmac", "salt"))
     def test_tampered(self, key, which):
-        data = {'foo': 'bar'}
+        data = {"foo": "bar"}
         blob = key.pack_and_authenticate_metadata(data)
-        assert blob.startswith(b'\x82')
+        assert blob.startswith(b"\x82")

         unpacked = msgpack.unpackb(blob, object_hook=StableDict)
-        assert len(unpacked['tam'][which]) == 64
-        unpacked['tam'][which] = unpacked['tam'][which][0:32] + bytes(32)
-        assert len(unpacked['tam'][which]) == 64
+        assert len(unpacked["tam"][which]) == 64
+        unpacked["tam"][which] = unpacked["tam"][which][0:32] + bytes(32)
+        assert len(unpacked["tam"][which]) == 64
         blob = msgpack.packb(unpacked)

         with pytest.raises(TAMInvalid):
@@ -372,10 +376,7 @@ class TestTAM:
 def test_decrypt_key_file_unsupported_algorithm():
     """We will add more algorithms in the future. We should raise a helpful error."""
     key = CHPOKeyfileKey(None)
-    encrypted = msgpack.packb({
-        'algorithm': 'THIS ALGORITHM IS NOT SUPPORTED',
-        'version': 1,
-    })
+    encrypted = msgpack.packb({"algorithm": "THIS ALGORITHM IS NOT SUPPORTED", "version": 1})

     with pytest.raises(UnsupportedKeyFormatError):
         key.decrypt_key_file(encrypted, "hello, pass phrase")
@@ -384,9 +385,7 @@ def test_decrypt_key_file_unsupported_algorithm():
 def test_decrypt_key_file_v2_is_unsupported():
     """There may eventually be a version 2 of the format. For now we should raise a helpful error."""
     key = CHPOKeyfileKey(None)
-    encrypted = msgpack.packb({
-        'version': 2,
-    })
+    encrypted = msgpack.packb({"version": 2})

     with pytest.raises(UnsupportedKeyFormatError):
         key.decrypt_key_file(encrypted, "hello, pass phrase")
@@ -394,16 +393,16 @@ def test_decrypt_key_file_v2_is_unsupported():

 def test_key_file_roundtrip(monkeypatch):
     def to_dict(key):
-        extract = 'repository_id', 'enc_key', 'enc_hmac_key', 'id_key', 'chunk_seed'
+        extract = "repository_id", "enc_key", "enc_hmac_key", "id_key", "chunk_seed"
         return {a: getattr(key, a) for a in extract}

-    repository = MagicMock(id=b'repository_id')
-    monkeypatch.setenv('BORG_PASSPHRASE', "hello, pass phrase")
+    repository = MagicMock(id=b"repository_id")
+    monkeypatch.setenv("BORG_PASSPHRASE", "hello, pass phrase")

-    save_me = AESOCBRepoKey.create(repository, args=MagicMock(key_algorithm='argon2'))
+    save_me = AESOCBRepoKey.create(repository, args=MagicMock(key_algorithm="argon2"))
     saved = repository.save_key.call_args.args[0]
     repository.load_key.return_value = saved
     load_me = AESOCBRepoKey.detect(repository, manifest_data=None)

     assert to_dict(load_me) == to_dict(save_me)
-    assert msgpack.unpackb(a2b_base64(saved))['algorithm'] == KEY_ALGORITHMS['argon2']
+    assert msgpack.unpackb(a2b_base64(saved))["algorithm"] == KEY_ALGORITHMS["argon2"]
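
Note on the reformatting rules at work in this file: black normalizes string literals to double quotes, explodes a bracketed collection or argument list that no longer fits the line length onto one element per line (and keeps it exploded once a trailing comma is present), and re-wraps adjacent implicitly concatenated string literals, as in keyfile_blake2_cdata above. A minimal sketch of reproducing such a reformat locally, assuming black is installed (pip install black); the line length of 120 is inferred from the joined lines in this diff:

import black

# single-quoted source, as it looked before this commit
SRC = "blob = msgpack.packb({'tam': {'type': 'HMAC_VOLLBIT'}})\n"

# black.format_str() applies the same normalization this diff shows:
# double quotes, and short literals collapsed onto a single line.
print(black.format_str(SRC, mode=black.Mode(line_length=120)), end="")
# -> blob = msgpack.packb({"tam": {"type": "HMAC_VOLLBIT"}})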

+ 65 - 23
src/borg/testsuite/locking.py

@@ -6,8 +6,19 @@ from traceback import format_exc
 import pytest

 from ..platform import get_process_id, process_alive
-from ..locking import TimeoutTimer, ExclusiveLock, Lock, LockRoster, \
-                      ADD, REMOVE, SHARED, EXCLUSIVE, LockTimeout, NotLocked, NotMyLock
+from ..locking import (
+    TimeoutTimer,
+    ExclusiveLock,
+    Lock,
+    LockRoster,
+    ADD,
+    REMOVE,
+    SHARED,
+    EXCLUSIVE,
+    LockTimeout,
+    NotLocked,
+    NotMyLock,
+)

 ID1 = "foo", 1, 1
 ID2 = "bar", 2, 2
@@ -45,7 +56,7 @@ class TestTimeoutTimer:

 @pytest.fixture()
 def lockpath(tmpdir):
-    return str(tmpdir.join('lock'))
+    return str(tmpdir.join("lock"))


 class TestExclusiveLock:
@@ -67,7 +78,7 @@ class TestExclusiveLock:
     def test_kill_stale(self, lockpath, free_pid):
         host, pid, tid = our_id = get_process_id()
         dead_id = host, free_pid, tid
-        cant_know_if_dead_id = 'foo.bar.example.net', 1, 2
+        cant_know_if_dead_id = "foo.bar.example.net", 1, 2

         dead_lock = ExclusiveLock(lockpath, id=dead_id).acquire()
         with ExclusiveLock(lockpath, id=our_id):
@@ -94,9 +105,7 @@ class TestExclusiveLock:
         assert old_unique_name != new_unique_name  # locking filename is different now

     def test_race_condition(self, lockpath):
-
         class SynchronizedCounter:
-
             def __init__(self, count=0):
                 self.lock = ThreadingLock()
                 self.count = count
@@ -126,44 +135,77 @@ class TestExclusiveLock:
             with print_lock:
                 print(msg)

-        def acquire_release_loop(id, timeout, thread_id, lock_owner_counter, exception_counter, print_lock, last_thread=None):
-            print_locked("Thread %2d: Starting acquire_release_loop(id=%s, timeout=%d); lockpath=%s" % (thread_id, id, timeout, lockpath))
+        def acquire_release_loop(
+            id, timeout, thread_id, lock_owner_counter, exception_counter, print_lock, last_thread=None
+        ):
+            print_locked(
+                "Thread %2d: Starting acquire_release_loop(id=%s, timeout=%d); lockpath=%s"
+                % (thread_id, id, timeout, lockpath)
+            )
             timer = TimeoutTimer(timeout, -1).start()
             cycle = 0

             while not timer.timed_out():
                 cycle += 1
                 try:
-                    with ExclusiveLock(lockpath, id=id, timeout=timeout/20, sleep=-1):  # This timeout is only for not exceeding the given timeout by more than 5%. With sleep<0 it's constantly polling anyway.
+                    with ExclusiveLock(
+                        lockpath, id=id, timeout=timeout / 20, sleep=-1
+                    ):  # This timeout is only for not exceeding the given timeout by more than 5%. With sleep<0 it's constantly polling anyway.
                         lock_owner_count = lock_owner_counter.incr()
-                        print_locked("Thread %2d: Acquired the lock. It's my %d. loop cycle. I am the %d. who has the lock concurrently." % (thread_id, cycle, lock_owner_count))
+                        print_locked(
+                            "Thread %2d: Acquired the lock. It's my %d. loop cycle. I am the %d. who has the lock concurrently."
+                            % (thread_id, cycle, lock_owner_count)
+                        )
                         time.sleep(0.005)
                         lock_owner_count = lock_owner_counter.decr()
-                        print_locked("Thread %2d: Releasing the lock, finishing my %d. loop cycle. Currently, %d colleagues still have the lock." % (thread_id, cycle, lock_owner_count))
+                        print_locked(
+                            "Thread %2d: Releasing the lock, finishing my %d. loop cycle. Currently, %d colleagues still have the lock."
+                            % (thread_id, cycle, lock_owner_count)
+                        )
                 except LockTimeout:
                     print_locked("Thread %2d: Got LockTimeout, finishing my %d. loop cycle." % (thread_id, cycle))
                 except:
                     exception_count = exception_counter.incr()
                     e = format_exc()
-                    print_locked("Thread %2d: Exception thrown, finishing my %d. loop cycle. It's the %d. exception seen until now: %s" % (thread_id, cycle, exception_count, e))
+                    print_locked(
+                        "Thread %2d: Exception thrown, finishing my %d. loop cycle. It's the %d. exception seen until now: %s"
+                        % (thread_id, cycle, exception_count, e)
+                    )

             print_locked("Thread %2d: Loop timed out--terminating after %d loop cycles." % (thread_id, cycle))
             if last_thread is not None:  # joining its predecessor, if any
                 last_thread.join()

-        print('')
+        print("")
         lock_owner_counter = SynchronizedCounter()
         exception_counter = SynchronizedCounter()
         print_lock = ThreadingLock()
         thread = None
         for thread_id in range(RACE_TEST_NUM_THREADS):
-            thread = Thread(target=acquire_release_loop, args=(('foo', thread_id, 0), RACE_TEST_DURATION, thread_id, lock_owner_counter, exception_counter, print_lock, thread))
+            thread = Thread(
+                target=acquire_release_loop,
+                args=(
+                    ("foo", thread_id, 0),
+                    RACE_TEST_DURATION,
+                    thread_id,
+                    lock_owner_counter,
+                    exception_counter,
+                    print_lock,
+                    thread,
+                ),
+            )
             thread.start()
         thread.join()  # joining the last thread

-        assert lock_owner_counter.maxvalue() > 0, 'Never gained the lock? Something went wrong here...'
-        assert lock_owner_counter.maxvalue() <= 1, "Maximal number of concurrent lock holders was %d. So exclusivity is broken." % (lock_owner_counter.maxvalue())
-        assert exception_counter.value() == 0, "ExclusiveLock threw %d exceptions due to unclean concurrency handling." % (exception_counter.value())
+        assert lock_owner_counter.maxvalue() > 0, "Never gained the lock? Something went wrong here..."
+        assert (
+            lock_owner_counter.maxvalue() <= 1
+        ), "Maximal number of concurrent lock holders was %d. So exclusivity is broken." % (
+            lock_owner_counter.maxvalue()
+        )
+        assert (
+            exception_counter.value() == 0
+        ), "ExclusiveLock threw %d exceptions due to unclean concurrency handling." % (exception_counter.value())


 class TestLock:
@@ -228,7 +270,7 @@ class TestLock:
     def test_kill_stale(self, lockpath, free_pid):
         host, pid, tid = our_id = get_process_id()
         dead_id = host, free_pid, tid
-        cant_know_if_dead_id = 'foo.bar.example.net', 1, 2
+        cant_know_if_dead_id = "foo.bar.example.net", 1, 2

         dead_lock = Lock(lockpath, id=dead_id, exclusive=True).acquire()
         roster = dead_lock._roster
@@ -263,7 +305,7 @@ class TestLock:

 @pytest.fixture()
 def rosterpath(tmpdir):
-    return str(tmpdir.join('roster'))
+    return str(tmpdir.join("roster"))


 class TestLockRoster:
@@ -277,13 +319,13 @@ class TestLockRoster:
         roster1 = LockRoster(rosterpath, id=ID1)
         assert roster1.get(SHARED) == set()
         roster1.modify(SHARED, ADD)
-        assert roster1.get(SHARED) == {ID1, }
+        assert roster1.get(SHARED) == {ID1}
         roster2 = LockRoster(rosterpath, id=ID2)
         roster2.modify(SHARED, ADD)
-        assert roster2.get(SHARED) == {ID1, ID2, }
+        assert roster2.get(SHARED) == {ID1, ID2}
         roster1 = LockRoster(rosterpath, id=ID1)
         roster1.modify(SHARED, REMOVE)
-        assert roster1.get(SHARED) == {ID2, }
+        assert roster1.get(SHARED) == {ID2}
         roster2 = LockRoster(rosterpath, id=ID2)
         roster2.modify(SHARED, REMOVE)
         assert roster2.get(SHARED) == set()
@@ -300,7 +342,7 @@ class TestLockRoster:
         assert roster1.get(SHARED) == {dead_id}

         # put a unknown-state remote process lock into roster
-        cant_know_if_dead_id = 'foo.bar.example.net', 1, 2
+        cant_know_if_dead_id = "foo.bar.example.net", 1, 2
         roster1 = LockRoster(rosterpath, id=cant_know_if_dead_id)
         roster1.kill_stale_locks = False
         assert roster1.get(SHARED) == {dead_id}
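
Note on the long assertions in test_race_condition above: when an assert plus its message exceeds the line length, black wraps only the condition in parentheses, never the condition-and-message pair, because assert (cond, msg) would test a two-element tuple, which is always truthy. A small self-contained demonstration (values are illustrative):

count = 2  # illustrative value

# the black-style wrapped assert still fails as expected:
try:
    assert (
        count <= 1
    ), "Maximal number of concurrent lock holders was %d. So exclusivity is broken." % (count)
except AssertionError as e:
    print("raised:", e)

# parenthesizing condition AND message builds a non-empty tuple instead,
# so this assert can never fire (CPython emits a SyntaxWarning for it):
assert (count <= 1, "never checked")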

+ 8 - 7
src/borg/testsuite/logger.py

@@ -4,6 +4,7 @@ from io import StringIO
 import pytest

 from ..logger import find_parent_module, create_logger, setup_logging
+
 logger = create_logger()


@@ -11,27 +12,27 @@ logger = create_logger()
 def io_logger():
     io = StringIO()
     handler = setup_logging(stream=io, env_var=None)
-    handler.setFormatter(logging.Formatter('%(name)s: %(message)s'))
+    handler.setFormatter(logging.Formatter("%(name)s: %(message)s"))
     logger.setLevel(logging.DEBUG)
     return io


 def test_setup_logging(io_logger):
-    logger.info('hello world')
+    logger.info("hello world")
     assert io_logger.getvalue() == "borg.testsuite.logger: hello world\n"


 def test_multiple_loggers(io_logger):
     logger = logging.getLogger(__name__)
-    logger.info('hello world 1')
+    logger.info("hello world 1")
     assert io_logger.getvalue() == "borg.testsuite.logger: hello world 1\n"
-    logger = logging.getLogger('borg.testsuite.logger')
-    logger.info('hello world 2')
+    logger = logging.getLogger("borg.testsuite.logger")
+    logger.info("hello world 2")
     assert io_logger.getvalue() == "borg.testsuite.logger: hello world 1\nborg.testsuite.logger: hello world 2\n"
     io_logger.truncate(0)
     io_logger.seek(0)
-    logger = logging.getLogger('borg.testsuite.logger')
-    logger.info('hello world 2')
+    logger = logging.getLogger("borg.testsuite.logger")
+    logger.info("hello world 2")
     assert io_logger.getvalue() == "borg.testsuite.logger: hello world 2\n"


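Note on the one added line in the first hunk of this file: black requires a blank line between a module's import block and the first statement that follows it, hence the blank inserted before logger = create_logger(). A minimal sketch, assuming black is installed:

import black

src = "import logging\nlogger = logging.getLogger()\n"
print(black.format_str(src, mode=black.Mode()), end="")
# import logging
#
# logger = logging.getLogger()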

+ 16 - 17
src/borg/testsuite/lrucache.py

@@ -6,33 +6,32 @@ from ..lrucache import LRUCache


 class TestLRUCache:
-
     def test_lrucache(self):
         c = LRUCache(2, dispose=lambda _: None)
         assert len(c) == 0
         assert c.items() == set()
-        for i, x in enumerate('abc'):
+        for i, x in enumerate("abc"):
             c[x] = i
         assert len(c) == 2
-        assert c.items() == {('b', 1), ('c', 2)}
-        assert 'a' not in c
-        assert 'b' in c
+        assert c.items() == {("b", 1), ("c", 2)}
+        assert "a" not in c
+        assert "b" in c
         with pytest.raises(KeyError):
-            c['a']
-        assert c.get('a') is None
-        assert c.get('a', 'foo') == 'foo'
-        assert c['b'] == 1
-        assert c.get('b') == 1
-        assert c['c'] == 2
-        c['d'] = 3
+            c["a"]
+        assert c.get("a") is None
+        assert c.get("a", "foo") == "foo"
+        assert c["b"] == 1
+        assert c.get("b") == 1
+        assert c["c"] == 2
+        c["d"] = 3
         assert len(c) == 2
-        assert c['c'] == 2
-        assert c['d'] == 3
-        del c['c']
+        assert c["c"] == 2
+        assert c["d"] == 3
+        del c["c"]
         assert len(c) == 1
         with pytest.raises(KeyError):
-            c['c']
-        assert c['d'] == 3
+            c["c"]
+        assert c["d"] == 3
         c.clear()
         assert c.items() == set()

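Note on the removed line under class TestLRUCache: above: the black version used here also drops a blank line sitting directly below a block opener such as a class header. This behavior is version-dependent, so the expected output below is an assumption; a minimal sketch, assuming black is installed:

import black

src = "class TestLRUCache:\n\n    def test_lrucache(self):\n        pass\n"
print(black.format_str(src, mode=black.Mode()), end="")
# with the black version used for this commit, the blank line is dropped:
# class TestLRUCache:
#     def test_lrucache(self):
#         pass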

+ 9 - 11
src/borg/testsuite/nanorst.py

@@ -4,15 +4,15 @@ from ..nanorst import rst_to_text


 def test_inline():
-    assert rst_to_text('*foo* and ``bar``.') == 'foo and bar.'
+    assert rst_to_text("*foo* and ``bar``.") == "foo and bar."


 def test_inline_spread():
-    assert rst_to_text('*foo and bar, thusly\nfoobar*.') == 'foo and bar, thusly\nfoobar.'
+    assert rst_to_text("*foo and bar, thusly\nfoobar*.") == "foo and bar, thusly\nfoobar."


 def test_comment_inline():
-    assert rst_to_text('Foo and Bar\n.. foo\nbar') == 'Foo and Bar\n.. foo\nbar'
+    assert rst_to_text("Foo and Bar\n.. foo\nbar") == "Foo and Bar\n.. foo\nbar"


 def test_inline_escape():
@@ -20,21 +20,19 @@ def test_inline_escape():


 def test_comment():
-    assert rst_to_text('Foo and Bar\n\n.. foo\nbar') == 'Foo and Bar\n\nbar'
+    assert rst_to_text("Foo and Bar\n\n.. foo\nbar") == "Foo and Bar\n\nbar"


 def test_directive_note():
-    assert rst_to_text('.. note::\n   Note this and that') == 'Note:\n   Note this and that'
+    assert rst_to_text(".. note::\n   Note this and that") == "Note:\n   Note this and that"


 def test_ref():
-    references = {
-        'foo': 'baz'
-    }
-    assert rst_to_text('See :ref:`fo\no`.', references=references) == 'See baz.'
+    references = {"foo": "baz"}
+    assert rst_to_text("See :ref:`fo\no`.", references=references) == "See baz."


 def test_undefined_ref():
     with pytest.raises(ValueError) as exc_info:
-        rst_to_text('See :ref:`foo`.')
-    assert 'Undefined reference' in str(exc_info.value)
+        rst_to_text("See :ref:`foo`.")
+    assert "Undefined reference" in str(exc_info.value)

+ 14 - 15
src/borg/testsuite/nonces.py

@@ -10,10 +10,9 @@ from ..remote import InvalidRPCMethod


 class TestNonceManager:
-
     class MockRepository:
         class _Location:
-            orig = '/some/place'
+            orig = "/some/place"

         _location = _Location()
         id = bytes(32)
@@ -37,15 +36,15 @@ class TestNonceManager:
         self.repository = None

     def cache_nonce(self):
-        with open(os.path.join(get_security_dir(self.repository.id_str), 'nonce')) as fd:
+        with open(os.path.join(get_security_dir(self.repository.id_str), "nonce")) as fd:
             return fd.read()

     def set_cache_nonce(self, nonce):
-        with open(os.path.join(get_security_dir(self.repository.id_str), 'nonce'), "w") as fd:
+        with open(os.path.join(get_security_dir(self.repository.id_str), "nonce"), "w") as fd:
             assert fd.write(nonce)

     def test_empty_cache_and_old_server(self, monkeypatch):
-        monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20)
+        monkeypatch.setattr(nonces, "NONCE_SPACE_RESERVATION", 0x20)

         self.repository = self.MockOldRepository()
         manager = NonceManager(self.repository, 0x2000)
@@ -55,7 +54,7 @@ class TestNonceManager:
         assert self.cache_nonce() == "0000000000002033"

     def test_empty_cache(self, monkeypatch):
-        monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20)
+        monkeypatch.setattr(nonces, "NONCE_SPACE_RESERVATION", 0x20)

         self.repository = self.MockRepository()
         self.repository.next_free = 0x2000
@@ -66,7 +65,7 @@ class TestNonceManager:
         assert self.cache_nonce() == "0000000000002033"

     def test_empty_nonce(self, monkeypatch):
-        monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20)
+        monkeypatch.setattr(nonces, "NONCE_SPACE_RESERVATION", 0x20)

         self.repository = self.MockRepository()
         self.repository.next_free = None
@@ -99,10 +98,10 @@ class TestNonceManager:
         next_nonce = manager.ensure_reservation(0x2043, 64)
         assert next_nonce == 0x2063
         assert self.cache_nonce() == "00000000000020c3"
-        assert self.repository.next_free == 0x20c3
+        assert self.repository.next_free == 0x20C3

     def test_sync_nonce(self, monkeypatch):
-        monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20)
+        monkeypatch.setattr(nonces, "NONCE_SPACE_RESERVATION", 0x20)

         self.repository = self.MockRepository()
         self.repository.next_free = 0x2000
@@ -116,7 +115,7 @@ class TestNonceManager:
         assert self.repository.next_free == 0x2033

     def test_server_just_upgraded(self, monkeypatch):
-        monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20)
+        monkeypatch.setattr(nonces, "NONCE_SPACE_RESERVATION", 0x20)

         self.repository = self.MockRepository()
         self.repository.next_free = None
@@ -130,7 +129,7 @@ class TestNonceManager:
         assert self.repository.next_free == 0x2033

     def test_transaction_abort_no_cache(self, monkeypatch):
-        monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20)
+        monkeypatch.setattr(nonces, "NONCE_SPACE_RESERVATION", 0x20)

         self.repository = self.MockRepository()
         self.repository.next_free = 0x2000
@@ -143,7 +142,7 @@ class TestNonceManager:
         assert self.repository.next_free == 0x2033

     def test_transaction_abort_old_server(self, monkeypatch):
-        monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20)
+        monkeypatch.setattr(nonces, "NONCE_SPACE_RESERVATION", 0x20)

         self.repository = self.MockOldRepository()
         self.set_cache_nonce("0000000000002000")
@@ -155,7 +154,7 @@ class TestNonceManager:
         assert self.cache_nonce() == "0000000000002033"

     def test_transaction_abort_on_other_client(self, monkeypatch):
-        monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20)
+        monkeypatch.setattr(nonces, "NONCE_SPACE_RESERVATION", 0x20)

         self.repository = self.MockRepository()
         self.repository.next_free = 0x2000
@@ -169,7 +168,7 @@ class TestNonceManager:
         assert self.repository.next_free == 0x2033

     def test_interleaved(self, monkeypatch):
-        monkeypatch.setattr(nonces, 'NONCE_SPACE_RESERVATION', 0x20)
+        monkeypatch.setattr(nonces, "NONCE_SPACE_RESERVATION", 0x20)

         self.repository = self.MockRepository()
         self.repository.next_free = 0x2000
@@ -192,7 +191,7 @@ class TestNonceManager:
         assert self.repository.next_free == 0x4000

         # spans reservation boundary
-        next_nonce = manager.ensure_reservation(0x201f, 21)
+        next_nonce = manager.ensure_reservation(0x201F, 21)
         assert next_nonce == 0x4000
         assert self.cache_nonce() == "0000000000004035"
         assert self.repository.next_free == 0x4035
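
Note on the 0x20c3 -> 0x20C3 and 0x201f -> 0x201F changes above: black normalizes numeric literals by uppercasing hex digits while keeping the 0x prefix lowercase, so letter digits cannot be mistaken for a prefix or suffix. A one-line check, assuming black is installed:

import black

print(black.format_str("n = 0x201f\n", mode=black.Mode()), end="")  # prints: n = 0x201F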

+ 394 - 255
src/borg/testsuite/patterns.py

@@ -11,8 +11,7 @@ from ..patterns import parse_pattern, PatternMatcher


 def check_patterns(files, pattern, expected):
-    """Utility for testing patterns.
-    """
+    """Utility for testing patterns."""
     assert all([f == os.path.normpath(f) for f in files]), "Pattern matchers expect normalized input paths"

     matched = [f for f in files if pattern.match(f)]
@@ -20,167 +19,291 @@ def check_patterns(files, pattern, expected):
     assert matched == (files if expected is None else expected)


-@pytest.mark.parametrize("pattern, expected", [
-    # "None" means all files, i.e. all match the given pattern
-    ("/", []),
-    ("/home", ["home"]),
-    ("/home///", ["home"]),
-    ("/./home", ["home"]),
-    ("/home/user", ["home/user"]),
-    ("/home/user2", ["home/user2"]),
-    ("/home/user/.bashrc", ["home/user/.bashrc"]),
-    ])
+@pytest.mark.parametrize(
+    "pattern, expected",
+    [
+        # "None" means all files, i.e. all match the given pattern
+        ("/", []),
+        ("/home", ["home"]),
+        ("/home///", ["home"]),
+        ("/./home", ["home"]),
+        ("/home/user", ["home/user"]),
+        ("/home/user2", ["home/user2"]),
+        ("/home/user/.bashrc", ["home/user/.bashrc"]),
+    ],
+)
 def test_patterns_full(pattern, expected):
-    files = ["home", "home/user", "home/user2", "home/user/.bashrc", ]
+    files = ["home", "home/user", "home/user2", "home/user/.bashrc"]

     check_patterns(files, PathFullPattern(pattern), expected)


-@pytest.mark.parametrize("pattern, expected", [
-    # "None" means all files, i.e. all match the given pattern
-    ("", []),
-    ("relative", []),
-    ("relative/path/", ["relative/path"]),
-    ("relative/path", ["relative/path"]),
-    ])
+@pytest.mark.parametrize(
+    "pattern, expected",
+    [
+        # "None" means all files, i.e. all match the given pattern
+        ("", []),
+        ("relative", []),
+        ("relative/path/", ["relative/path"]),
+        ("relative/path", ["relative/path"]),
+    ],
+)
 def test_patterns_full_relative(pattern, expected):
-    files = ["relative/path", "relative/path2", ]
+    files = ["relative/path", "relative/path2"]

     check_patterns(files, PathFullPattern(pattern), expected)


-@pytest.mark.parametrize("pattern, expected", [
-    # "None" means all files, i.e. all match the given pattern
-    ("/", None),
-    ("/./", None),
-    ("", []),
-    ("/home/u", []),
-    ("/home/user", ["home/user/.profile", "home/user/.bashrc"]),
-    ("/etc", ["etc/server/config", "etc/server/hosts"]),
-    ("///etc//////", ["etc/server/config", "etc/server/hosts"]),
-    ("/./home//..//home/user2", ["home/user2/.profile", "home/user2/public_html/index.html"]),
-    ("/srv", ["srv/messages", "srv/dmesg"]),
-    ])
+@pytest.mark.parametrize(
+    "pattern, expected",
+    [
+        # "None" means all files, i.e. all match the given pattern
+        ("/", None),
+        ("/./", None),
+        ("", []),
+        ("/home/u", []),
+        ("/home/user", ["home/user/.profile", "home/user/.bashrc"]),
+        ("/etc", ["etc/server/config", "etc/server/hosts"]),
+        ("///etc//////", ["etc/server/config", "etc/server/hosts"]),
+        ("/./home//..//home/user2", ["home/user2/.profile", "home/user2/public_html/index.html"]),
+        ("/srv", ["srv/messages", "srv/dmesg"]),
+    ],
+)
 def test_patterns_prefix(pattern, expected):
     files = [
-        "etc/server/config", "etc/server/hosts", "home", "home/user/.profile", "home/user/.bashrc",
-        "home/user2/.profile", "home/user2/public_html/index.html", "srv/messages", "srv/dmesg",
+        "etc/server/config",
+        "etc/server/hosts",
+        "home",
+        "home/user/.profile",
+        "home/user/.bashrc",
+        "home/user2/.profile",
+        "home/user2/public_html/index.html",
+        "srv/messages",
+        "srv/dmesg",
     ]

     check_patterns(files, PathPrefixPattern(pattern), expected)


-@pytest.mark.parametrize("pattern, expected", [
-    # "None" means all files, i.e. all match the given pattern
-    ("", []),
-    ("foo", []),
-    ("relative", ["relative/path1", "relative/two"]),
-    ("more", ["more/relative"]),
-    ])
+@pytest.mark.parametrize(
+    "pattern, expected",
+    [
+        # "None" means all files, i.e. all match the given pattern
+        ("", []),
+        ("foo", []),
+        ("relative", ["relative/path1", "relative/two"]),
+        ("more", ["more/relative"]),
+    ],
+)
 def test_patterns_prefix_relative(pattern, expected):
     files = ["relative/path1", "relative/two", "more/relative"]

     check_patterns(files, PathPrefixPattern(pattern), expected)


-@pytest.mark.parametrize("pattern, expected", [
-    # "None" means all files, i.e. all match the given pattern
-    ("/*", None),
-    ("/./*", None),
-    ("*", None),
-    ("*/*",
-     ["etc/server/config", "etc/server/hosts", "home/user/.profile", "home/user/.bashrc",
-      "home/user2/.profile", "home/user2/public_html/index.html", "srv/messages", "srv/dmesg",
-      "home/foo/.thumbnails", "home/foo/bar/.thumbnails"]),
-    ("*///*",
-     ["etc/server/config", "etc/server/hosts", "home/user/.profile", "home/user/.bashrc",
-      "home/user2/.profile", "home/user2/public_html/index.html", "srv/messages", "srv/dmesg",
-      "home/foo/.thumbnails", "home/foo/bar/.thumbnails"]),
-    ("/home/u", []),
-    ("/home/*",
-     ["home/user/.profile", "home/user/.bashrc", "home/user2/.profile", "home/user2/public_html/index.html",
-      "home/foo/.thumbnails", "home/foo/bar/.thumbnails"]),
-    ("/home/user/*", ["home/user/.profile", "home/user/.bashrc"]),
-    ("/etc/*", ["etc/server/config", "etc/server/hosts"]),
-    ("*/.pr????e", ["home/user/.profile", "home/user2/.profile"]),
-    ("///etc//////*", ["etc/server/config", "etc/server/hosts"]),
-    ("/./home//..//home/user2/*", ["home/user2/.profile", "home/user2/public_html/index.html"]),
-    ("/srv*", ["srv/messages", "srv/dmesg"]),
-    ("/home/*/.thumbnails", ["home/foo/.thumbnails", "home/foo/bar/.thumbnails"]),
-    ])
+@pytest.mark.parametrize(
+    "pattern, expected",
+    [
+        # "None" means all files, i.e. all match the given pattern
+        ("/*", None),
+        ("/./*", None),
+        ("*", None),
+        (
+            "*/*",
+            [
+                "etc/server/config",
+                "etc/server/hosts",
+                "home/user/.profile",
+                "home/user/.bashrc",
+                "home/user2/.profile",
+                "home/user2/public_html/index.html",
+                "srv/messages",
+                "srv/dmesg",
+                "home/foo/.thumbnails",
+                "home/foo/bar/.thumbnails",
+            ],
+        ),
+        (
+            "*///*",
+            [
+                "etc/server/config",
+                "etc/server/hosts",
+                "home/user/.profile",
+                "home/user/.bashrc",
+                "home/user2/.profile",
+                "home/user2/public_html/index.html",
+                "srv/messages",
+                "srv/dmesg",
+                "home/foo/.thumbnails",
+                "home/foo/bar/.thumbnails",
+            ],
+        ),
+        ("/home/u", []),
+        (
+            "/home/*",
+            [
+                "home/user/.profile",
+                "home/user/.bashrc",
+                "home/user2/.profile",
+                "home/user2/public_html/index.html",
+                "home/foo/.thumbnails",
+                "home/foo/bar/.thumbnails",
+            ],
+        ),
+        ("/home/user/*", ["home/user/.profile", "home/user/.bashrc"]),
+        ("/etc/*", ["etc/server/config", "etc/server/hosts"]),
+        ("*/.pr????e", ["home/user/.profile", "home/user2/.profile"]),
+        ("///etc//////*", ["etc/server/config", "etc/server/hosts"]),
+        ("/./home//..//home/user2/*", ["home/user2/.profile", "home/user2/public_html/index.html"]),
+        ("/srv*", ["srv/messages", "srv/dmesg"]),
+        ("/home/*/.thumbnails", ["home/foo/.thumbnails", "home/foo/bar/.thumbnails"]),
+    ],
+)
 def test_patterns_fnmatch(pattern, expected):
     files = [
-        "etc/server/config", "etc/server/hosts", "home", "home/user/.profile", "home/user/.bashrc",
-        "home/user2/.profile", "home/user2/public_html/index.html", "srv/messages", "srv/dmesg",
-        "home/foo/.thumbnails", "home/foo/bar/.thumbnails",
+        "etc/server/config",
+        "etc/server/hosts",
+        "home",
+        "home/user/.profile",
+        "home/user/.bashrc",
+        "home/user2/.profile",
+        "home/user2/public_html/index.html",
+        "srv/messages",
+        "srv/dmesg",
+        "home/foo/.thumbnails",
+        "home/foo/bar/.thumbnails",
     ]

     check_patterns(files, FnmatchPattern(pattern), expected)


-@pytest.mark.parametrize("pattern, expected", [
-    # "None" means all files, i.e. all match the given pattern
-    ("*", None),
-    ("**/*", None),
-    ("/**/*", None),
-    ("/./*", None),
-    ("*/*",
-     ["etc/server/config", "etc/server/hosts", "home/user/.profile", "home/user/.bashrc",
-      "home/user2/.profile", "home/user2/public_html/index.html", "srv/messages", "srv/dmesg",
-      "srv2/blafasel", "home/foo/.thumbnails", "home/foo/bar/.thumbnails"]),
-    ("*///*",
-     ["etc/server/config", "etc/server/hosts", "home/user/.profile", "home/user/.bashrc",
-      "home/user2/.profile", "home/user2/public_html/index.html", "srv/messages", "srv/dmesg",
-      "srv2/blafasel", "home/foo/.thumbnails", "home/foo/bar/.thumbnails"]),
-    ("/home/u", []),
-    ("/home/*",
-     ["home/user/.profile", "home/user/.bashrc", "home/user2/.profile", "home/user2/public_html/index.html",
-      "home/foo/.thumbnails", "home/foo/bar/.thumbnails"]),
-    ("/home/user/*", ["home/user/.profile", "home/user/.bashrc"]),
-    ("/etc/*/*", ["etc/server/config", "etc/server/hosts"]),
-    ("/etc/**/*", ["etc/server/config", "etc/server/hosts"]),
-    ("/etc/**/*/*", ["etc/server/config", "etc/server/hosts"]),
-    ("*/.pr????e", []),
-    ("**/.pr????e", ["home/user/.profile", "home/user2/.profile"]),
-    ("///etc//////*", ["etc/server/config", "etc/server/hosts"]),
-    ("/./home//..//home/user2/", ["home/user2/.profile", "home/user2/public_html/index.html"]),
-    ("/./home//..//home/user2/**/*", ["home/user2/.profile", "home/user2/public_html/index.html"]),
-    ("/srv*/", ["srv/messages", "srv/dmesg", "srv2/blafasel"]),
-    ("/srv*", ["srv", "srv/messages", "srv/dmesg", "srv2", "srv2/blafasel"]),
-    ("/srv/*", ["srv/messages", "srv/dmesg"]),
-    ("/srv2/**", ["srv2", "srv2/blafasel"]),
-    ("/srv2/**/", ["srv2/blafasel"]),
-    ("/home/*/.thumbnails", ["home/foo/.thumbnails"]),
-    ("/home/*/*/.thumbnails", ["home/foo/bar/.thumbnails"]),
-    ])
+@pytest.mark.parametrize(
+    "pattern, expected",
+    [
+        # "None" means all files, i.e. all match the given pattern
+        ("*", None),
+        ("**/*", None),
+        ("/**/*", None),
+        ("/./*", None),
+        (
+            "*/*",
+            [
+                "etc/server/config",
+                "etc/server/hosts",
+                "home/user/.profile",
+                "home/user/.bashrc",
+                "home/user2/.profile",
+                "home/user2/public_html/index.html",
+                "srv/messages",
+                "srv/dmesg",
+                "srv2/blafasel",
+                "home/foo/.thumbnails",
+                "home/foo/bar/.thumbnails",
+            ],
+        ),
+        (
+            "*///*",
+            [
+                "etc/server/config",
+                "etc/server/hosts",
+                "home/user/.profile",
+                "home/user/.bashrc",
+                "home/user2/.profile",
+                "home/user2/public_html/index.html",
+                "srv/messages",
+                "srv/dmesg",
+                "srv2/blafasel",
+                "home/foo/.thumbnails",
+                "home/foo/bar/.thumbnails",
+            ],
+        ),
+        ("/home/u", []),
+        (
+            "/home/*",
+            [
+                "home/user/.profile",
+                "home/user/.bashrc",
+                "home/user2/.profile",
+                "home/user2/public_html/index.html",
+                "home/foo/.thumbnails",
+                "home/foo/bar/.thumbnails",
+            ],
+        ),
+        ("/home/user/*", ["home/user/.profile", "home/user/.bashrc"]),
+        ("/etc/*/*", ["etc/server/config", "etc/server/hosts"]),
+        ("/etc/**/*", ["etc/server/config", "etc/server/hosts"]),
+        ("/etc/**/*/*", ["etc/server/config", "etc/server/hosts"]),
+        ("*/.pr????e", []),
+        ("**/.pr????e", ["home/user/.profile", "home/user2/.profile"]),
+        ("///etc//////*", ["etc/server/config", "etc/server/hosts"]),
+        ("/./home//..//home/user2/", ["home/user2/.profile", "home/user2/public_html/index.html"]),
+        ("/./home//..//home/user2/**/*", ["home/user2/.profile", "home/user2/public_html/index.html"]),
+        ("/srv*/", ["srv/messages", "srv/dmesg", "srv2/blafasel"]),
+        ("/srv*", ["srv", "srv/messages", "srv/dmesg", "srv2", "srv2/blafasel"]),
+        ("/srv/*", ["srv/messages", "srv/dmesg"]),
+        ("/srv2/**", ["srv2", "srv2/blafasel"]),
+        ("/srv2/**/", ["srv2/blafasel"]),
+        ("/home/*/.thumbnails", ["home/foo/.thumbnails"]),
+        ("/home/*/*/.thumbnails", ["home/foo/bar/.thumbnails"]),
+    ],
+)
 def test_patterns_shell(pattern, expected):
     files = [
-        "etc/server/config", "etc/server/hosts", "home", "home/user/.profile", "home/user/.bashrc",
-        "home/user2/.profile", "home/user2/public_html/index.html", "srv", "srv/messages", "srv/dmesg",
-        "srv2", "srv2/blafasel", "home/foo/.thumbnails", "home/foo/bar/.thumbnails",
+        "etc/server/config",
+        "etc/server/hosts",
+        "home",
+        "home/user/.profile",
+        "home/user/.bashrc",
+        "home/user2/.profile",
+        "home/user2/public_html/index.html",
+        "srv",
+        "srv/messages",
+        "srv/dmesg",
+        "srv2",
+        "srv2/blafasel",
+        "home/foo/.thumbnails",
+        "home/foo/bar/.thumbnails",
     ]

     check_patterns(files, ShellPattern(pattern), expected)


-@pytest.mark.parametrize("pattern, expected", [
-    # "None" means all files, i.e. all match the given pattern
-    ("", None),
-    (".*", None),
-    ("^/", None),
-    ("^abc$", []),
-    ("^[^/]", []),
-    ("^(?!/srv|/foo|/opt)",
-     ["/home", "/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile",
-      "/home/user2/public_html/index.html", "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails", ]),
-    ])
+@pytest.mark.parametrize(
+    "pattern, expected",
+    [
+        # "None" means all files, i.e. all match the given pattern
+        ("", None),
+        (".*", None),
+        ("^/", None),
+        ("^abc$", []),
+        ("^[^/]", []),
+        (
+            "^(?!/srv|/foo|/opt)",
+            [
+                "/home",
+                "/home/user/.profile",
+                "/home/user/.bashrc",
+                "/home/user2/.profile",
+                "/home/user2/public_html/index.html",
+                "/home/foo/.thumbnails",
+                "/home/foo/bar/.thumbnails",
+            ],
+        ),
+    ],
+)
 def test_patterns_regex(pattern, expected):
     files = [
-        '/srv/data', '/foo/bar', '/home',
-        '/home/user/.profile', '/home/user/.bashrc',
-        '/home/user2/.profile', '/home/user2/public_html/index.html',
-        '/opt/log/messages.txt', '/opt/log/dmesg.txt',
-        "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails",
+        "/srv/data",
+        "/foo/bar",
+        "/home",
+        "/home/user/.profile",
+        "/home/user/.bashrc",
+        "/home/user2/.profile",
+        "/home/user2/public_html/index.html",
+        "/opt/log/messages.txt",
+        "/opt/log/dmesg.txt",
+        "/home/foo/.thumbnails",
+        "/home/foo/bar/.thumbnails",
     ]

     obj = RegexPattern(pattern)
@@ -202,11 +325,12 @@ def use_normalized_unicode():


 def _make_test_patterns(pattern):
-    return [PathPrefixPattern(pattern),
-            FnmatchPattern(pattern),
-            RegexPattern(f"^{pattern}/foo$"),
-            ShellPattern(pattern),
-            ]
+    return [
+        PathPrefixPattern(pattern),
+        FnmatchPattern(pattern),
+        RegexPattern(f"^{pattern}/foo$"),
+        ShellPattern(pattern),
+    ]


 @pytest.mark.parametrize("pattern", _make_test_patterns("b\N{LATIN SMALL LETTER A WITH ACUTE}"))
@@ -227,51 +351,63 @@ def test_invalid_unicode_pattern(pattern):
     assert pattern.match(str(b"ba\x80/foo", "latin1"))


-@pytest.mark.parametrize("lines, expected", [
-    # "None" means all files, i.e. none excluded
-    ([], None),
-    (["# Comment only"], None),
-    (["*"], []),
-    (["# Comment",
-      "*/something00.txt",
-      "  *whitespace*  ",
-      # Whitespace before comment
-      " #/ws*",
-      # Empty line
-      "",
-      "# EOF"],
-     ["more/data", "home", " #/wsfoobar"]),
-    ([r"re:.*"], []),
-    ([r"re:\s"], ["data/something00.txt", "more/data", "home"]),
-    ([r"re:(.)(\1)"], ["more/data", "home", "\tstart/whitespace", "whitespace/end\t"]),
-    (["", "", "",
-      "# This is a test with mixed pattern styles",
-      # Case-insensitive pattern
-      r"re:(?i)BAR|ME$",
-      "",
-      "*whitespace*",
-      "fm:*/something00*"],
-     ["more/data"]),
-    ([r"  re:^\s  "], ["data/something00.txt", "more/data", "home", "whitespace/end\t"]),
-    ([r"  re:\s$  "], ["data/something00.txt", "more/data", "home", " #/wsfoobar", "\tstart/whitespace"]),
-    (["pp:./"], None),
-    # leading slash is removed
-    (["pp:/"], []),
-    (["pp:aaabbb"], None),
-    (["pp:/data", "pp: #/", "pp:\tstart", "pp:/whitespace"], ["more/data", "home"]),
-    (["/nomatch", "/more/*"],
-     ['data/something00.txt', 'home', ' #/wsfoobar', '\tstart/whitespace', 'whitespace/end\t']),
-    # the order of exclude patterns shouldn't matter
-    (["/more/*", "/nomatch"],
-     ['data/something00.txt', 'home', ' #/wsfoobar', '\tstart/whitespace', 'whitespace/end\t']),
-    ])
+@pytest.mark.parametrize(
+    "lines, expected",
+    [
+        # "None" means all files, i.e. none excluded
+        ([], None),
+        (["# Comment only"], None),
+        (["*"], []),
+        (
+            [
+                "# Comment",
+                "*/something00.txt",
+                "  *whitespace*  ",
+                # Whitespace before comment
+                " #/ws*",
+                # Empty line
+                "",
+                "# EOF",
+            ],
+            ["more/data", "home", " #/wsfoobar"],
+        ),
+        ([r"re:.*"], []),
+        ([r"re:\s"], ["data/something00.txt", "more/data", "home"]),
+        ([r"re:(.)(\1)"], ["more/data", "home", "\tstart/whitespace", "whitespace/end\t"]),
+        (
+            [
+                "",
+                "",
+                "",
+                "# This is a test with mixed pattern styles",
+                # Case-insensitive pattern
+                r"re:(?i)BAR|ME$",
+                "",
+                "*whitespace*",
+                "fm:*/something00*",
+            ],
+            ["more/data"],
+        ),
+        ([r"  re:^\s  "], ["data/something00.txt", "more/data", "home", "whitespace/end\t"]),
+        ([r"  re:\s$  "], ["data/something00.txt", "more/data", "home", " #/wsfoobar", "\tstart/whitespace"]),
+        (["pp:./"], None),
+        # leading slash is removed
+        (["pp:/"], []),
+        (["pp:aaabbb"], None),
+        (["pp:/data", "pp: #/", "pp:\tstart", "pp:/whitespace"], ["more/data", "home"]),
+        (
+            ["/nomatch", "/more/*"],
+            ["data/something00.txt", "home", " #/wsfoobar", "\tstart/whitespace", "whitespace/end\t"],
+        ),
+        # the order of exclude patterns shouldn't matter
+        (
+            ["/more/*", "/nomatch"],
+            ["data/something00.txt", "home", " #/wsfoobar", "\tstart/whitespace", "whitespace/end\t"],
+        ),
+    ],
+)
 def test_exclude_patterns_from_file(tmpdir, lines, expected):
-    files = [
-        'data/something00.txt', 'more/data', 'home',
-        ' #/wsfoobar',
-        '\tstart/whitespace',
-        'whitespace/end\t',
-    ]
+    files = ["data/something00.txt", "more/data", "home", " #/wsfoobar", "\tstart/whitespace", "whitespace/end\t"]

     def evaluate(filename):
         patterns = []
@@ -288,26 +424,26 @@ def test_exclude_patterns_from_file(tmpdir, lines, expected):
     assert evaluate(str(exclfile)) == (files if expected is None else expected)


-@pytest.mark.parametrize("lines, expected_roots, expected_numpatterns", [
-    # "None" means all files, i.e. none excluded
-    ([], [], 0),
-    (["# Comment only"], [], 0),
-    (["- *"], [], 1),
-    (["+fm:*/something00.txt",
-      "-/data"], [], 2),
-    (["R /"], ["/"], 0),
-    (["R /",
-      "# comment"], ["/"], 0),
-    (["# comment",
-      "- /data",
-      "R /home"], ["/home"], 1),
-])
+@pytest.mark.parametrize(
+    "lines, expected_roots, expected_numpatterns",
+    [
+        # "None" means all files, i.e. none excluded
+        ([], [], 0),
+        (["# Comment only"], [], 0),
+        (["- *"], [], 1),
+        (["+fm:*/something00.txt", "-/data"], [], 2),
+        (["R /"], ["/"], 0),
+        (["R /", "# comment"], ["/"], 0),
+        (["# comment", "- /data", "R /home"], ["/home"], 1),
+    ],
+)
 def test_load_patterns_from_file(tmpdir, lines, expected_roots, expected_numpatterns):
     def evaluate(filename):
         roots = []
         inclexclpatterns = []
         load_pattern_file(open(filename), roots, inclexclpatterns)
         return roots, len(inclexclpatterns)
+
     patternfile = tmpdir.join("patterns.txt")

     with patternfile.open("wt") as fh:
@@ -344,10 +480,9 @@ def test_switch_patterns_style():
     assert isinstance(patterns[5].val, ShellPattern)


-@pytest.mark.parametrize("lines", [
-    (["X /data"]),  # illegal pattern type prefix
-    (["/data"]),    # need a pattern type prefix
-])
+@pytest.mark.parametrize(
+    "lines", [(["X /data"]), (["/data"])]  # illegal pattern type prefix  # need a pattern type prefix
+)
 def test_load_invalid_patterns_from_file(tmpdir, lines):
     patternfile = tmpdir.join("patterns.txt")
     with patternfile.open("wt") as fh:
@@ -359,41 +494,47 @@ def test_load_invalid_patterns_from_file(tmpdir, lines):
         load_pattern_file(open(filename), roots, inclexclpatterns)


-@pytest.mark.parametrize("lines, expected", [
-    # "None" means all files, i.e. none excluded
-    ([], None),
-    (["# Comment only"], None),
-    (["- *"], []),
-    # default match type is sh: for patterns -> * doesn't match a /
-    (["-*/something0?.txt"],
-     ['data', 'data/subdir/something01.txt',
-      'home', 'home/leo', 'home/leo/t', 'home/other']),
-    (["-fm:*/something00.txt"],
-     ['data', 'data/subdir/something01.txt', 'home', 'home/leo', 'home/leo/t', 'home/other']),
-    (["-fm:*/something0?.txt"],
-     ["data", 'home', 'home/leo', 'home/leo/t', 'home/other']),
-    (["+/*/something0?.txt",
-      "-/data"],
-     ["data/something00.txt", 'home', 'home/leo', 'home/leo/t', 'home/other']),
-    (["+fm:*/something00.txt",
-      "-/data"],
-     ["data/something00.txt", 'home', 'home/leo', 'home/leo/t', 'home/other']),
-    # include /home/leo and exclude the rest of /home:
-    (["+/home/leo",
-      "-/home/*"],
-     ['data', 'data/something00.txt', 'data/subdir/something01.txt', 'home', 'home/leo', 'home/leo/t']),
-    # wrong order, /home/leo is already excluded by -/home/*:
-    (["-/home/*",
-      "+/home/leo"],
-     ['data', 'data/something00.txt', 'data/subdir/something01.txt', 'home']),
-    (["+fm:/home/leo",
-      "-/home/"],
-     ['data', 'data/something00.txt', 'data/subdir/something01.txt', 'home', 'home/leo', 'home/leo/t']),
-])
+@pytest.mark.parametrize(
+    "lines, expected",
+    [
+        # "None" means all files, i.e. none excluded
+        ([], None),
+        (["# Comment only"], None),
+        (["- *"], []),
+        # default match type is sh: for patterns -> * doesn't match a /
+        (
+            ["-*/something0?.txt"],
+            ["data", "data/subdir/something01.txt", "home", "home/leo", "home/leo/t", "home/other"],
+        ),
+        (
+            ["-fm:*/something00.txt"],
+            ["data", "data/subdir/something01.txt", "home", "home/leo", "home/leo/t", "home/other"],
+        ),
+        (["-fm:*/something0?.txt"], ["data", "home", "home/leo", "home/leo/t", "home/other"]),
+        (["+/*/something0?.txt", "-/data"], ["data/something00.txt", "home", "home/leo", "home/leo/t", "home/other"]),
+        (["+fm:*/something00.txt", "-/data"], ["data/something00.txt", "home", "home/leo", "home/leo/t", "home/other"]),
+        # include /home/leo and exclude the rest of /home:
+        (
+            ["+/home/leo", "-/home/*"],
+            ["data", "data/something00.txt", "data/subdir/something01.txt", "home", "home/leo", "home/leo/t"],
+        ),
+        # wrong order, /home/leo is already excluded by -/home/*:
+        (["-/home/*", "+/home/leo"], ["data", "data/something00.txt", "data/subdir/something01.txt", "home"]),
+        (
+            ["+fm:/home/leo", "-/home/"],
+            ["data", "data/something00.txt", "data/subdir/something01.txt", "home", "home/leo", "home/leo/t"],
+        ),
+    ],
+)
 def test_inclexcl_patterns_from_file(tmpdir, lines, expected):
     files = [
-        'data', 'data/something00.txt', 'data/subdir/something01.txt',
-        'home', 'home/leo', 'home/leo/t', 'home/other'
+        "data",
+        "data/something00.txt",
+        "data/subdir/something01.txt",
+        "home",
+        "home/leo",
+        "home/leo/t",
+        "home/other",
     ]

     def evaluate(filename):
@@ -412,37 +553,35 @@ def test_inclexcl_patterns_from_file(tmpdir, lines, expected):
     assert evaluate(str(patternfile)) == (files if expected is None else expected)


-@pytest.mark.parametrize("pattern, cls", [
-    ("", FnmatchPattern),
-
-    # Default style
-    ("*", FnmatchPattern),
-    ("/data/*", FnmatchPattern),
-
-    # fnmatch style
-    ("fm:", FnmatchPattern),
-    ("fm:*", FnmatchPattern),
-    ("fm:/data/*", FnmatchPattern),
-    ("fm:fm:/data/*", FnmatchPattern),
-
-    # Regular expression
-    ("re:", RegexPattern),
-    ("re:.*", RegexPattern),
-    ("re:^/something/", RegexPattern),
-    ("re:re:^/something/", RegexPattern),
-
-    # Path prefix
-    ("pp:", PathPrefixPattern),
-    ("pp:/", PathPrefixPattern),
-    ("pp:/data/", PathPrefixPattern),
-    ("pp:pp:/data/", PathPrefixPattern),
-
-    # Shell-pattern style
-    ("sh:", ShellPattern),
-    ("sh:*", ShellPattern),
-    ("sh:/data/*", ShellPattern),
-    ("sh:sh:/data/*", ShellPattern),
-    ])
+@pytest.mark.parametrize(
+    "pattern, cls",
+    [
+        ("", FnmatchPattern),
+        # Default style
+        ("*", FnmatchPattern),
+        ("/data/*", FnmatchPattern),
+        # fnmatch style
+        ("fm:", FnmatchPattern),
+        ("fm:*", FnmatchPattern),
+        ("fm:/data/*", FnmatchPattern),
+        ("fm:fm:/data/*", FnmatchPattern),
+        # Regular expression
+        ("re:", RegexPattern),
+        ("re:.*", RegexPattern),
+        ("re:^/something/", RegexPattern),
+        ("re:re:^/something/", RegexPattern),
+        # Path prefix
+        ("pp:", PathPrefixPattern),
+        ("pp:/", PathPrefixPattern),
+        ("pp:/data/", PathPrefixPattern),
+        ("pp:pp:/data/", PathPrefixPattern),
+        # Shell-pattern style
+        ("sh:", ShellPattern),
+        ("sh:*", ShellPattern),
+        ("sh:/data/*", ShellPattern),
+        ("sh:sh:/data/*", ShellPattern),
+    ],
+)
 def test_parse_pattern(pattern, cls):
     assert isinstance(parse_pattern(pattern), cls)


+ 84 - 56
src/borg/testsuite/platform.py

@@ -21,7 +21,9 @@ group:root:r--:0
 group:9999:r--:9999
 mask::rw-
 other::r--
-""".strip().encode('ascii')
+""".strip().encode(
+    "ascii"
+)

 DEFAULT_ACL = """
 user::rw-
@@ -32,18 +34,21 @@ group:root:r--:0
 group:8888:r--:8888
 mask::rw-
 other::r--
-""".strip().encode('ascii')
+""".strip().encode(
+    "ascii"
+)

 _acls_working = None


 def fakeroot_detected():
-    return 'FAKEROOTKEY' in os.environ
+    return "FAKEROOTKEY" in os.environ
 
 
 
 
 def user_exists(username):
 def user_exists(username):
     if not is_win32:
     if not is_win32:
         import pwd
         import pwd
+
         try:
             pwd.getpwnam(username)
             return True
@@ -55,25 +60,24 @@ def user_exists(username):
 @functools.lru_cache
 def are_acls_working():
     with unopened_tempfile() as filepath:
-        open(filepath, 'w').close()
+        open(filepath, "w").close()
         try:
-            access = b'user::rw-\ngroup::r--\nmask::rw-\nother::---\nuser:root:rw-:9999\ngroup:root:rw-:9999\n'
-            acl = {'acl_access': access}
+            access = b"user::rw-\ngroup::r--\nmask::rw-\nother::---\nuser:root:rw-:9999\ngroup:root:rw-:9999\n"
+            acl = {"acl_access": access}
             acl_set(filepath, acl)
             read_acl = {}
             acl_get(filepath, read_acl, os.stat(filepath))
-            read_acl_access = read_acl.get('acl_access', None)
-            if read_acl_access and b'user::rw-' in read_acl_access:
+            read_acl_access = read_acl.get("acl_access", None)
+            if read_acl_access and b"user::rw-" in read_acl_access:
                 return True
         except PermissionError:
             pass
         return False


-@unittest.skipUnless(sys.platform.startswith('linux'), 'linux only test')
-@unittest.skipIf(fakeroot_detected(), 'not compatible with fakeroot')
+@unittest.skipUnless(sys.platform.startswith("linux"), "linux only test")
+@unittest.skipIf(fakeroot_detected(), "not compatible with fakeroot")
 class PlatformLinuxTestCase(BaseTestCase):
-
     def setUp(self):
         self.tmpdir = tempfile.mkdtemp()

@@ -86,72 +90,80 @@ class PlatformLinuxTestCase(BaseTestCase):
         return item

     def set_acl(self, path, access=None, default=None, numeric_ids=False):
-        item = {'acl_access': access, 'acl_default': default}
+        item = {"acl_access": access, "acl_default": default}
         acl_set(path, item, numeric_ids=numeric_ids)

-    @unittest.skipIf(not are_acls_working(), 'ACLs do not work')
+    @unittest.skipIf(not are_acls_working(), "ACLs do not work")
     def test_access_acl(self):
         file = tempfile.NamedTemporaryFile()
         self.assert_equal(self.get_acl(file.name), {})
-        self.set_acl(file.name, access=b'user::rw-\ngroup::r--\nmask::rw-\nother::---\nuser:root:rw-:9999\ngroup:root:rw-:9999\n', numeric_ids=False)
-        self.assert_in(b'user:root:rw-:0', self.get_acl(file.name)['acl_access'])
-        self.assert_in(b'group:root:rw-:0', self.get_acl(file.name)['acl_access'])
-        self.assert_in(b'user:0:rw-:0', self.get_acl(file.name, numeric_ids=True)['acl_access'])
+        self.set_acl(
+            file.name,
+            access=b"user::rw-\ngroup::r--\nmask::rw-\nother::---\nuser:root:rw-:9999\ngroup:root:rw-:9999\n",
+            numeric_ids=False,
+        )
+        self.assert_in(b"user:root:rw-:0", self.get_acl(file.name)["acl_access"])
+        self.assert_in(b"group:root:rw-:0", self.get_acl(file.name)["acl_access"])
+        self.assert_in(b"user:0:rw-:0", self.get_acl(file.name, numeric_ids=True)["acl_access"])
         file2 = tempfile.NamedTemporaryFile()
-        self.set_acl(file2.name, access=b'user::rw-\ngroup::r--\nmask::rw-\nother::---\nuser:root:rw-:9999\ngroup:root:rw-:9999\n', numeric_ids=True)
-        self.assert_in(b'user:9999:rw-:9999', self.get_acl(file2.name)['acl_access'])
-        self.assert_in(b'group:9999:rw-:9999', self.get_acl(file2.name)['acl_access'])
-
-    @unittest.skipIf(not are_acls_working(), 'ACLs do not work')
+        self.set_acl(
+            file2.name,
+            access=b"user::rw-\ngroup::r--\nmask::rw-\nother::---\nuser:root:rw-:9999\ngroup:root:rw-:9999\n",
+            numeric_ids=True,
+        )
+        self.assert_in(b"user:9999:rw-:9999", self.get_acl(file2.name)["acl_access"])
+        self.assert_in(b"group:9999:rw-:9999", self.get_acl(file2.name)["acl_access"])
+
+    @unittest.skipIf(not are_acls_working(), "ACLs do not work")
     def test_default_acl(self):
         self.assert_equal(self.get_acl(self.tmpdir), {})
         self.set_acl(self.tmpdir, access=ACCESS_ACL, default=DEFAULT_ACL)
-        self.assert_equal(self.get_acl(self.tmpdir)['acl_access'], ACCESS_ACL)
-        self.assert_equal(self.get_acl(self.tmpdir)['acl_default'], DEFAULT_ACL)
+        self.assert_equal(self.get_acl(self.tmpdir)["acl_access"], ACCESS_ACL)
+        self.assert_equal(self.get_acl(self.tmpdir)["acl_default"], DEFAULT_ACL)
 
 
-    @unittest.skipIf(not user_exists('übel'), 'requires übel user')
-    @unittest.skipIf(not are_acls_working(), 'ACLs do not work')
+    @unittest.skipIf(not user_exists("übel"), "requires übel user")
+    @unittest.skipIf(not are_acls_working(), "ACLs do not work")
     def test_non_ascii_acl(self):
         # Testing non-ascii ACL processing to see whether our code is robust.
         # I have no idea whether non-ascii ACLs are allowed by the standard,
         # but in practice they seem to be out there and must not make our code explode.
         file = tempfile.NamedTemporaryFile()
         self.assert_equal(self.get_acl(file.name), {})
-        nothing_special = b'user::rw-\ngroup::r--\nmask::rw-\nother::---\n'
+        nothing_special = b"user::rw-\ngroup::r--\nmask::rw-\nother::---\n"
         # TODO: can this be tested without having an existing system user übel with uid 666 gid 666?
-        user_entry = 'user:übel:rw-:666'.encode()
-        user_entry_numeric = b'user:666:rw-:666'
-        group_entry = 'group:übel:rw-:666'.encode()
-        group_entry_numeric = b'group:666:rw-:666'
-        acl = b'\n'.join([nothing_special, user_entry, group_entry])
+        user_entry = "user:übel:rw-:666".encode()
+        user_entry_numeric = b"user:666:rw-:666"
+        group_entry = "group:übel:rw-:666".encode()
+        group_entry_numeric = b"group:666:rw-:666"
+        acl = b"\n".join([nothing_special, user_entry, group_entry])
         self.set_acl(file.name, access=acl, numeric_ids=False)
-        acl_access = self.get_acl(file.name, numeric_ids=False)['acl_access']
+        acl_access = self.get_acl(file.name, numeric_ids=False)["acl_access"]
         self.assert_in(user_entry, acl_access)
         self.assert_in(group_entry, acl_access)
-        acl_access_numeric = self.get_acl(file.name, numeric_ids=True)['acl_access']
+        acl_access_numeric = self.get_acl(file.name, numeric_ids=True)["acl_access"]
         self.assert_in(user_entry_numeric, acl_access_numeric)
         self.assert_in(group_entry_numeric, acl_access_numeric)
         file2 = tempfile.NamedTemporaryFile()
         self.set_acl(file2.name, access=acl, numeric_ids=True)
-        acl_access = self.get_acl(file2.name, numeric_ids=False)['acl_access']
+        acl_access = self.get_acl(file2.name, numeric_ids=False)["acl_access"]
         self.assert_in(user_entry, acl_access)
         self.assert_in(group_entry, acl_access)
-        acl_access_numeric = self.get_acl(file.name, numeric_ids=True)['acl_access']
+        acl_access_numeric = self.get_acl(file.name, numeric_ids=True)["acl_access"]
         self.assert_in(user_entry_numeric, acl_access_numeric)
         self.assert_in(group_entry_numeric, acl_access_numeric)

     def test_utils(self):
         from ..platform.linux import acl_use_local_uid_gid
-        self.assert_equal(acl_use_local_uid_gid(b'user:nonexistent1234:rw-:1234'), b'user:1234:rw-')
-        self.assert_equal(acl_use_local_uid_gid(b'group:nonexistent1234:rw-:1234'), b'group:1234:rw-')
-        self.assert_equal(acl_use_local_uid_gid(b'user:root:rw-:0'), b'user:0:rw-')
-        self.assert_equal(acl_use_local_uid_gid(b'group:root:rw-:0'), b'group:0:rw-')

+        self.assert_equal(acl_use_local_uid_gid(b"user:nonexistent1234:rw-:1234"), b"user:1234:rw-")
+        self.assert_equal(acl_use_local_uid_gid(b"group:nonexistent1234:rw-:1234"), b"group:1234:rw-")
+        self.assert_equal(acl_use_local_uid_gid(b"user:root:rw-:0"), b"user:0:rw-")
+        self.assert_equal(acl_use_local_uid_gid(b"group:root:rw-:0"), b"group:0:rw-")
 
 
-@unittest.skipUnless(sys.platform.startswith('darwin'), 'OS X only test')
-@unittest.skipIf(fakeroot_detected(), 'not compatible with fakeroot')
-class PlatformDarwinTestCase(BaseTestCase):

+@unittest.skipUnless(sys.platform.startswith("darwin"), "OS X only test")
+@unittest.skipIf(fakeroot_detected(), "not compatible with fakeroot")
+class PlatformDarwinTestCase(BaseTestCase):
     def setUp(self):
         self.tmpdir = tempfile.mkdtemp()

@@ -164,25 +176,41 @@ class PlatformDarwinTestCase(BaseTestCase):
         return item

     def set_acl(self, path, acl, numeric_ids=False):
-        item = {'acl_extended': acl}
+        item = {"acl_extended": acl}
         acl_set(path, item, numeric_ids=numeric_ids)

-    @unittest.skipIf(not are_acls_working(), 'ACLs do not work')
+    @unittest.skipIf(not are_acls_working(), "ACLs do not work")
     def test_access_acl(self):
         file = tempfile.NamedTemporaryFile()
         file2 = tempfile.NamedTemporaryFile()
         self.assert_equal(self.get_acl(file.name), {})
-        self.set_acl(file.name, b'!#acl 1\ngroup:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:staff:0:allow:read\nuser:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read\n', numeric_ids=False)
-        self.assert_in(b'group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000014:staff:20:allow:read', self.get_acl(file.name)['acl_extended'])
-        self.assert_in(b'user:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read', self.get_acl(file.name)['acl_extended'])
-        self.set_acl(file2.name, b'!#acl 1\ngroup:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:staff:0:allow:read\nuser:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read\n', numeric_ids=True)
-        self.assert_in(b'group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:wheel:0:allow:read', self.get_acl(file2.name)['acl_extended'])
-        self.assert_in(b'group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000::0:allow:read', self.get_acl(file2.name, numeric_ids=True)['acl_extended'])
-
-
-@unittest.skipUnless(sys.platform.startswith(('linux', 'freebsd', 'darwin')), 'POSIX only tests')
+        self.set_acl(
+            file.name,
+            b"!#acl 1\ngroup:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:staff:0:allow:read\nuser:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read\n",
+            numeric_ids=False,
+        )
+        self.assert_in(
+            b"group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000014:staff:20:allow:read", self.get_acl(file.name)["acl_extended"]
+        )
+        self.assert_in(
+            b"user:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read", self.get_acl(file.name)["acl_extended"]
+        )
+        self.set_acl(
+            file2.name,
+            b"!#acl 1\ngroup:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:staff:0:allow:read\nuser:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read\n",
+            numeric_ids=True,
+        )
+        self.assert_in(
+            b"group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:wheel:0:allow:read", self.get_acl(file2.name)["acl_extended"]
+        )
+        self.assert_in(
+            b"group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000::0:allow:read",
+            self.get_acl(file2.name, numeric_ids=True)["acl_extended"],
+        )
+
+
+@unittest.skipUnless(sys.platform.startswith(("linux", "freebsd", "darwin")), "POSIX only tests")
 class PlatformPosixTestCase(BaseTestCase):
-
     def test_swidth_ascii(self):
         self.assert_equal(swidth("borg"), 4)

@@ -197,7 +225,7 @@ def test_process_alive(free_pid):
     id = get_process_id()
     assert process_alive(*id)
     host, pid, tid = id
-    assert process_alive(host + 'abc', pid, tid)
+    assert process_alive(host + "abc", pid, tid)
     assert process_alive(host, pid, tid + 1)
     assert not process_alive(host, free_pid, tid)


+ 22 - 22
src/borg/testsuite/remote.py

@@ -72,10 +72,10 @@ class TestSleepingBandwidthLimiter:
 class TestRepositoryCache:
     @pytest.fixture
     def repository(self, tmpdir):
-        self.repository_location = os.path.join(str(tmpdir), 'repository')
+        self.repository_location = os.path.join(str(tmpdir), "repository")
         with Repository(self.repository_location, exclusive=True, create=True) as repository:
-            repository.put(H(1), b'1234')
-            repository.put(H(2), b'5678')
+            repository.put(H(1), b"1234")
+            repository.put(H(2), b"5678")
             repository.put(H(3), bytes(100))
             yield repository

@@ -85,19 +85,19 @@ class TestRepositoryCache:

     def test_simple(self, cache: RepositoryCache):
         # Single get()s are not cached, since they are used for unique objects like archives.
-        assert cache.get(H(1)) == b'1234'
+        assert cache.get(H(1)) == b"1234"
         assert cache.misses == 1
         assert cache.hits == 0

-        assert list(cache.get_many([H(1)])) == [b'1234']
+        assert list(cache.get_many([H(1)])) == [b"1234"]
         assert cache.misses == 2
         assert cache.hits == 0

-        assert list(cache.get_many([H(1)])) == [b'1234']
+        assert list(cache.get_many([H(1)])) == [b"1234"]
         assert cache.misses == 2
         assert cache.hits == 1

-        assert cache.get(H(1)) == b'1234'
+        assert cache.get(H(1)) == b"1234"
         assert cache.misses == 2
         assert cache.hits == 2

@@ -105,11 +105,11 @@ class TestRepositoryCache:
         def query_size_limit():
             cache.size_limit = 0

-        assert list(cache.get_many([H(1), H(2)])) == [b'1234', b'5678']
+        assert list(cache.get_many([H(1), H(2)])) == [b"1234", b"5678"]
         assert cache.misses == 2
         assert cache.evictions == 0
         iterator = cache.get_many([H(1), H(3), H(2)])
-        assert next(iterator) == b'1234'
+        assert next(iterator) == b"1234"
 
 
         # Force cache to back off
         # Force cache to back off
         qsl = cache.query_size_limit
         qsl = cache.query_size_limit
@@ -124,7 +124,7 @@ class TestRepositoryCache:
         assert cache.slow_misses == 0
         # Since H(2) was in the cache when we called get_many(), but has
         # been evicted during iterating the generator, it will be a slow miss.
-        assert next(iterator) == b'5678'
+        assert next(iterator) == b"5678"
         assert cache.slow_misses == 1

     def test_enospc(self, cache: RepositoryCache):
@@ -139,16 +139,16 @@ class TestRepositoryCache:
                 pass

             def write(self, data):
-                raise OSError(errno.ENOSPC, 'foo')
+                raise OSError(errno.ENOSPC, "foo")
 
 
             def truncate(self, n=None):
             def truncate(self, n=None):
                 pass
                 pass
 
 
         iterator = cache.get_many([H(1), H(2), H(3)])
         iterator = cache.get_many([H(1), H(2), H(3)])
-        assert next(iterator) == b'1234'
+        assert next(iterator) == b"1234"
 
 
-        with patch('builtins.open', enospc_open):
-            assert next(iterator) == b'5678'
+        with patch("builtins.open", enospc_open):
+            assert next(iterator) == b"5678"
             assert cache.enospc == 1
             # We didn't patch query_size_limit which would set size_limit to some low
             # value, so nothing was actually evicted.
@@ -158,9 +158,9 @@ class TestRepositoryCache:

     @pytest.fixture
     def key(self, repository, monkeypatch):
-        monkeypatch.setenv('BORG_PASSPHRASE', 'test')
+        monkeypatch.setenv("BORG_PASSPHRASE", "test")
         key = PlaintextKey.create(repository, TestKey.MockArgs())
-        key.compressor = CompressionSpec('none').compressor
+        key.compressor = CompressionSpec("none").compressor
         return key

     def _put_encrypted_object(self, key, repository, data):
@@ -170,11 +170,11 @@ class TestRepositoryCache:

     @pytest.fixture
     def H1(self, key, repository):
-        return self._put_encrypted_object(key, repository, b'1234')
+        return self._put_encrypted_object(key, repository, b"1234")
 
 
     @pytest.fixture
     @pytest.fixture
     def H2(self, key, repository):
     def H2(self, key, repository):
-        return self._put_encrypted_object(key, repository, b'5678')
+        return self._put_encrypted_object(key, repository, b"5678")
 
 
     @pytest.fixture
     @pytest.fixture
     def H3(self, key, repository):
     def H3(self, key, repository):
@@ -188,14 +188,14 @@ class TestRepositoryCache:
         list(decrypted_cache.get_many([H1, H2, H3]))

         iterator = decrypted_cache.get_many([H1, H2, H3])
-        assert next(iterator) == (7, b'1234')
+        assert next(iterator) == (7, b"1234")
 
 
-        with open(decrypted_cache.key_filename(H2), 'a+b') as fd:
+        with open(decrypted_cache.key_filename(H2), "a+b") as fd:
             fd.seek(-1, io.SEEK_END)
-            corrupted = (int.from_bytes(fd.read(), 'little') ^ 2).to_bytes(1, 'little')
+            corrupted = (int.from_bytes(fd.read(), "little") ^ 2).to_bytes(1, "little")
             fd.seek(-1, io.SEEK_END)
             fd.write(corrupted)
             fd.truncate()

         with pytest.raises(IntegrityError):
-            assert next(iterator) == (7, b'5678')
+            assert next(iterator) == (7, b"5678")

File diff suppressed because it is too large
+ 229 - 212
src/borg/testsuite/repository.py


+ 90 - 97
src/borg/testsuite/shellpattern.py

@@ -11,103 +11,96 @@ def check(path, pattern):
     return bool(compiled.match(path))


-@pytest.mark.parametrize("path, patterns", [
-    # Literal string
-    ("foo/bar", ["foo/bar"]),
-    ("foo\\bar", ["foo\\bar"]),
-
-    # Non-ASCII
-    ("foo/c/\u0152/e/bar", ["foo/*/\u0152/*/bar", "*/*/\u0152/*/*", "**/\u0152/*/*"]),
-    ("\u00e4\u00f6\u00dc", ["???", "*", "\u00e4\u00f6\u00dc", "[\u00e4][\u00f6][\u00dc]"]),
-
-    # Question mark
-    ("foo", ["fo?"]),
-    ("foo", ["f?o"]),
-    ("foo", ["f??"]),
-    ("foo", ["?oo"]),
-    ("foo", ["?o?"]),
-    ("foo", ["??o"]),
-    ("foo", ["???"]),
-
-    # Single asterisk
-    ("", ["*"]),
-    ("foo", ["*", "**", "***"]),
-    ("foo", ["foo*"]),
-    ("foobar", ["foo*"]),
-    ("foobar", ["foo*bar"]),
-    ("foobarbaz", ["foo*baz"]),
-    ("bar", ["*bar"]),
-    ("foobar", ["*bar"]),
-    ("foo/bar", ["foo/*bar"]),
-    ("foo/bar", ["foo/*ar"]),
-    ("foo/bar", ["foo/*r"]),
-    ("foo/bar", ["foo/*"]),
-    ("foo/bar", ["foo*/bar"]),
-    ("foo/bar", ["fo*/bar"]),
-    ("foo/bar", ["f*/bar"]),
-    ("foo/bar", ["*/bar"]),
-
-    # Double asterisk (matches 0..n directory layers)
-    ("foo/bar", ["foo/**/bar"]),
-    ("foo/1/bar", ["foo/**/bar"]),
-    ("foo/1/22/333/bar", ["foo/**/bar"]),
-    ("foo/", ["foo/**/"]),
-    ("foo/1/", ["foo/**/"]),
-    ("foo/1/22/333/", ["foo/**/"]),
-    ("bar", ["**/bar"]),
-    ("1/bar", ["**/bar"]),
-    ("1/22/333/bar", ["**/bar"]),
-    ("foo/bar/baz", ["foo/**/*"]),
-
-    # Set
-    ("foo1", ["foo[12]"]),
-    ("foo2", ["foo[12]"]),
-    ("foo2/bar", ["foo[12]/*"]),
-    ("f??f", ["f??f", "f[?][?]f"]),
-    ("foo]", ["foo[]]"]),
-
-    # Inverted set
-    ("foo3", ["foo[!12]"]),
-    ("foo^", ["foo[^!]"]),
-    ("foo!", ["foo[^!]"]),
-    ])
+@pytest.mark.parametrize(
+    "path, patterns",
+    [
+        # Literal string
+        ("foo/bar", ["foo/bar"]),
+        ("foo\\bar", ["foo\\bar"]),
+        # Non-ASCII
+        ("foo/c/\u0152/e/bar", ["foo/*/\u0152/*/bar", "*/*/\u0152/*/*", "**/\u0152/*/*"]),
+        ("\u00e4\u00f6\u00dc", ["???", "*", "\u00e4\u00f6\u00dc", "[\u00e4][\u00f6][\u00dc]"]),
+        # Question mark
+        ("foo", ["fo?"]),
+        ("foo", ["f?o"]),
+        ("foo", ["f??"]),
+        ("foo", ["?oo"]),
+        ("foo", ["?o?"]),
+        ("foo", ["??o"]),
+        ("foo", ["???"]),
+        # Single asterisk
+        ("", ["*"]),
+        ("foo", ["*", "**", "***"]),
+        ("foo", ["foo*"]),
+        ("foobar", ["foo*"]),
+        ("foobar", ["foo*bar"]),
+        ("foobarbaz", ["foo*baz"]),
+        ("bar", ["*bar"]),
+        ("foobar", ["*bar"]),
+        ("foo/bar", ["foo/*bar"]),
+        ("foo/bar", ["foo/*ar"]),
+        ("foo/bar", ["foo/*r"]),
+        ("foo/bar", ["foo/*"]),
+        ("foo/bar", ["foo*/bar"]),
+        ("foo/bar", ["fo*/bar"]),
+        ("foo/bar", ["f*/bar"]),
+        ("foo/bar", ["*/bar"]),
+        # Double asterisk (matches 0..n directory layers)
+        ("foo/bar", ["foo/**/bar"]),
+        ("foo/1/bar", ["foo/**/bar"]),
+        ("foo/1/22/333/bar", ["foo/**/bar"]),
+        ("foo/", ["foo/**/"]),
+        ("foo/1/", ["foo/**/"]),
+        ("foo/1/22/333/", ["foo/**/"]),
+        ("bar", ["**/bar"]),
+        ("1/bar", ["**/bar"]),
+        ("1/22/333/bar", ["**/bar"]),
+        ("foo/bar/baz", ["foo/**/*"]),
+        # Set
+        ("foo1", ["foo[12]"]),
+        ("foo2", ["foo[12]"]),
+        ("foo2/bar", ["foo[12]/*"]),
+        ("f??f", ["f??f", "f[?][?]f"]),
+        ("foo]", ["foo[]]"]),
+        # Inverted set
+        ("foo3", ["foo[!12]"]),
+        ("foo^", ["foo[^!]"]),
+        ("foo!", ["foo[^!]"]),
+    ],
+)
 def test_match(path, patterns):
     for p in patterns:
         assert check(path, p)


-@pytest.mark.parametrize("path, patterns", [
-    ("", ["?", "[]"]),
-    ("foo", ["foo?"]),
-    ("foo", ["?foo"]),
-    ("foo", ["f?oo"]),
-
-    # do not match path separator
-    ("foo/ar", ["foo?ar"]),
-
-    # do not match/cross over os.path.sep
-    ("foo/bar", ["*"]),
-    ("foo/bar", ["foo*bar"]),
-    ("foo/bar", ["foo*ar"]),
-    ("foo/bar", ["fo*bar"]),
-    ("foo/bar", ["fo*ar"]),
-
-    # Double asterisk
-    ("foobar", ["foo/**/bar"]),
-
-    # Two asterisks without slash do not match directory separator
-    ("foo/bar", ["**"]),
-
-    # Double asterisk not matching filename
-    ("foo/bar", ["**/"]),
-
-    # Set
-    ("foo3", ["foo[12]"]),
-
-    # Inverted set
-    ("foo1", ["foo[!12]"]),
-    ("foo2", ["foo[!12]"]),
-    ])
+@pytest.mark.parametrize(
+    "path, patterns",
+    [
+        ("", ["?", "[]"]),
+        ("foo", ["foo?"]),
+        ("foo", ["?foo"]),
+        ("foo", ["f?oo"]),
+        # do not match path separator
+        ("foo/ar", ["foo?ar"]),
+        # do not match/cross over os.path.sep
+        ("foo/bar", ["*"]),
+        ("foo/bar", ["foo*bar"]),
+        ("foo/bar", ["foo*ar"]),
+        ("foo/bar", ["fo*bar"]),
+        ("foo/bar", ["fo*ar"]),
+        # Double asterisk
+        ("foobar", ["foo/**/bar"]),
+        # Two asterisks without slash do not match directory separator
+        ("foo/bar", ["**"]),
+        # Double asterisk not matching filename
+        ("foo/bar", ["**/"]),
+        # Set
+        ("foo3", ["foo[12]"]),
+        # Inverted set
+        ("foo1", ["foo[!12]"]),
+        ("foo2", ["foo[!12]"]),
+    ],
+)
 def test_mismatch(path, patterns):
     for p in patterns:
         assert not check(path, p)
@@ -115,10 +108,10 @@ def test_mismatch(path, patterns):

 def test_match_end():
     regex = shellpattern.translate("*-home")  # default is match_end == string end
-    assert re.match(regex, '2017-07-03-home')
-    assert not re.match(regex, '2017-07-03-home.checkpoint')
+    assert re.match(regex, "2017-07-03-home")
+    assert not re.match(regex, "2017-07-03-home.checkpoint")
 
 
-    match_end = r'(%s)?\Z' % r'\.checkpoint(\.\d+)?'  # with/without checkpoint ending
+    match_end = r"(%s)?\Z" % r"\.checkpoint(\.\d+)?"  # with/without checkpoint ending
     regex = shellpattern.translate("*-home", match_end=match_end)
     regex = shellpattern.translate("*-home", match_end=match_end)
-    assert re.match(regex, '2017-07-03-home')
-    assert re.match(regex, '2017-07-03-home.checkpoint')
+    assert re.match(regex, "2017-07-03-home")
+    assert re.match(regex, "2017-07-03-home.checkpoint")

+ 41 - 35
src/borg/testsuite/version.py

@@ -3,51 +3,57 @@ import pytest
 from ..version import parse_version, format_version


-@pytest.mark.parametrize("version_str, version_tuple", [
-    # setuptools < 8.0 uses "-"
-    ('1.0.0a1.dev204-g8866961.d20170606', (1, 0, 0, -4, 1)),
-    ('1.0.0a1.dev204-g8866961', (1, 0, 0, -4, 1)),
-    ('1.0.0-d20170606', (1, 0, 0, -1)),
-    # setuptools >= 8.0 uses "+"
-    ('1.0.0a1.dev204+g8866961.d20170606', (1, 0, 0, -4, 1)),
-    ('1.0.0a1.dev204+g8866961', (1, 0, 0, -4, 1)),
-    ('1.0.0+d20170606', (1, 0, 0, -1)),
-    # pre-release versions:
-    ('1.0.0a1', (1, 0, 0, -4, 1)),
-    ('1.0.0a2', (1, 0, 0, -4, 2)),
-    ('1.0.0b3', (1, 0, 0, -3, 3)),
-    ('1.0.0rc4', (1, 0, 0, -2, 4)),
-    # release versions:
-    ('0.0.0', (0, 0, 0, -1)),
-    ('0.0.11', (0, 0, 11, -1)),
-    ('0.11.0', (0, 11, 0, -1)),
-    ('11.0.0', (11, 0, 0, -1)),
-])
+@pytest.mark.parametrize(
+    "version_str, version_tuple",
+    [
+        # setuptools < 8.0 uses "-"
+        ("1.0.0a1.dev204-g8866961.d20170606", (1, 0, 0, -4, 1)),
+        ("1.0.0a1.dev204-g8866961", (1, 0, 0, -4, 1)),
+        ("1.0.0-d20170606", (1, 0, 0, -1)),
+        # setuptools >= 8.0 uses "+"
+        ("1.0.0a1.dev204+g8866961.d20170606", (1, 0, 0, -4, 1)),
+        ("1.0.0a1.dev204+g8866961", (1, 0, 0, -4, 1)),
+        ("1.0.0+d20170606", (1, 0, 0, -1)),
+        # pre-release versions:
+        ("1.0.0a1", (1, 0, 0, -4, 1)),
+        ("1.0.0a2", (1, 0, 0, -4, 2)),
+        ("1.0.0b3", (1, 0, 0, -3, 3)),
+        ("1.0.0rc4", (1, 0, 0, -2, 4)),
+        # release versions:
+        ("0.0.0", (0, 0, 0, -1)),
+        ("0.0.11", (0, 0, 11, -1)),
+        ("0.11.0", (0, 11, 0, -1)),
+        ("11.0.0", (11, 0, 0, -1)),
+    ],
+)
 def test_parse_version(version_str, version_tuple):
     assert parse_version(version_str) == version_tuple


 def test_parse_version_invalid():
     with pytest.raises(ValueError):
-        assert parse_version('')  # we require x.y.z versions
+        assert parse_version("")  # we require x.y.z versions
     with pytest.raises(ValueError):
-        assert parse_version('1')  # we require x.y.z versions
+        assert parse_version("1")  # we require x.y.z versions
     with pytest.raises(ValueError):
-        assert parse_version('1.2')  # we require x.y.z versions
+        assert parse_version("1.2")  # we require x.y.z versions
     with pytest.raises(ValueError):
-        assert parse_version('crap')
+        assert parse_version("crap")
 
 
 
 
-@pytest.mark.parametrize("version_str, version_tuple", [
-    ('1.0.0a1', (1, 0, 0, -4, 1)),
-    ('1.0.0', (1, 0, 0, -1)),
-    ('1.0.0a2', (1, 0, 0, -4, 2)),
-    ('1.0.0b3', (1, 0, 0, -3, 3)),
-    ('1.0.0rc4', (1, 0, 0, -2, 4)),
-    ('0.0.0', (0, 0, 0, -1)),
-    ('0.0.11', (0, 0, 11, -1)),
-    ('0.11.0', (0, 11, 0, -1)),
-    ('11.0.0', (11, 0, 0, -1)),
-])
+@pytest.mark.parametrize(
+    "version_str, version_tuple",
+    [
+        ("1.0.0a1", (1, 0, 0, -4, 1)),
+        ("1.0.0", (1, 0, 0, -1)),
+        ("1.0.0a2", (1, 0, 0, -4, 2)),
+        ("1.0.0b3", (1, 0, 0, -3, 3)),
+        ("1.0.0rc4", (1, 0, 0, -2, 4)),
+        ("0.0.0", (0, 0, 0, -1)),
+        ("0.0.11", (0, 0, 11, -1)),
+        ("0.11.0", (0, 11, 0, -1)),
+        ("11.0.0", (11, 0, 0, -1)),
+    ],
+)
 def test_format_version(version_str, version_tuple):
     assert format_version(version_tuple) == version_str

+ 24 - 28
src/borg/testsuite/xattr.py

@@ -10,12 +10,11 @@ from ..platformflags import is_linux
 from . import BaseTestCase


-@unittest.skipUnless(is_enabled(), 'xattr not enabled on filesystem')
+@unittest.skipUnless(is_enabled(), "xattr not enabled on filesystem")
 class XattrTestCase(BaseTestCase):
-
     def setUp(self):
         self.tmpfile = tempfile.NamedTemporaryFile()
-        self.symlink = self.tmpfile.name + '.symlink'
+        self.symlink = self.tmpfile.name + ".symlink"
         os.symlink(self.tmpfile.name, self.symlink)

     def tearDown(self):
@@ -23,7 +22,7 @@ class XattrTestCase(BaseTestCase):

     def assert_equal_se(self, is_x, want_x):
         # check 2 xattr lists for equality, but ignore security.selinux attr
-        is_x = set(is_x) - {b'security.selinux'}
+        is_x = set(is_x) - {b"security.selinux"}
         want_x = set(want_x)
         self.assert_equal(is_x, want_x)

@@ -34,32 +33,32 @@ class XattrTestCase(BaseTestCase):
         self.assert_equal_se(listxattr(tmp_fn), [])
         self.assert_equal_se(listxattr(tmp_fd), [])
         self.assert_equal_se(listxattr(tmp_lfn), [])
-        setxattr(tmp_fn, b'user.foo', b'bar')
-        setxattr(tmp_fd, b'user.bar', b'foo')
-        setxattr(tmp_fn, b'user.empty', b'')
+        setxattr(tmp_fn, b"user.foo", b"bar")
+        setxattr(tmp_fd, b"user.bar", b"foo")
+        setxattr(tmp_fn, b"user.empty", b"")
         if not is_linux:
             # linux does not allow setting user.* xattrs on symlinks
-            setxattr(tmp_lfn, b'user.linkxattr', b'baz')
-        self.assert_equal_se(listxattr(tmp_fn), [b'user.foo', b'user.bar', b'user.empty'])
-        self.assert_equal_se(listxattr(tmp_fd), [b'user.foo', b'user.bar', b'user.empty'])
-        self.assert_equal_se(listxattr(tmp_lfn, follow_symlinks=True), [b'user.foo', b'user.bar', b'user.empty'])
+            setxattr(tmp_lfn, b"user.linkxattr", b"baz")
+        self.assert_equal_se(listxattr(tmp_fn), [b"user.foo", b"user.bar", b"user.empty"])
+        self.assert_equal_se(listxattr(tmp_fd), [b"user.foo", b"user.bar", b"user.empty"])
+        self.assert_equal_se(listxattr(tmp_lfn, follow_symlinks=True), [b"user.foo", b"user.bar", b"user.empty"])
         if not is_linux:
-            self.assert_equal_se(listxattr(tmp_lfn), [b'user.linkxattr'])
-        self.assert_equal(getxattr(tmp_fn, b'user.foo'), b'bar')
-        self.assert_equal(getxattr(tmp_fd, b'user.foo'), b'bar')
-        self.assert_equal(getxattr(tmp_lfn, b'user.foo', follow_symlinks=True), b'bar')
+            self.assert_equal_se(listxattr(tmp_lfn), [b"user.linkxattr"])
+        self.assert_equal(getxattr(tmp_fn, b"user.foo"), b"bar")
+        self.assert_equal(getxattr(tmp_fd, b"user.foo"), b"bar")
+        self.assert_equal(getxattr(tmp_lfn, b"user.foo", follow_symlinks=True), b"bar")
         if not is_linux:
-            self.assert_equal(getxattr(tmp_lfn, b'user.linkxattr'), b'baz')
-        self.assert_equal(getxattr(tmp_fn, b'user.empty'), b'')
+            self.assert_equal(getxattr(tmp_lfn, b"user.linkxattr"), b"baz")
+        self.assert_equal(getxattr(tmp_fn, b"user.empty"), b"")
 
     def test_listxattr_buffer_growth(self):
         tmp_fn = os.fsencode(self.tmpfile.name)
         # make it work even with ext4, which imposes rather low limits
         buffer.resize(size=64, init=True)
         # xattr raw key list will be > 64
-        keys = [b'user.attr%d' % i for i in range(20)]
+        keys = [b"user.attr%d" % i for i in range(20)]
         for key in keys:
-            setxattr(tmp_fn, key, b'x')
+            setxattr(tmp_fn, key, b"x")
         got_keys = listxattr(tmp_fn)
         self.assert_equal_se(got_keys, keys)
         assert len(buffer) > 64
@@ -68,18 +67,15 @@ class XattrTestCase(BaseTestCase):
         tmp_fn = os.fsencode(self.tmpfile.name)
         # make it work even with ext4, which imposes rather low limits
         buffer.resize(size=64, init=True)
-        value = b'x' * 126
-        setxattr(tmp_fn, b'user.big', value)
-        got_value = getxattr(tmp_fn, b'user.big')
+        value = b"x" * 126
+        setxattr(tmp_fn, b"user.big", value)
+        got_value = getxattr(tmp_fn, b"user.big")
         self.assert_equal(value, got_value)
         self.assert_equal(len(buffer), 128)
 
 
-@pytest.mark.parametrize('lstring, splitted', (
-    (b'', []),
-    (b'\x00', [b'']),
-    (b'\x01a', [b'a']),
-    (b'\x01a\x02cd', [b'a', b'cd']),
-))
+@pytest.mark.parametrize(
+    "lstring, splitted", ((b"", []), (b"\x00", [b""]), (b"\x01a", [b"a"]), (b"\x01a\x02cd", [b"a", b"cd"]))
+)
 def test_split_lstring(lstring, splitted):
     assert split_lstring(lstring) == splitted
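Note: the parametrized cases above imply the "lstring" layout: each element is a single length byte followed by that many payload bytes. A hypothetical pure-Python reimplementation (a sketch for illustration, not borg's actual helper):

    def split_lstring_sketch(lstring: bytes) -> list:
        """Split a concatenation of 1-byte-length-prefixed byte strings."""
        result, i = [], 0
        while i < len(lstring):
            n = lstring[i]  # length prefix
            result.append(lstring[i + 1 : i + 1 + n])
            i += 1 + n
        return result

    assert split_lstring_sketch(b"\x01a\x02cd") == [b"a", b"cd"]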

+ 55 - 18
src/borg/upgrade.py

@@ -25,8 +25,16 @@ class UpgraderNoOp:
     def upgrade_archive_metadata(self, *, metadata):
         new_metadata = {}
         # keep all metadata except archive version and stats.
-        for attr in ('cmdline', 'hostname', 'username', 'time', 'time_end', 'comment',
-                     'chunker_params', 'recreate_cmdline'):
+        for attr in (
+            "cmdline",
+            "hostname",
+            "username",
+            "time",
+            "time_end",
+            "comment",
+            "chunker_params",
+            "recreate_cmdline",
+        ):
             if hasattr(metadata, attr):
                 new_metadata[attr] = getattr(metadata, attr)
         return new_metadata
@@ -42,24 +50,45 @@ class UpgraderFrom12To20:
 
     def upgrade_item(self, *, item):
         """upgrade item as needed, get rid of legacy crap"""
-        ITEM_KEY_WHITELIST = {'path', 'source', 'rdev', 'chunks', 'chunks_healthy', 'hlid',
-                              'mode', 'user', 'group', 'uid', 'gid', 'mtime', 'atime', 'ctime', 'birthtime', 'size',
-                              'xattrs', 'bsdflags', 'acl_nfs4', 'acl_access', 'acl_default', 'acl_extended',
-                              'part'}
+        ITEM_KEY_WHITELIST = {
+            "path",
+            "source",
+            "rdev",
+            "chunks",
+            "chunks_healthy",
+            "hlid",
+            "mode",
+            "user",
+            "group",
+            "uid",
+            "gid",
+            "mtime",
+            "atime",
+            "ctime",
+            "birthtime",
+            "size",
+            "xattrs",
+            "bsdflags",
+            "acl_nfs4",
+            "acl_access",
+            "acl_default",
+            "acl_extended",
+            "part",
+        }
 
         if self.hlm.borg1_hardlink_master(item):
-            item._dict['hlid'] = hlid = self.hlm.hardlink_id_from_path(item._dict['path'])
-            self.hlm.remember(id=hlid, info=(item._dict.get('chunks'), item._dict.get('chunks_healthy')))
+            item._dict["hlid"] = hlid = self.hlm.hardlink_id_from_path(item._dict["path"])
+            self.hlm.remember(id=hlid, info=(item._dict.get("chunks"), item._dict.get("chunks_healthy")))
         elif self.hlm.borg1_hardlink_slave(item):
-            item._dict['hlid'] = hlid = self.hlm.hardlink_id_from_path(item._dict['source'])
+            item._dict["hlid"] = hlid = self.hlm.hardlink_id_from_path(item._dict["source"])
             chunks, chunks_healthy = self.hlm.retrieve(id=hlid, default=(None, None))
             if chunks is not None:
-                item._dict['chunks'] = chunks
+                item._dict["chunks"] = chunks
                 for chunk_id, _ in chunks:
                     self.cache.chunk_incref(chunk_id, self.archive.stats)
             if chunks_healthy is not None:
-                item._dict['chunks_healthy'] = chunks
-            item._dict.pop('source')  # not used for hardlinks any more, replaced by hlid
+                item._dict["chunks_healthy"] = chunks
+            item._dict.pop("source")  # not used for hardlinks any more, replaced by hlid
         # make sure we only have desired stuff in the new item. specifically, make sure to get rid of:
         # - 'acl' remnants of bug in attic <= 0.13
         # - 'hardlink_master' (superseded by hlid)
@@ -80,17 +109,17 @@ class UpgraderFrom12To20:
             return chunk
 
         ctype = chunk[0:1]
-        level = b'\xFF'  # FF means unknown compression level
+        level = b"\xFF"  # FF means unknown compression level
 
         if ctype == ObfuscateSize.ID:
             # in older borg, we used unusual byte order
-            old_header_fmt = Struct('>I')
+            old_header_fmt = Struct(">I")
             new_header_fmt = ObfuscateSize.header_fmt
             length = ObfuscateSize.header_len
-            size_bytes = chunk[2:2+length]
+            size_bytes = chunk[2 : 2 + length]
             size = old_header_fmt.unpack(size_bytes)
             size_bytes = new_header_fmt.pack(size)
-            compressed = chunk[2+length:]
+            compressed = chunk[2 + length :]
             compressed = upgrade_zlib_and_level(compressed)
             chunk = ctype + level + size_bytes + compressed
         else:
@@ -101,8 +130,16 @@ class UpgraderFrom12To20:
         new_metadata = {}
         # keep all metadata except archive version and stats. also do not keep
         # recreate_source_id, recreate_args, recreate_partial_chunks which were used only in 1.1.0b1 .. b2.
-        for attr in ('cmdline', 'hostname', 'username', 'time', 'time_end', 'comment',
-                     'chunker_params', 'recreate_cmdline'):
+        for attr in (
+            "cmdline",
+            "hostname",
+            "username",
+            "time",
+            "time_end",
+            "comment",
+            "chunker_params",
+            "recreate_cmdline",
+        ):
             if hasattr(metadata, attr):
                 new_metadata[attr] = getattr(metadata, attr)
         return new_metadata
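Note: the ObfuscateSize branch above repacks the stored size from the old big-endian ">I" header into the new ObfuscateSize.header_fmt. A byte-level sketch, assuming the new format is little-endian "<I" (only the old ">I" format is visible in this diff, so the new byte order is an assumption here):

    from struct import Struct

    old_header_fmt = Struct(">I")  # borg 1.x stored the true chunk size big-endian
    new_header_fmt = Struct("<I")  # assumption, for illustration only
    (size,) = old_header_fmt.unpack(b"\x00\x00\x01\x00")  # 256
    assert new_header_fmt.pack(size) == b"\x00\x01\x00\x00"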

+ 7 - 7
src/borg/version.py

@@ -21,12 +21,12 @@ def parse_version(version):
     """
     """
     m = re.match(version_re, version, re.VERBOSE)
     m = re.match(version_re, version, re.VERBOSE)
     if m is None:
     if m is None:
-        raise ValueError('Invalid version string %s' % version)
+        raise ValueError("Invalid version string %s" % version)
     gd = m.groupdict()
-    version = [int(gd['major']), int(gd['minor']), int(gd['patch'])]
-    if m.lastgroup == 'prerelease':
-        p_type = {'a': -4, 'b': -3, 'rc': -2}[gd['ptype']]
-        p_num = int(gd['pnum'])
+    version = [int(gd["major"]), int(gd["minor"]), int(gd["patch"])]
+    if m.lastgroup == "prerelease":
+        p_type = {"a": -4, "b": -3, "rc": -2}[gd["ptype"]]
+        p_num = int(gd["pnum"])
         version += [p_type, p_num]
     else:
         version += [-1]
@@ -44,6 +44,6 @@ def format_version(version):
         elif part == -1:
             break
         else:
-            f[-1] = f[-1] + {-2: 'rc', -3: 'b', -4: 'a'}[part] + str(next(it))
+            f[-1] = f[-1] + {-2: "rc", -3: "b", -4: "a"}[part] + str(next(it))
             break
-    return '.'.join(f)
+    return ".".join(f)

+ 16 - 16
src/borg/xattr.py

@@ -22,14 +22,14 @@ from .platform import listxattr, getxattr, setxattr, ENOATTR
 # TODO: Check whether fakeroot supports xattrs on all platforms supported below.
 # TODO: If that's the case then we can make Borg fakeroot-xattr-compatible on these as well.
 XATTR_FAKEROOT = False
-if sys.platform.startswith('linux'):
-    LD_PRELOAD = os.environ.get('LD_PRELOAD', '')
+if sys.platform.startswith("linux"):
+    LD_PRELOAD = os.environ.get("LD_PRELOAD", "")
     preloads = re.split("[ :]", LD_PRELOAD)
     for preload in preloads:
         if preload.startswith("libfakeroot"):
             env = prepare_subprocess_env(system=True)
-            fakeroot_output = subprocess.check_output(['fakeroot', '-v'], env=env)
-            fakeroot_version = parse_version(fakeroot_output.decode('ascii').split()[-1])
+            fakeroot_output = subprocess.check_output(["fakeroot", "-v"], env=env)
+            fakeroot_version = parse_version(fakeroot_output.decode("ascii").split()[-1])
             if fakeroot_version >= parse_version("1.20.2"):
                 # 1.20.2 has been confirmed to have xattr support
                 # 1.18.2 has been confirmed not to have xattr support
@@ -39,11 +39,10 @@ if sys.platform.startswith('linux'):
 
 
 def is_enabled(path=None):
-    """Determine if xattr is enabled on the filesystem
-    """
-    with tempfile.NamedTemporaryFile(dir=path, prefix='borg-tmp') as f:
+    """Determine if xattr is enabled on the filesystem"""
+    with tempfile.NamedTemporaryFile(dir=path, prefix="borg-tmp") as f:
         fd = f.fileno()
-        name, value = b'user.name', b'value'
+        name, value = b"user.name", b"value"
         try:
             setxattr(fd, name, value)
         except OSError:
@@ -80,7 +79,7 @@ def get_all(path, follow_symlinks=False):
             except OSError as e:
                 name_str = name.decode()
                 if isinstance(path, int):
-                    path_str = '<FD %d>' % path
+                    path_str = "<FD %d>" % path
                 else:
                     path_str = os.fsdecode(path)
                 if e.errno == ENOATTR:
@@ -89,8 +88,9 @@ def get_all(path, follow_symlinks=False):
                     pass
                 elif e.errno == errno.EPERM:
                     # we were not permitted to read this attribute, still can continue trying to read others
-                    logger.warning('{}: Operation not permitted when reading extended attribute {}'.format(
-                                   path_str, name_str))
+                    logger.warning(
+                        "{}: Operation not permitted when reading extended attribute {}".format(path_str, name_str)
+                    )
                 else:
                     raise
     except OSError as e:
@@ -125,21 +125,21 @@ def set_all(path, xattrs, follow_symlinks=False):
             warning = True
             k_str = k.decode()
             if isinstance(path, int):
-                path_str = '<FD %d>' % path
+                path_str = "<FD %d>" % path
             else:
                 path_str = os.fsdecode(path)
             if e.errno == errno.E2BIG:
-                err_str = 'too big for this filesystem'
+                err_str = "too big for this filesystem"
             elif e.errno == errno.ENOTSUP:
-                err_str = 'xattrs not supported on this filesystem'
+                err_str = "xattrs not supported on this filesystem"
             elif e.errno == errno.ENOSPC:
                 # ext4 reports ENOSPC when trying to set an xattr with >4kiB while ext4 can only support 4kiB xattrs
                 # (in this case, this is NOT a "disk full" error, just a ext4 limitation).
-                err_str = 'no space left on device [xattr len = %d]' % (len(v),)
+                err_str = "no space left on device [xattr len = %d]" % (len(v),)
             else:
                 # generic handler
                 # EACCES: permission denied to set this specific xattr (this may happen related to security.* keys)
                 # EPERM: operation not permitted
                 err_str = os.strerror(e.errno)
-            logger.warning('%s: when setting extended attribute %s: %s', path_str, k_str, err_str)
+            logger.warning("%s: when setting extended attribute %s: %s", path_str, k_str, err_str)
     return warning
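Note: a usage sketch for the two helpers changed above (signatures taken from the hunk headers; that get_all returns a name-to-value mapping is an assumption made for illustration):

    from borg.xattr import get_all, set_all

    xattrs = get_all("/tmp/somefile")            # read all extended attributes
    warning = set_all("/tmp/otherfile", xattrs)  # True if some xattr could not be set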

Some files were not shown because too many files changed in this diff