
When running tests, use Ruff for faster and more comprehensive code linting and formatting.

Dan Helfman 2 days ago
parent
commit
9a80fec91b
100 changed files with 1477 additions and 984 deletions
  1. NEWS (+2 -0)
  2. borgmatic/actions/borg.py (+2 -1)
  3. borgmatic/actions/break_lock.py (+2 -1)
  4. borgmatic/actions/change_passphrase.py (+2 -1)
  5. borgmatic/actions/check.py (+58 -55)
  6. borgmatic/actions/compact.py (+2 -1)
  7. borgmatic/actions/config/bootstrap.py (+11 -6)
  8. borgmatic/actions/config/generate.py (+3 -3)
  9. borgmatic/actions/config/validate.py (+1 -1)
  10. borgmatic/actions/create.py (+11 -5)
  11. borgmatic/actions/delete.py (+2 -1)
  12. borgmatic/actions/export_key.py (+2 -1)
  13. borgmatic/actions/export_tar.py (+2 -1)
  14. borgmatic/actions/extract.py (+2 -1)
  15. borgmatic/actions/import_key.py (+2 -1)
  16. borgmatic/actions/info.py (+2 -1)
  17. borgmatic/actions/list.py (+2 -1)
  18. borgmatic/actions/mount.py (+2 -1)
  19. borgmatic/actions/pattern.py (+14 -12)
  20. borgmatic/actions/prune.py (+2 -1)
  21. borgmatic/actions/recreate.py (+14 -10)
  22. borgmatic/actions/repo_create.py (+3 -2)
  23. borgmatic/actions/repo_delete.py (+3 -2)
  24. borgmatic/actions/repo_info.py (+2 -1)
  25. borgmatic/actions/repo_list.py (+2 -1)
  26. borgmatic/actions/restore.py (+27 -20)
  27. borgmatic/actions/transfer.py (+1 -1)
  28. borgmatic/borg/borg.py (+6 -8)
  29. borgmatic/borg/change_passphrase.py (+2 -2)
  30. borgmatic/borg/check.py (+10 -5)
  31. borgmatic/borg/compact.py (+1 -1)
  32. borgmatic/borg/create.py (+31 -20)
  33. borgmatic/borg/delete.py (+10 -3)
  34. borgmatic/borg/environment.py (+2 -1)
  35. borgmatic/borg/export_key.py (+1 -1)
  36. borgmatic/borg/export_tar.py (+2 -5)
  37. borgmatic/borg/extract.py (+5 -1)
  38. borgmatic/borg/flags.py (+17 -17)
  39. borgmatic/borg/info.py (+4 -1)
  40. borgmatic/borg/list.py (+14 -9)
  41. borgmatic/borg/pattern.py (+10 -10)
  42. borgmatic/borg/recreate.py (+4 -2)
  43. borgmatic/borg/rename.py (+3 -1)
  44. borgmatic/borg/repo_create.py (+3 -3)
  45. borgmatic/borg/repo_delete.py (+11 -3)
  46. borgmatic/borg/repo_info.py (+11 -9)
  47. borgmatic/borg/repo_list.py (+15 -15)
  48. borgmatic/commands/arguments.py (+228 -88)
  49. borgmatic/commands/borgmatic.py (+125 -110)
  50. borgmatic/commands/completion/actions.py (+4 -4)
  51. borgmatic/commands/completion/bash.py (+8 -9)
  52. borgmatic/commands/completion/fish.py (+23 -22)
  53. borgmatic/commands/completion/flag.py (+1 -1)
  54. borgmatic/commands/generate_config.py (+2 -2)
  55. borgmatic/commands/validate_config.py (+2 -2)
  56. borgmatic/config/arguments.py (+1 -1)
  57. borgmatic/config/collect.py (+1 -1)
  58. borgmatic/config/constants.py (+12 -12)
  59. borgmatic/config/environment.py (+1 -1)
  60. borgmatic/config/generate.py (+45 -29)
  61. borgmatic/config/load.py (+21 -16)
  62. borgmatic/config/normalize.py (+44 -50)
  63. borgmatic/config/override.py (+15 -15)
  64. borgmatic/config/paths.py (+8 -8)
  65. borgmatic/config/schema.py (+4 -10)
  66. borgmatic/config/validate.py (+15 -8)
  67. borgmatic/execute.py (+30 -18)
  68. borgmatic/hooks/command.py (+4 -4)
  69. borgmatic/hooks/credential/container.py (+2 -1)
  70. borgmatic/hooks/credential/file.py (+2 -1)
  71. borgmatic/hooks/credential/parse.py (+4 -1)
  72. borgmatic/hooks/credential/systemd.py (+4 -2)
  73. borgmatic/hooks/data_source/bootstrap.py (+15 -12)
  74. borgmatic/hooks/data_source/btrfs.py (+32 -24)
  75. borgmatic/hooks/data_source/dump.py (+3 -1)
  76. borgmatic/hooks/data_source/lvm.py (+36 -25)
  77. borgmatic/hooks/data_source/mariadb.py (+34 -16)
  78. borgmatic/hooks/data_source/mongodb.py (+29 -13)
  79. borgmatic/hooks/data_source/mysql.py (+40 -16)
  80. borgmatic/hooks/data_source/postgresql.py (+34 -19)
  81. borgmatic/hooks/data_source/sqlite.py (+24 -12)
  82. borgmatic/hooks/data_source/zfs.py (+32 -24)
  83. borgmatic/hooks/dispatch.py (+2 -2)
  84. borgmatic/hooks/monitoring/apprise.py (+9 -7)
  85. borgmatic/hooks/monitoring/cronhub.py (+5 -3)
  86. borgmatic/hooks/monitoring/cronitor.py (+5 -3)
  87. borgmatic/hooks/monitoring/healthchecks.py (+6 -4)
  88. borgmatic/hooks/monitoring/logs.py (+2 -3)
  89. borgmatic/hooks/monitoring/loki.py (+6 -4)
  90. borgmatic/hooks/monitoring/ntfy.py (+16 -8)
  91. borgmatic/hooks/monitoring/pagerduty.py (+11 -6)
  92. borgmatic/hooks/monitoring/pushover.py (+11 -9)
  93. borgmatic/hooks/monitoring/sentry.py (+7 -5)
  94. borgmatic/hooks/monitoring/uptime_kuma.py (+5 -3)
  95. borgmatic/hooks/monitoring/zabbix.py (+12 -7)
  96. borgmatic/logger.py (+35 -39)
  97. docs/how-to/develop-on-borgmatic.md (+23 -20)
  98. pyproject.toml (+83 -9)
  99. scripts/run-full-tests (+1 -1)
  100. test_requirements.txt (+13 -24)

+ 2 - 0
NEWS

@@ -1,6 +1,8 @@
 2.0.8.dev0
  * #1118: Fix a bug in which Borg hangs during database backup when different filesystems are in
    use.
+ * When running tests, use Ruff for faster and more comprehensive code linting and formatting,
+   replacing Flake8, Black, isort, etc.
 
 2.0.7
  * #1032: Fix a bug in which a Borg archive gets created even when a database hook fails.
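
Most of the churn in the hunks below is mechanical and follows from a handful of Ruff rules: multi-line calls keep one argument per line and end with a trailing comma, membership tests against literals use set rather than tuple syntax, and try/except/pass collapses into contextlib.suppress. A minimal Python sketch of those styles (the function here is hypothetical, and the rule attributions are my reading of the diff rather than anything the commit states):

import contextlib
import os


def remove_if_present(path, kind):
    # One argument per line with a trailing comma: the layout Ruff's
    # formatter settles on once a call spans multiple lines.
    print(
        f'Removing {path} ({kind})',
    )

    # A set literal for the membership test, not a tuple.
    if kind in {'file', 'symlink'}:
        # contextlib.suppress in place of try/except/pass.
        with contextlib.suppress(FileNotFoundError):
            os.remove(path)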

+ 2 - 1
borgmatic/actions/borg.py

@@ -20,7 +20,8 @@ def run_borg(
     Run the "borg" action for the given repository.
     Run the "borg" action for the given repository.
     '''
     '''
     if borg_arguments.repository is None or borgmatic.config.validate.repositories_match(
     if borg_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, borg_arguments.repository
+        repository,
+        borg_arguments.repository,
     ):
     ):
         logger.info('Running arbitrary Borg command')
         logger.info('Running arbitrary Borg command')
         archive_name = borgmatic.borg.repo_list.resolve_archive_name(
         archive_name = borgmatic.borg.repo_list.resolve_archive_name(

+ 2 - 1
borgmatic/actions/break_lock.py

@@ -19,7 +19,8 @@ def run_break_lock(
     Run the "break-lock" action for the given repository.
     Run the "break-lock" action for the given repository.
     '''
     '''
     if break_lock_arguments.repository is None or borgmatic.config.validate.repositories_match(
     if break_lock_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, break_lock_arguments.repository
+        repository,
+        break_lock_arguments.repository,
     ):
     ):
         logger.info('Breaking repository and cache locks')
         logger.info('Breaking repository and cache locks')
         borgmatic.borg.break_lock.break_lock(
         borgmatic.borg.break_lock.break_lock(

+ 2 - 1
borgmatic/actions/change_passphrase.py

@@ -21,7 +21,8 @@ def run_change_passphrase(
     if (
         change_passphrase_arguments.repository is None
         or borgmatic.config.validate.repositories_match(
-            repository, change_passphrase_arguments.repository
+            repository,
+            change_passphrase_arguments.repository,
         )
     ):
         logger.info('Changing repository passphrase')

+ 58 - 55
borgmatic/actions/check.py

@@ -1,4 +1,5 @@
 import calendar
+import contextlib
 import datetime
 import hashlib
 import itertools
@@ -55,12 +56,12 @@ def parse_checks(config, only_checks=None):
 
     if 'disabled' in checks:
         logger.warning(
-            'The "disabled" value for the "checks" option is deprecated and will be removed from a future release; use "skip_actions" instead'
+            'The "disabled" value for the "checks" option is deprecated and will be removed from a future release; use "skip_actions" instead',
         )
 
         if len(checks) > 1:
             logger.warning(
-                'Multiple checks are configured, but one of them is "disabled"; not running any checks'
+                'Multiple checks are configured, but one of them is "disabled"; not running any checks',
             )
 
         return ()
@@ -175,7 +176,7 @@ def filter_checks_on_frequency(
 
             if calendar.day_name[datetime_now().weekday()] not in days:
                 logger.info(
-                    f"Skipping {check} check due to day of the week; check only runs on {'/'.join(day.title() for day in days)} (use --force to check anyway)"
+                    f"Skipping {check} check due to day of the week; check only runs on {'/'.join(day.title() for day in days)} (use --force to check anyway)",
                 )
                 filtered_checks.remove(check)
                 continue
@@ -193,7 +194,7 @@ def filter_checks_on_frequency(
         if datetime_now() < check_time + frequency_delta:
             remaining = check_time + frequency_delta - datetime_now()
             logger.info(
-                f'Skipping {check} check due to configured frequency; {remaining} until next check (use --force to check anyway)'
+                f'Skipping {check} check due to configured frequency; {remaining} until next check (use --force to check anyway)',
             )
             filtered_checks.remove(check)
 
@@ -219,7 +220,7 @@ def make_check_time_path(config, borg_repository_id, check_type, archives_check_
     '''
     borgmatic_state_directory = borgmatic.config.paths.get_borgmatic_state_directory(config)
 
-    if check_type in ('archives', 'data'):
+    if check_type in {'archives', 'data'}:
         return os.path.join(
             borgmatic_state_directory,
             'checks',
@@ -254,7 +255,7 @@ def read_check_time(path):
     logger.debug(f'Reading check time from {path}')
 
     try:
-        return datetime.datetime.fromtimestamp(os.stat(path).st_mtime)
+        return datetime.datetime.fromtimestamp(os.stat(path).st_mtime)  # noqa: DTZ006
     except FileNotFoundError:
         return None
 
@@ -285,7 +286,7 @@ def probe_for_check_time(config, borg_repository_id, check, archives_check_id):
             (
                 make_check_time_path(config, borg_repository_id, check, archives_check_id),
                 make_check_time_path(config, borg_repository_id, check),
-            )
+            ),
         )
     )
 
@@ -317,16 +318,17 @@ def upgrade_check_times(config, borg_repository_id):
       {borgmatic_state_directory}/checks/1234567890/archives/all
     '''
     borgmatic_source_checks_path = os.path.join(
-        borgmatic.config.paths.get_borgmatic_source_directory(config), 'checks'
+        borgmatic.config.paths.get_borgmatic_source_directory(config),
+        'checks',
     )
     borgmatic_state_path = borgmatic.config.paths.get_borgmatic_state_directory(config)
     borgmatic_state_checks_path = os.path.join(borgmatic_state_path, 'checks')
 
     if os.path.exists(borgmatic_source_checks_path) and not os.path.exists(
-        borgmatic_state_checks_path
+        borgmatic_state_checks_path,
     ):
         logger.debug(
-            f'Upgrading archives check times directory from {borgmatic_source_checks_path} to {borgmatic_state_checks_path}'
+            f'Upgrading archives check times directory from {borgmatic_source_checks_path} to {borgmatic_state_checks_path}',
         )
         os.makedirs(borgmatic_state_path, mode=0o700, exist_ok=True)
         shutil.move(borgmatic_source_checks_path, borgmatic_state_checks_path)
@@ -341,10 +343,8 @@ def upgrade_check_times(config, borg_repository_id):
 
         logger.debug(f'Upgrading archives check time file from {old_path} to {new_path}')
 
-        try:
+        with contextlib.suppress(FileNotFoundError):
             shutil.move(old_path, temporary_path)
-        except FileNotFoundError:
-            pass
 
         os.mkdir(old_path)
         shutil.move(temporary_path, new_path)
@@ -369,31 +369,29 @@ def collect_spot_check_source_paths(
             'use_streaming',
             config,
             borgmatic.hooks.dispatch.Hook_type.DATA_SOURCE,
-        ).values()
+        ).values(),
     )
     working_directory = borgmatic.config.paths.get_working_directory(config)
 
-    (create_flags, create_positional_arguments, pattern_file) = (
-        borgmatic.borg.create.make_base_create_command(
-            dry_run=True,
-            repository_path=repository['path'],
-            # Omit "progress" because it interferes with "list_details".
-            config=dict(
-                {option: value for option, value in config.items() if option != 'progress'},
-                list_details=True,
-            ),
-            patterns=borgmatic.actions.pattern.process_patterns(
-                borgmatic.actions.pattern.collect_patterns(config),
-                config,
-                working_directory,
-            ),
-            local_borg_version=local_borg_version,
-            global_arguments=global_arguments,
-            borgmatic_runtime_directory=borgmatic_runtime_directory,
-            local_path=local_path,
-            remote_path=remote_path,
-            stream_processes=stream_processes,
-        )
+    (create_flags, create_positional_arguments, _) = borgmatic.borg.create.make_base_create_command(
+        dry_run=True,
+        repository_path=repository['path'],
+        # Omit "progress" because it interferes with "list_details".
+        config=dict(
+            {option: value for option, value in config.items() if option != 'progress'},
+            list_details=True,
+        ),
+        patterns=borgmatic.actions.pattern.process_patterns(
+            borgmatic.actions.pattern.collect_patterns(config),
+            config,
+            working_directory,
+        ),
+        local_borg_version=local_borg_version,
+        global_arguments=global_arguments,
+        borgmatic_runtime_directory=borgmatic_runtime_directory,
+        local_path=local_path,
+        remote_path=remote_path,
+        stream_processes=stream_processes,
     )
     working_directory = borgmatic.config.paths.get_working_directory(config)
 
@@ -409,7 +407,7 @@ def collect_spot_check_source_paths(
     paths = tuple(
         path_line.split(' ', 1)[1]
         for path_line in paths_output.splitlines()
-        if path_line and path_line.startswith('- ') or path_line.startswith('+ ')
+        if path_line and path_line.startswith(('- ', '+ '))
     )
 
     return tuple(
@@ -450,12 +448,12 @@ def collect_spot_check_archive_paths(
             config,
             local_borg_version,
             global_arguments,
-            path_format='{type} {path}{NUL}',  # noqa: FS003
+            path_format='{type} {path}{NUL}',
             local_path=local_path,
             remote_path=remote_path,
         )
         for (file_type, path) in (line.split(' ', 1),)
-        if file_type not in (BORG_DIRECTORY_FILE_TYPE, BORG_PIPE_FILE_TYPE)
+        if file_type not in {BORG_DIRECTORY_FILE_TYPE, BORG_PIPE_FILE_TYPE}
         if pathlib.Path('borgmatic') not in pathlib.Path(path).parents
         if pathlib.Path(borgmatic_source_directory.lstrip(os.path.sep))
         not in pathlib.Path(path).parents
@@ -488,7 +486,8 @@ def compare_spot_check_hashes(
     # source directories.
     spot_check_config = next(check for check in config['checks'] if check['name'] == 'spot')
     sample_count = max(
-        int(len(source_paths) * (min(spot_check_config['data_sample_percentage'], 100) / 100)), 1
+        int(len(source_paths) * (min(spot_check_config['data_sample_percentage'], 100) / 100)),
+        1,
     )
     source_sample_paths = tuple(random.SystemRandom().sample(source_paths, sample_count))
     working_directory = borgmatic.config.paths.get_working_directory(config)
@@ -500,7 +499,7 @@ def compare_spot_check_hashes(
         if not os.path.islink(full_source_path)
     }
     logger.debug(
-        f'Sampling {sample_count} source paths (~{spot_check_config["data_sample_percentage"]}%) for spot check'
+        f'Sampling {sample_count} source paths (~{spot_check_config["data_sample_percentage"]}%) for spot check',
    )
 
     source_sample_paths_iterator = iter(source_sample_paths)
@@ -512,7 +511,7 @@ def compare_spot_check_hashes(
     while True:
         # Hash each file in the sample paths (if it exists).
         source_sample_paths_subset = tuple(
-            itertools.islice(source_sample_paths_iterator, SAMPLE_PATHS_SUBSET_COUNT)
+            itertools.islice(source_sample_paths_iterator, SAMPLE_PATHS_SUBSET_COUNT),
        )
         if not source_sample_paths_subset:
             break
@@ -539,7 +538,7 @@ def compare_spot_check_hashes(
                     for path in source_sample_paths_subset
                     if path not in hashable_source_sample_path
                 },
-            )
+            ),
         )
 
         # Get the hash for each file in the archive.
@@ -553,12 +552,12 @@ def compare_spot_check_hashes(
                     local_borg_version,
                     global_arguments,
                     list_paths=source_sample_paths_subset,
-                    path_format='{xxh64} {path}{NUL}',  # noqa: FS003
+                    path_format='{xxh64} {path}{NUL}',
                     local_path=local_path,
                     remote_path=remote_path,
                 )
                 if line
-            )
+            ),
         )
 
     # Compare the source hashes with the archive hashes to see how many match.
@@ -607,7 +606,7 @@ def spot_check(
 
     if spot_check_config['data_tolerance_percentage'] > spot_check_config['data_sample_percentage']:
         raise ValueError(
-            'The data_tolerance_percentage must be less than or equal to the data_sample_percentage'
+            'The data_tolerance_percentage must be less than or equal to the data_sample_percentage',
         )
 
     source_paths = collect_spot_check_source_paths(
@@ -652,7 +651,7 @@ def spot_check(
         )
         logger.debug(f'Paths in latest archive but not source paths: {truncated_archive_paths}')
         raise ValueError(
-            'Spot check failed: There are no source paths to compare against the archive'
+            'Spot check failed: There are no source paths to compare against the archive',
         )
 
     # Calculate the percentage delta between the source paths count and the archive paths count, and
@@ -660,14 +659,14 @@ def spot_check(
     count_delta_percentage = abs(len(source_paths) - len(archive_paths)) / len(source_paths) * 100
 
     if count_delta_percentage > spot_check_config['count_tolerance_percentage']:
-        rootless_source_paths = set(path.lstrip(os.path.sep) for path in source_paths)
+        rootless_source_paths = {path.lstrip(os.path.sep) for path in source_paths}
         truncated_exclusive_source_paths = textwrap.shorten(
             ', '.join(rootless_source_paths - set(archive_paths)) or 'none',
             width=MAX_SPOT_CHECK_PATHS_LENGTH,
             placeholder=' ...',
         )
         logger.debug(
-            f'Paths in source paths but not latest archive: {truncated_exclusive_source_paths}'
+            f'Paths in source paths but not latest archive: {truncated_exclusive_source_paths}',
         )
         truncated_exclusive_archive_paths = textwrap.shorten(
             ', '.join(set(archive_paths) - rootless_source_paths) or 'none',
@@ -675,10 +674,10 @@ def spot_check(
             placeholder=' ...',
         )
         logger.debug(
-            f'Paths in latest archive but not source paths: {truncated_exclusive_archive_paths}'
+            f'Paths in latest archive but not source paths: {truncated_exclusive_archive_paths}',
        )
         raise ValueError(
-            f'Spot check failed: {count_delta_percentage:.2f}% file count delta between source paths and latest archive (tolerance is {spot_check_config["count_tolerance_percentage"]}%)'
+            f'Spot check failed: {count_delta_percentage:.2f}% file count delta between source paths and latest archive (tolerance is {spot_check_config["count_tolerance_percentage"]}%)',
         )
 
     failing_paths = compare_spot_check_hashes(
@@ -704,14 +703,14 @@ def spot_check(
             placeholder=' ...',
         )
         logger.debug(
-            f'Source paths with data not matching the latest archive: {truncated_failing_paths}'
+            f'Source paths with data not matching the latest archive: {truncated_failing_paths}',
         )
         raise ValueError(
-            f'Spot check failed: {failing_percentage:.2f}% of source paths with data not matching the latest archive (tolerance is {data_tolerance_percentage}%)'
+            f'Spot check failed: {failing_percentage:.2f}% of source paths with data not matching the latest archive (tolerance is {data_tolerance_percentage}%)',
        )
 
     logger.info(
-        f'Spot check passed with a {count_delta_percentage:.2f}% file count delta and a {failing_percentage:.2f}% file data delta'
+        f'Spot check passed with a {count_delta_percentage:.2f}% file count delta and a {failing_percentage:.2f}% file data delta',
     )
 
 
@@ -731,7 +730,8 @@ def run_check(
     Raise ValueError if the Borg repository ID cannot be determined.
     '''
     if check_arguments.repository and not borgmatic.config.validate.repositories_match(
-        repository, check_arguments.repository
+        repository,
+        check_arguments.repository,
     ):
         return
 
@@ -748,7 +748,10 @@ def run_check(
     upgrade_check_times(config, repository_id)
     configured_checks = parse_checks(config, check_arguments.only_checks)
     archive_filter_flags = borgmatic.borg.check.make_archive_filter_flags(
-        local_borg_version, config, configured_checks, check_arguments
+        local_borg_version,
+        config,
+        configured_checks,
+        check_arguments,
    )
     archives_check_id = make_archives_check_id(archive_filter_flags)
     checks = filter_checks_on_frequency(
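
A note on the "noqa: DTZ006" added to read_check_time() above: Ruff's datetime rules flag datetime.datetime.fromtimestamp() calls that omit a tz argument, and the suppression preserves the existing naive local-time reading of the check time files. A small sketch of the two spellings, with an arbitrary timestamp:

import datetime

# Naive local time, as read_check_time() deliberately keeps; without the
# suppression, Ruff's DTZ006 rule flags the missing tz argument.
naive = datetime.datetime.fromtimestamp(1_700_000_000)  # noqa: DTZ006

# The timezone-aware alternative that DTZ006 otherwise steers toward.
aware = datetime.datetime.fromtimestamp(1_700_000_000, tz=datetime.timezone.utc)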

+ 2 - 1
borgmatic/actions/compact.py

@@ -23,7 +23,8 @@ def run_compact(
     Run the "compact" action for the given repository.
     Run the "compact" action for the given repository.
     '''
     '''
     if compact_arguments.repository and not borgmatic.config.validate.repositories_match(
     if compact_arguments.repository and not borgmatic.config.validate.repositories_match(
-        repository, compact_arguments.repository
+        repository,
+        compact_arguments.repository,
     ):
     ):
         return
         return
 
 

+ 11 - 6
borgmatic/actions/config/bootstrap.py

@@ -36,7 +36,7 @@ def get_config_paths(archive_name, bootstrap_arguments, global_arguments, local_
     expected configuration path data.
     '''
     borgmatic_source_directory = borgmatic.config.paths.get_borgmatic_source_directory(
-        {'borgmatic_source_directory': bootstrap_arguments.borgmatic_source_directory}
+        {'borgmatic_source_directory': bootstrap_arguments.borgmatic_source_directory},
     )
     config = make_bootstrap_config(bootstrap_arguments)
 
@@ -52,7 +52,9 @@ def get_config_paths(archive_name, bootstrap_arguments, global_arguments, local_
             borgmatic_source_directory,
         ):
             borgmatic_manifest_path = 'sh:' + os.path.join(
-                base_directory, 'bootstrap', 'manifest.json'
+                base_directory,
+                'bootstrap',
+                'manifest.json',
             )
 
             extract_process = borgmatic.borg.extract.extract_archive(
@@ -73,21 +75,21 @@ def get_config_paths(archive_name, bootstrap_arguments, global_arguments, local_
                 break
         else:
             raise ValueError(
-                'Cannot read configuration paths from archive due to missing bootstrap manifest'
+                'Cannot read configuration paths from archive due to missing bootstrap manifest',
             )
 
     try:
         manifest_data = json.loads(manifest_json)
     except json.JSONDecodeError as error:
         raise ValueError(
-            f'Cannot read configuration paths from archive due to invalid bootstrap manifest JSON: {error}'
+            f'Cannot read configuration paths from archive due to invalid bootstrap manifest JSON: {error}',
         )
 
     try:
         return manifest_data['config_paths']
     except KeyError:
         raise ValueError(
-            'Cannot read configuration paths from archive due to invalid bootstrap manifest'
+            'Cannot read configuration paths from archive due to invalid bootstrap manifest',
        )
 
 
@@ -109,7 +111,10 @@ def run_bootstrap(bootstrap_arguments, global_arguments, local_borg_version):
         remote_path=bootstrap_arguments.remote_path,
     )
     manifest_config_paths = get_config_paths(
-        archive_name, bootstrap_arguments, global_arguments, local_borg_version
+        archive_name,
+        bootstrap_arguments,
+        global_arguments,
+        local_borg_version,
     )
 
     logger.info(f"Bootstrapping config paths: {', '.join(manifest_config_paths)}")

+ 3 - 3
borgmatic/actions/config/generate.py

@@ -19,7 +19,7 @@ def run_generate(generate_arguments, global_arguments):
     dry_run_label = ' (dry run; not actually writing anything)' if global_arguments.dry_run else ''
 
     logger.answer(
-        f'Generating a configuration file at: {generate_arguments.destination_filename}{dry_run_label}'
+        f'Generating a configuration file at: {generate_arguments.destination_filename}{dry_run_label}',
     )
 
     borgmatic.config.generate.generate_sample_configuration(
@@ -36,7 +36,7 @@ def run_generate(generate_arguments, global_arguments):
 Merged in the contents of configuration file at: {generate_arguments.source_filename}
 To review the changes made, run:
 
-    diff --unified {generate_arguments.source_filename} {generate_arguments.destination_filename}'''
+    diff --unified {generate_arguments.source_filename} {generate_arguments.destination_filename}''',
         )
 
     logger.answer(
@@ -44,5 +44,5 @@ To review the changes made, run:
 This includes all available configuration options with example values, the few
 required options as indicated. Please edit the file to suit your needs.
 
-If you ever need help: https://torsion.org/borgmatic/#issues'''
+If you ever need help: https://torsion.org/borgmatic/#issues''',
     )

+ 1 - 1
borgmatic/actions/config/validate.py

@@ -18,7 +18,7 @@ def run_validate(validate_arguments, configs):
     borgmatic.logger.add_custom_log_levels()
 
     if validate_arguments.show:
-        for config_path, config in configs.items():
+        for config in configs.values():
             if len(configs) > 1:
                 logger.answer('---')
 

+ 11 - 5
borgmatic/actions/create.py

@@ -30,18 +30,19 @@ def run_create(
     If create_arguments.json is True, yield the JSON output from creating the archive.
     '''
     if create_arguments.repository and not borgmatic.config.validate.repositories_match(
-        repository, create_arguments.repository
+        repository,
+        create_arguments.repository,
     ):
         return
 
     if config.get('list_details') and config.get('progress'):
         raise ValueError(
-            'With the create action, only one of --list/--files/list_details and --progress/progress can be used.'
+            'With the create action, only one of --list/--files/list_details and --progress/progress can be used.',
         )
 
     if config.get('list_details') and create_arguments.json:
         raise ValueError(
-            'With the create action, only one of --list/--files/list_details and --json can be used.'
+            'With the create action, only one of --list/--files/list_details and --json can be used.',
         )
 
     logger.info(f'Creating archive{dry_run_label}')
@@ -56,7 +57,9 @@ def run_create(
             global_arguments.dry_run,
         )
         patterns = pattern.process_patterns(
-            pattern.collect_patterns(config), config, working_directory
+            pattern.collect_patterns(config),
+            config,
+            working_directory,
         )
         active_dumps = borgmatic.hooks.dispatch.call_hooks(
             'dump_data_sources',
@@ -72,7 +75,10 @@ def run_create(
         # we could end up with duplicate paths that cause Borg to hang when it tries to read from
         # the same named pipe twice.
         patterns = pattern.process_patterns(
-            patterns, config, working_directory, skip_expand_paths=config_paths
+            patterns,
+            config,
+            working_directory,
+            skip_expand_paths=config_paths,
         )
         stream_processes = [process for processes in active_dumps.values() for process in processes]
 

+ 2 - 1
borgmatic/actions/delete.py

@@ -21,7 +21,8 @@ def run_delete(
     Run the "delete" action for the given repository and archive(s).
     Run the "delete" action for the given repository and archive(s).
     '''
     '''
     if delete_arguments.repository is None or borgmatic.config.validate.repositories_match(
     if delete_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, delete_arguments.repository
+        repository,
+        delete_arguments.repository,
     ):
     ):
         logger.answer('Deleting archives')
         logger.answer('Deleting archives')
 
 

+ 2 - 1
borgmatic/actions/export_key.py

@@ -19,7 +19,8 @@ def run_export_key(
     Run the "key export" action for the given repository.
     Run the "key export" action for the given repository.
     '''
     '''
     if export_arguments.repository is None or borgmatic.config.validate.repositories_match(
     if export_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, export_arguments.repository
+        repository,
+        export_arguments.repository,
     ):
     ):
         logger.info('Exporting repository key')
         logger.info('Exporting repository key')
         borgmatic.borg.export_key.export_key(
         borgmatic.borg.export_key.export_key(

+ 2 - 1
borgmatic/actions/export_tar.py

@@ -20,7 +20,8 @@ def run_export_tar(
     Run the "export-tar" action for the given repository.
     Run the "export-tar" action for the given repository.
     '''
     '''
     if export_tar_arguments.repository is None or borgmatic.config.validate.repositories_match(
     if export_tar_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, export_tar_arguments.repository
+        repository,
+        export_tar_arguments.repository,
     ):
     ):
         logger.info(f'Exporting archive {export_tar_arguments.archive} as tar file')
         logger.info(f'Exporting archive {export_tar_arguments.archive} as tar file')
         borgmatic.borg.export_tar.export_tar_archive(
         borgmatic.borg.export_tar.export_tar_archive(

+ 2 - 1
borgmatic/actions/extract.py

@@ -22,7 +22,8 @@ def run_extract(
     Run the "extract" action for the given repository.
     Run the "extract" action for the given repository.
     '''
     '''
     if extract_arguments.repository is None or borgmatic.config.validate.repositories_match(
     if extract_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, extract_arguments.repository
+        repository,
+        extract_arguments.repository,
     ):
     ):
         logger.info(f'Extracting archive {extract_arguments.archive}')
         logger.info(f'Extracting archive {extract_arguments.archive}')
         borgmatic.borg.extract.extract_archive(
         borgmatic.borg.extract.extract_archive(

+ 2 - 1
borgmatic/actions/import_key.py

@@ -19,7 +19,8 @@ def run_import_key(
     Run the "key import" action for the given repository.
     Run the "key import" action for the given repository.
     '''
     '''
     if import_arguments.repository is None or borgmatic.config.validate.repositories_match(
     if import_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, import_arguments.repository
+        repository,
+        import_arguments.repository,
     ):
     ):
         logger.info('Importing repository key')
         logger.info('Importing repository key')
         borgmatic.borg.import_key.import_key(
         borgmatic.borg.import_key.import_key(

+ 2 - 1
borgmatic/actions/info.py

@@ -24,7 +24,8 @@ def run_info(
     If info_arguments.json is True, yield the JSON output from the info for the archive.
     '''
     if info_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, info_arguments.repository
+        repository,
+        info_arguments.repository,
     ):
         if not info_arguments.json:
             logger.answer('Displaying archive summary information')

+ 2 - 1
borgmatic/actions/list.py

@@ -23,7 +23,8 @@ def run_list(
     If list_arguments.json is True, yield the JSON output from listing the archive.
     '''
     if list_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, list_arguments.repository
+        repository,
+        list_arguments.repository,
     ):
         if not list_arguments.json:
             if list_arguments.find_paths:  # pragma: no cover

+ 2 - 1
borgmatic/actions/mount.py

@@ -20,7 +20,8 @@ def run_mount(
     Run the "mount" action for the given repository.
     Run the "mount" action for the given repository.
     '''
     '''
     if mount_arguments.repository is None or borgmatic.config.validate.repositories_match(
     if mount_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, mount_arguments.repository
+        repository,
+        mount_arguments.repository,
     ):
     ):
         if mount_arguments.archive:
         if mount_arguments.archive:
             logger.info(f'Mounting archive {mount_arguments.archive}')
             logger.info(f'Mounting archive {mount_arguments.archive}')

+ 14 - 12
borgmatic/actions/pattern.py

@@ -47,7 +47,8 @@ def collect_patterns(config):
         return (
             tuple(
                 borgmatic.borg.pattern.Pattern(
-                    source_directory, source=borgmatic.borg.pattern.Pattern_source.CONFIG
+                    source_directory,
+                    source=borgmatic.borg.pattern.Pattern_source.CONFIG,
                 )
                 for source_directory in config.get('source_directories', ())
             )
@@ -67,7 +68,7 @@
             + tuple(
                 parse_pattern(pattern_line.strip())
                 for filename in config.get('patterns_from', ())
-                for pattern_line in open(filename).readlines()
+                for pattern_line in open(filename, encoding='utf-8').readlines()
                 if not pattern_line.lstrip().startswith('#')
                 if pattern_line.strip()
             )
@@ -77,7 +78,7 @@
                     borgmatic.borg.pattern.Pattern_style.FNMATCH,
                 )
                 for filename in config.get('exclude_from', ())
-                for exclude_line in open(filename).readlines()
+                for exclude_line in open(filename, encoding='utf-8').readlines()
                 if not exclude_line.lstrip().startswith('#')
                 if exclude_line.strip()
             )
@@ -112,9 +113,8 @@ def expand_directory(directory, working_directory):
             glob_path
             # If these are equal, that means we didn't add any working directory prefix above.
             if normalized_directory == expanded_directory
-            # Remove the working directory prefix that we added above in order to make glob() work.
-            # We can't use os.path.relpath() here because it collapses any use of Borg's slashdot
-            # hack.
+            # Remove the working directory prefix added above in order to make glob() work. We
+            # can't use os.path.relpath() here because it collapses any use of Borg's slashdot hack.
             else glob_path.removeprefix(working_directory_prefix)
         )
         for glob_path in glob_paths
@@ -161,7 +161,7 @@ def expand_patterns(patterns, working_directory=None, skip_paths=None):
                 )
             )
             for pattern in patterns
-        )
+        ),
     )
 
 
@@ -180,8 +180,10 @@ def get_existent_path_or_parent(path):
     try:
         return next(
             candidate_path
-            for candidate_path in (path,)
-            + tuple(str(parent) for parent in pathlib.PurePath(path).parents)
+            for candidate_path in (
+                path,
+                *tuple(str(parent) for parent in pathlib.PurePath(path).parents),
+            )
             if os.path.exists(candidate_path)
        )
     except StopIteration:
@@ -219,7 +221,7 @@ def device_map_patterns(patterns, working_directory=None):
         for pattern in patterns
         for existent_path in (
             get_existent_path_or_parent(
-                os.path.join(working_directory or '', pattern.path.lstrip('^'))
+                os.path.join(working_directory or '', pattern.path.lstrip('^')),
             ),
         )
     )
@@ -289,8 +291,8 @@ def process_patterns(patterns, config, working_directory, skip_expand_paths=None
                     patterns,
                     working_directory=working_directory,
                     skip_paths=skip_paths,
-                )
+                ),
             ),
             config,
-        )
+        ),
     )
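
The open() changes above add an explicit encoding argument. Assuming the commit enables Ruff's pylint-derived unspecified-encoding rule (PLW1514) or similar, an open() call without an encoding is flagged because its behavior depends on the platform locale. A minimal sketch with a hypothetical filename:

# An explicit encoding makes the read reproducible across locales.
with open('patterns.txt', encoding='utf-8') as pattern_file:
    lines = [line.strip() for line in pattern_file if line.strip()]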

+ 2 - 1
borgmatic/actions/prune.py

@@ -22,7 +22,8 @@ def run_prune(
     Run the "prune" action for the given repository.
     Run the "prune" action for the given repository.
     '''
     '''
     if prune_arguments.repository and not borgmatic.config.validate.repositories_match(
     if prune_arguments.repository and not borgmatic.config.validate.repositories_match(
-        repository, prune_arguments.repository
+        repository,
+        prune_arguments.repository,
     ):
     ):
         return
         return
 
 

+ 14 - 10
borgmatic/actions/recreate.py

@@ -26,7 +26,8 @@ def run_recreate(
     Run the "recreate" action for the given repository.
     Run the "recreate" action for the given repository.
     '''
     '''
     if recreate_arguments.repository is None or borgmatic.config.validate.repositories_match(
     if recreate_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, recreate_arguments.repository
+        repository,
+        recreate_arguments.repository,
     ):
     ):
         if recreate_arguments.archive:
         if recreate_arguments.archive:
             logger.answer(f'Recreating archive {recreate_arguments.archive}')
             logger.answer(f'Recreating archive {recreate_arguments.archive}')
@@ -35,7 +36,9 @@ def run_recreate(
 
 
         # Collect and process patterns.
         # Collect and process patterns.
         processed_patterns = process_patterns(
         processed_patterns = process_patterns(
-            collect_patterns(config), config, borgmatic.config.paths.get_working_directory(config)
+            collect_patterns(config),
+            config,
+            borgmatic.config.paths.get_working_directory(config),
         )
         )
 
 
         archive = borgmatic.borg.repo_list.resolve_archive_name(
         archive = borgmatic.borg.repo_list.resolve_archive_name(
@@ -51,13 +54,13 @@ def run_recreate(
         if archive and archive.endswith('.recreate'):
         if archive and archive.endswith('.recreate'):
             if recreate_arguments.archive == 'latest':
             if recreate_arguments.archive == 'latest':
                 raise ValueError(
                 raise ValueError(
-                    f'The latest archive "{archive}" is leftover from a prior recreate. Delete it first or select a different archive.'
-                )
-            else:
-                raise ValueError(
-                    f'The archive "{recreate_arguments.archive}" is leftover from a prior recreate. Select a different archive.'
+                    f'The latest archive "{archive}" is leftover from a prior recreate. Delete it first or select a different archive.',
                 )
                 )
 
 
+            raise ValueError(
+                f'The archive "{recreate_arguments.archive}" is leftover from a prior recreate. Select a different archive.',
+            )
+
         try:
         try:
             borgmatic.borg.recreate.recreate_archive(
             borgmatic.borg.recreate.recreate_archive(
                 repository['path'],
                 repository['path'],
@@ -74,11 +77,12 @@ def run_recreate(
             if error.returncode == BORG_EXIT_CODE_ARCHIVE_ALREADY_EXISTS:
             if error.returncode == BORG_EXIT_CODE_ARCHIVE_ALREADY_EXISTS:
                 if recreate_arguments.target:
                 if recreate_arguments.target:
                     raise ValueError(
                     raise ValueError(
-                        f'The archive "{recreate_arguments.target}" already exists. Delete it first or set a different target archive name.'
+                        f'The archive "{recreate_arguments.target}" already exists. Delete it first or set a different target archive name.',
                     )
                     )
-                elif archive:
+
+                if archive:
                     raise ValueError(
                     raise ValueError(
-                        f'The archive "{archive}.recreate" is leftover from a prior recreate. Delete it first or select a different archive.'
+                        f'The archive "{archive}.recreate" is leftover from a prior recreate. Delete it first or select a different archive.',
                     )
                     )
 
 
             raise
             raise
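
The restructuring above drops else/elif branches that follow a raise, flattening the control flow; this matches Ruff's unnecessary-else-after-raise style (RET506, by my reading; the commit itself doesn't name the rule). A hypothetical sketch of the resulting shape:

def pick_archive(target, archive):
    # Once a branch unconditionally raises, the following branch needs
    # no "else" or "elif"; it is simply dedented.
    if target:
        raise ValueError(f'The archive "{target}" already exists.')

    if archive:
        raise ValueError(f'The archive "{archive}.recreate" is leftover.')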

+ 3 - 2
borgmatic/actions/repo_create.py

@@ -19,7 +19,8 @@ def run_repo_create(
     Run the "repo-create" action for the given repository.
     Run the "repo-create" action for the given repository.
     '''
     '''
     if repo_create_arguments.repository and not borgmatic.config.validate.repositories_match(
     if repo_create_arguments.repository and not borgmatic.config.validate.repositories_match(
-        repository, repo_create_arguments.repository
+        repository,
+        repo_create_arguments.repository,
     ):
     ):
         return
         return
 
 
@@ -29,7 +30,7 @@ def run_repo_create(
 
 
     if not encryption_mode:
     if not encryption_mode:
         raise ValueError(
         raise ValueError(
-            'With the repo-create action, either the --encryption flag or the repository encryption option is required.'
+            'With the repo-create action, either the --encryption flag or the repository encryption option is required.',
         )
         )
 
 
     borgmatic.borg.repo_create.create_repository(
     borgmatic.borg.repo_create.create_repository(

+ 3 - 2
borgmatic/actions/repo_delete.py

@@ -18,10 +18,11 @@ def run_repo_delete(
     Run the "repo-delete" action for the given repository.
     Run the "repo-delete" action for the given repository.
     '''
     '''
     if repo_delete_arguments.repository is None or borgmatic.config.validate.repositories_match(
     if repo_delete_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, repo_delete_arguments.repository
+        repository,
+        repo_delete_arguments.repository,
     ):
     ):
         logger.answer(
         logger.answer(
-            'Deleting repository' + (' cache' if repo_delete_arguments.cache_only else '')
+            'Deleting repository' + (' cache' if repo_delete_arguments.cache_only else ''),
         )
         )
 
 
         borgmatic.borg.repo_delete.delete_repository(
         borgmatic.borg.repo_delete.delete_repository(

+ 2 - 1
borgmatic/actions/repo_info.py

@@ -22,7 +22,8 @@ def run_repo_info(
     If repo_info_arguments.json is True, yield the JSON output from the info for the repository.
     '''
     if repo_info_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, repo_info_arguments.repository
+        repository,
+        repo_info_arguments.repository,
     ):
         if not repo_info_arguments.json:
             logger.answer('Displaying repository summary information')

+ 2 - 1
borgmatic/actions/repo_list.py

@@ -22,7 +22,8 @@ def run_repo_list(
     If repo_list_arguments.json is True, yield the JSON output from listing the repository.
     '''
     if repo_list_arguments.repository is None or borgmatic.config.validate.repositories_match(
-        repository, repo_list_arguments.repository
+        repository,
+        repo_list_arguments.repository,
     ):
         if not repo_list_arguments.json:
             logger.answer('Listing repository')

+ 27 - 20
borgmatic/actions/restore.py

@@ -44,7 +44,7 @@ def dumps_match(first, second, default_port=None):
             if second_value == default_port and first_value is None:
                 continue
 
-        if first_value == UNSPECIFIED or second_value == UNSPECIFIED:
+        if first_value == UNSPECIFIED or second_value == UNSPECIFIED:  # noqa: PLR1714
             continue
 
         if first_value != second_value:
@@ -66,7 +66,7 @@ def render_dump_metadata(dump):
     else:
         metadata = f'{name}' if hostname is UNSPECIFIED else f'{name}@{hostname}'
 
-    if dump.hook_name not in (None, UNSPECIFIED):
+    if dump.hook_name not in {None, UNSPECIFIED}:
         return f'{metadata} ({dump.hook_name})'
 
     return metadata
@@ -112,14 +112,15 @@ def get_configured_data_source(config, restore_dump):
 
     if len(matching_dumps) > 1:
         raise ValueError(
-            f'Cannot restore data source {render_dump_metadata(restore_dump)} because there are multiple matching data sources configured'
+            f'Cannot restore data source {render_dump_metadata(restore_dump)} because there are multiple matching data sources configured',
         )
 
     return matching_dumps[0]
 
 
 def strip_path_prefix_from_extracted_dump_destination(
-    destination_path, borgmatic_runtime_directory
+    destination_path,
+    borgmatic_runtime_directory,
 ):
     '''
     Directory-format dump files get extracted into a temporary directory containing a path prefix
@@ -146,7 +147,8 @@ def strip_path_prefix_from_extracted_dump_destination(
             continue
 
         shutil.move(
-            subdirectory_path, os.path.join(borgmatic_runtime_directory, databases_directory)
+            subdirectory_path,
+            os.path.join(borgmatic_runtime_directory, databases_directory),
         )
         break
 
@@ -170,7 +172,7 @@ def restore_single_dump(
     that data source from the archive.
     '''
     dump_metadata = render_dump_metadata(
-        Dump(hook_name, data_source['name'], data_source.get('hostname'), data_source.get('port'))
+        Dump(hook_name, data_source['name'], data_source.get('hostname'), data_source.get('port')),
     )
 
     logger.info(f'Restoring data source {dump_metadata}')
@@ -198,8 +200,8 @@ def restore_single_dump(
             archive=archive_name,
             paths=[
                 borgmatic.hooks.data_source.dump.convert_glob_patterns_to_borg_pattern(
-                    dump_patterns
-                )
+                    dump_patterns,
+                ),
             ],
             config=config,
             local_borg_version=local_borg_version,
@@ -214,7 +216,8 @@ def restore_single_dump(
 
         if destination_path and not global_arguments.dry_run:
             strip_path_prefix_from_extracted_dump_destination(
-                destination_path, borgmatic_runtime_directory
+                destination_path,
+                borgmatic_runtime_directory,
             )
     finally:
         if destination_path and not global_arguments.dry_run:
@@ -250,7 +253,7 @@ def collect_dumps_from_archive(
     and return them as a set of Dump instances.
     '''
     borgmatic_source_directory = str(
-        pathlib.Path(borgmatic.config.paths.get_borgmatic_source_directory(config))
+        pathlib.Path(borgmatic.config.paths.get_borgmatic_source_directory(config)),
     )
 
     # Probe for the data source dumps in multiple locations, as the default location has moved to
@@ -265,7 +268,8 @@ def collect_dumps_from_archive(
         list_paths=[
             'sh:'
             + borgmatic.hooks.data_source.dump.make_data_source_dump_path(
-                base_directory, '*_databases/*/*'
+                base_directory,
+                '*_databases/*/*',
             )
             for base_directory in (
                 'borgmatic',
@@ -292,7 +296,8 @@ def collect_dumps_from_archive(
         ):
             try:
                 (hook_name, host_and_port, data_source_name) = dump_path.split(
-                    base_directory + os.path.sep, 1
+                    base_directory + os.path.sep,
+                    1,
                 )[1].split(os.path.sep)[0:3]
             except (ValueError, IndexError):
                 continue
@@ -315,7 +320,7 @@ def collect_dumps_from_archive(
             break
         else:
             logger.warning(
-                f'Ignoring invalid data source dump path "{dump_path}" in archive {archive}'
+                f'Ignoring invalid data source dump path "{dump_path}" in archive {archive}',
             )
 
     return dumps_from_archive
@@ -359,7 +364,7 @@ def get_dumps_to_restore(restore_arguments, dumps_from_archive):
                 data_source_name='all',
                 hostname=UNSPECIFIED,
                 port=UNSPECIFIED,
-            )
+            ),
         }
     )
     missing_dumps = set()
@@ -386,7 +391,7 @@ def get_dumps_to_restore(restore_arguments, dumps_from_archive):
             dumps_to_restore.add(matching_dumps[0])
         else:
             raise ValueError(
-                f'Cannot restore data source {render_dump_metadata(requested_dump)} because there are multiple matching dumps in the archive. Try adding flags to disambiguate.'
+                f'Cannot restore data source {render_dump_metadata(requested_dump)} because there are multiple matching dumps in the archive. Try adding flags to disambiguate.',
             )
 
     if missing_dumps:
@@ -395,7 +400,7 @@ def get_dumps_to_restore(restore_arguments, dumps_from_archive):
         )
 
         raise ValueError(
-            f"Cannot restore data source dump{'s' if len(missing_dumps) > 1 else ''} {rendered_dumps} missing from archive"
+            f"Cannot restore data source dump{'s' if len(missing_dumps) > 1 else ''} {rendered_dumps} missing from archive",
         )
 
     return dumps_to_restore
@@ -411,14 +416,15 @@ def ensure_requested_dumps_restored(dumps_to_restore, dumps_actually_restored):
         raise ValueError('No data source dumps were found to restore')
 
     missing_dumps = sorted(
-        dumps_to_restore - dumps_actually_restored, key=lambda dump: dump.data_source_name
+        dumps_to_restore - dumps_actually_restored,
+        key=lambda dump: dump.data_source_name,
     )
 
     if missing_dumps:
         rendered_dumps = ', '.join(f'{render_dump_metadata(dump)}' for dump in missing_dumps)
 
         raise ValueError(
-            f"Cannot restore data source{'s' if len(missing_dumps) > 1 else ''} {rendered_dumps} missing from borgmatic's configuration"
+            f"Cannot restore data source{'s' if len(missing_dumps) > 1 else ''} {rendered_dumps} missing from borgmatic's configuration",
         )
 
 
@@ -439,7 +445,8 @@ def run_restore(
     matching dump in the archive.
     '''
     if restore_arguments.repository and not borgmatic.config.validate.repositories_match(
-        repository, restore_arguments.repository
+        repository,
+        restore_arguments.repository,
     ):
         return
 
@@ -516,7 +523,7 @@ def run_restore(
                 remote_path,
                 archive_name,
                 restore_dump.hook_name,
-                dict(found_data_source, **{'schemas': restore_arguments.schemas}),
+                dict(found_data_source, schemas=restore_arguments.schemas),
                 connection_params,
                 borgmatic_runtime_directory,
             )

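The final run_restore hunk replaces a `**`-unpacked throwaway dict with direct keyword arguments. A minimal sketch of why the two spellings are interchangeable (the mapping below is invented for illustration):

    # dict(mapping, **kwargs) copies the mapping and then applies keyword
    # overrides, so both forms produce the same merged dict.
    found_data_source = {'name': 'users', 'schemas': None}

    via_unpacking = dict(found_data_source, **{'schemas': ['public']})
    via_keywords = dict(found_data_source, schemas=['public'])

    assert via_unpacking == via_keywords == {'name': 'users', 'schemas': ['public']}
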
+ 1 - 1
borgmatic/actions/transfer.py

@@ -19,7 +19,7 @@ def run_transfer(
     '''
     if transfer_arguments.archive and config.get('match_archives'):
         raise ValueError(
-            'With the transfer action, only one of --archive and --match-archives/match_archives can be used.'
+            'With the transfer action, only one of --archive and --match-archives/match_archives can be used.',
         )
 
     logger.info('Transferring archives to repository')

+ 6 - 8
borgmatic/borg/borg.py

@@ -39,9 +39,9 @@ def run_arbitrary_borg(
         borg_command = tuple(options[:command_options_start_index])
         command_options = tuple(options[command_options_start_index:])
 
-        if borg_command and borg_command[0] in borgmatic.commands.arguments.ACTION_ALIASES.keys():
+        if borg_command and borg_command[0] in borgmatic.commands.arguments.ACTION_ALIASES:
             logger.warning(
-                f"Borg's {borg_command[0]} subcommand is supported natively by borgmatic. Try this instead: borgmatic {borg_command[0]}"
+                f"Borg's {borg_command[0]} subcommand is supported natively by borgmatic. Try this instead: borgmatic {borg_command[0]}",
             )
     except IndexError:
         borg_command = ()
@@ -57,16 +57,14 @@ def run_arbitrary_borg(
         + command_options
     )
 
-    return execute_command(
+    return execute_command(  # noqa: S604
         tuple(shlex.quote(part) for part in full_command),
         output_file=DO_NOT_CAPTURE,
-        shell=True,  # noqa: S604
+        shell=True,
         environment=dict(
             (environment.make_environment(config) or {}),
-            **{
-                'BORG_REPO': repository_path,
-                'ARCHIVE': archive if archive else '',
-            },
+            BORG_REPO=repository_path,
+            ARCHIVE=archive if archive else '',
         ),
         working_directory=borgmatic.config.paths.get_working_directory(config),
         borg_local_path=local_path,

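run_arbitrary_borg executes through a shell (`shell=True`, now marked with `# noqa: S604` on the call itself), which is why every part of the command is passed through shlex.quote first. A small sketch of the quoting, with a made-up archive name:

    import shlex

    # Quoting keeps whitespace and shell metacharacters within any single
    # argument from being split or interpreted when the joined command runs.
    parts = ('borg', 'list', 'repo::archive with spaces; echo oops')
    print(' '.join(shlex.quote(part) for part in parts))
    # borg list 'repo::archive with spaces; echo oops'
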
+ 2 - 2
borgmatic/borg/change_passphrase.py

@@ -49,7 +49,7 @@ def change_passphrase(
     config_without_passphrase = {
         option_name: value
         for (option_name, value) in config.items()
-        if option_name not in ('encryption_passphrase', 'encryption_passcommand')
+        if option_name not in {'encryption_passphrase', 'encryption_passcommand'}
     }
 
     borgmatic.execute.execute_command(
@@ -63,5 +63,5 @@ def change_passphrase(
     )
 
     logger.answer(
-        f"{repository_path}: Don't forget to update your encryption_passphrase option (if needed)"
+        f"{repository_path}: Don't forget to update your encryption_passphrase option (if needed)",
     )

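Several hunks in this commit swap tuple literals for set literals in membership tests (Ruff PLR6201). The result is identical; the set form expresses "unordered collection of allowed values" and uses hashing rather than a scan. A toy check:

    option_name = 'encryption_passphrase'

    # Same truth value either way; only the container type differs.
    assert (option_name in ('encryption_passphrase', 'encryption_passcommand')) == (
        option_name in {'encryption_passphrase', 'encryption_passcommand'}
    )
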
+ 10 - 5
borgmatic/borg/check.py

@@ -42,12 +42,12 @@ def make_archive_filter_flags(local_borg_version, config, checks, check_argument
 
     if check_last:
         logger.warning(
-            'Ignoring check_last option, as "archives" or "data" are not in consistency checks'
+            'Ignoring check_last option, as "archives" or "data" are not in consistency checks',
         )
 
     if prefix:
         logger.warning(
-            'Ignoring consistency prefix option, as "archives" or "data" are not in consistency checks'
+            'Ignoring consistency prefix option, as "archives" or "data" are not in consistency checks',
         )
 
     return ()
@@ -77,13 +77,18 @@ def make_check_name_flags(checks, archive_filter_flags):
         return common_flags
 
     return (
-        tuple(f'--{check}-only' for check in checks if check in ('repository', 'archives'))
+        tuple(f'--{check}-only' for check in checks if check in {'repository', 'archives'})
         + common_flags
     )
 
 
 def get_repository_id(
-    repository_path, config, local_borg_version, global_arguments, local_path, remote_path
+    repository_path,
+    config,
+    local_borg_version,
+    global_arguments,
+    local_path,
+    remote_path,
 ):
     '''
     Given a local or remote repository path, a configuration dict, the local Borg version, global
@@ -101,7 +106,7 @@ def get_repository_id(
                 global_arguments,
                 local_path,
                 remote_path,
-            )
+            ),
         )['repository']['id']
     except (json.JSONDecodeError, KeyError):
         raise ValueError(f'Cannot determine Borg repository ID for {repository_path}')

+ 1 - 1
borgmatic/borg/compact.py

@@ -48,7 +48,7 @@ def compact_segments(
     )
 
     if dry_run and not feature.available(feature.Feature.DRY_RUN_COMPACT, local_borg_version):
-        logging.info('Skipping compact (dry run)')
+        logger.info('Skipping compact (dry run)')
         return
 
     execute_command(

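The compact.py change (and matching ones in export_tar.py and borg/repo_create.py below) is a genuine bug fix, not just style: `logging.info()` goes through the root logger, bypassing the module-level `logger = logging.getLogger(__name__)` that borgmatic configures. A minimal demonstration:

    import logging

    logging.basicConfig(format='%(name)s: %(message)s', level=logging.INFO)
    logger = logging.getLogger('borgmatic.borg.compact')

    logging.info('skipping')  # emitted by the "root" logger
    logger.info('skipping')   # emitted by "borgmatic.borg.compact"
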
+ 31 - 20
borgmatic/borg/create.py

@@ -72,8 +72,13 @@ def collect_special_file_paths(
     # files including any named pipe we've created. And omit "--filter" because that can break the
     # paths output parsing below such that path lines no longer start with the expected "- ".
     paths_output = execute_command_and_capture_output(
-        flags.omit_flag_and_value(flags.omit_flag(create_command, '--exclude-nodump'), '--filter')
-        + ('--dry-run', '--list'),
+        (
+            *flags.omit_flag_and_value(
+                flags.omit_flag(create_command, '--exclude-nodump'), '--filter'
+            ),
+            '--dry-run',
+            '--list',
+        ),
         capture_stderr=True,
         working_directory=working_directory,
         environment=environment.make_environment(config),
@@ -86,7 +91,7 @@ def collect_special_file_paths(
     paths = tuple(
         path_line.split(' ', 1)[1]
         for path_line in paths_output.split('\n')
-        if path_line and path_line.startswith('- ') or path_line.startswith('+ ')
+        if path_line and path_line.startswith(('- ', '+ '))
     )
 
     # These are the subset of those files that contain the borgmatic runtime directory.
@@ -100,7 +105,7 @@ def collect_special_file_paths(
         # If no paths to backup contain the runtime directory, it must've been excluded.
         if not paths_containing_runtime_directory and not dry_run:
             raise ValueError(
-                f'The runtime directory {os.path.normpath(borgmatic_runtime_directory)} overlaps with the configured excludes or patterns with excludes. Please ensure the runtime directory is not excluded.'
+                f'The runtime directory {os.path.normpath(borgmatic_runtime_directory)} overlaps with the configured excludes or patterns with excludes. Please ensure the runtime directory is not excluded.',
             )
 
     return tuple(
@@ -142,7 +147,8 @@ def make_base_create_command(
         borgmatic.borg.pattern.check_all_root_patterns_exist(patterns)
 
     patterns_file = borgmatic.borg.pattern.write_patterns_file(
-        patterns, borgmatic_runtime_directory
+        patterns,
+        borgmatic_runtime_directory,
     )
     checkpoint_interval = config.get('checkpoint_interval', None)
     checkpoint_volume = config.get('checkpoint_volume', None)
@@ -218,14 +224,16 @@ def make_base_create_command(
     )
 
     create_positional_arguments = flags.make_repository_archive_flags(
-        repository_path, archive_name_format, local_borg_version
+        repository_path,
+        archive_name_format,
+        local_borg_version,
     )
 
     # If database hooks are enabled (as indicated by streaming processes), exclude files that might
     # cause Borg to hang. But skip this if the user has explicitly set the "read_special" to True.
     if stream_processes and not config.get('read_special'):
         logger.warning(
-            'Ignoring configured "read_special" value of false, as true is needed for database hooks.'
+            'Ignoring configured "read_special" value of false, as true is needed for database hooks.',
         )
         working_directory = borgmatic.config.paths.get_working_directory(config)
 
@@ -246,7 +254,7 @@ def make_base_create_command(
                 placeholder=' ...',
             )
             logger.warning(
-                f'Excluding special files to prevent Borg from hanging: {truncated_special_file_paths}'
+                f'Excluding special files to prevent Borg from hanging: {truncated_special_file_paths}',
             )
             patterns_file = borgmatic.borg.pattern.write_patterns_file(
                 tuple(
@@ -298,7 +306,7 @@ def create_archive(
 
     working_directory = borgmatic.config.paths.get_working_directory(config)
 
-    (create_flags, create_positional_arguments, patterns_file) = make_base_create_command(
+    (create_flags, create_positional_arguments, _) = make_base_create_command(
         dry_run,
         repository_path,
         config,
@@ -345,7 +353,8 @@ def create_archive(
             borg_local_path=local_path,
             borg_exit_codes=borg_exit_codes,
         )
-    elif output_log_level is None:
+
+    if output_log_level is None:
         return execute_command_and_capture_output(
             create_flags + create_positional_arguments,
             working_directory=working_directory,
@@ -353,13 +362,15 @@ def create_archive(
             borg_local_path=local_path,
             borg_exit_codes=borg_exit_codes,
         )
-    else:
-        execute_command(
-            create_flags + create_positional_arguments,
-            output_log_level,
-            output_file,
-            working_directory=working_directory,
-            environment=environment.make_environment(config),
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
-        )
+
+    execute_command(
+        create_flags + create_positional_arguments,
+        output_log_level,
+        output_file,
+        working_directory=working_directory,
+        environment=environment.make_environment(config),
+        borg_local_path=local_path,
+        borg_exit_codes=borg_exit_codes,
+    )
+
+    return None

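The collect_special_file_paths hunk fixes an operator-precedence bug along with style: `a and b or c` parses as `(a and b) or c`, so the old condition applied the `path_line` truthiness guard only to the '- ' branch. Passing a tuple to str.startswith keeps one guard covering both prefixes. A sketch:

    path_line = '+ /dev/fuse'

    # Old: (path_line and path_line.startswith('- ')) or path_line.startswith('+ ')
    # New: one guard, two prefixes.
    keep = bool(path_line and path_line.startswith(('- ', '+ ')))
    assert keep
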
+ 10 - 3
borgmatic/borg/delete.py

@@ -11,6 +11,9 @@ import borgmatic.execute
 logger = logging.getLogger(__name__)
 
 
+FORCE_HARDER_FLAG_COUNT = 2
+
+
 def make_delete_command(
     repository,
     config,
@@ -36,7 +39,10 @@ def make_delete_command(
         + borgmatic.borg.flags.make_flags('lock-wait', config.get('lock_wait'))
         + borgmatic.borg.flags.make_flags('list', config.get('list_details'))
         + (
-            (('--force',) + (('--force',) if delete_arguments.force >= 2 else ()))
+            (
+                ('--force',)
+                + (('--force',) if delete_arguments.force >= FORCE_HARDER_FLAG_COUNT else ())
+            )
             if delete_arguments.force
             else ()
         )
@@ -98,10 +104,11 @@ def delete_archives(
         for argument_name in ARCHIVE_RELATED_ARGUMENT_NAMES
     ):
         if borgmatic.borg.feature.available(
-            borgmatic.borg.feature.Feature.REPO_DELETE, local_borg_version
+            borgmatic.borg.feature.Feature.REPO_DELETE,
+            local_borg_version,
         ):
             logger.warning(
-                'Deleting an entire repository with the delete action is deprecated when using Borg 2.x+. Use the repo-delete action instead.'
+                'Deleting an entire repository with the delete action is deprecated when using Borg 2.x+. Use the repo-delete action instead.',
             )
 
         repo_delete_arguments = argparse.Namespace(

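Hoisting the bare `2` into FORCE_HARDER_FLAG_COUNT answers Ruff's magic-value rule (PLR2004) without changing behavior: one `--force` for force=1, two for force >= 2. A condensed, self-contained sketch of the same flag logic:

    FORCE_HARDER_FLAG_COUNT = 2

    def force_flags(force_count):
        # 0 -> (), 1 -> ('--force',), 2 or more -> ('--force', '--force')
        if not force_count:
            return ()
        return ('--force',) + (('--force',) if force_count >= FORCE_HARDER_FLAG_COUNT else ())

    assert force_flags(0) == ()
    assert force_flags(1) == ('--force',)
    assert force_flags(2) == ('--force', '--force')
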
+ 2 - 1
borgmatic/borg/environment.py

@@ -64,7 +64,8 @@ def make_environment(config):
         environment.pop('BORG_PASSCOMMAND', None)
 
     passphrase = borgmatic.hooks.credential.parse.resolve_credential(
-        config.get('encryption_passphrase'), config
+        config.get('encryption_passphrase'),
+        config,
     )
 
     if passphrase is None:

+ 1 - 1
borgmatic/borg/export_key.py

@@ -35,7 +35,7 @@ def export_key(
     if export_arguments.path and export_arguments.path != '-':
         if os.path.exists(os.path.join(working_directory or '', export_arguments.path)):
             raise FileExistsError(
-                f'Destination path {export_arguments.path} already exists. Aborting.'
+                f'Destination path {export_arguments.path} already exists. Aborting.',
             )
 
         output_file = None

+ 2 - 5
borgmatic/borg/export_tar.py

@@ -56,13 +56,10 @@ def export_tar_archive(
         + (tuple(paths) if paths else ())
     )
 
-    if config.get('list_details'):
-        output_log_level = logging.ANSWER
-    else:
-        output_log_level = logging.INFO
+    output_log_level = logging.ANSWER if config.get('list_details') else logging.INFO
 
     if dry_run:
-        logging.info('Skipping export to tar file (dry run)')
+        logger.info('Skipping export to tar file (dry run)')
         return
 
     execute_command(

+ 5 - 1
borgmatic/borg/extract.py

@@ -52,7 +52,9 @@ def extract_last_archive_dry_run(
         + verbosity_flags
         + list_flag
         + flags.make_repository_archive_flags(
-            repository_path, last_archive_name, local_borg_version
+            repository_path,
+            last_archive_name,
+            local_borg_version,
         )
     )
 
@@ -178,3 +180,5 @@ def extract_archive(
         borg_local_path=local_path,
         borg_exit_codes=borg_exit_codes,
     )
+
+    return None

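The trailing `return None` lines added across these modules satisfy Ruff's consistent-return rules: when any path of a function returns a value, every path should return explicitly rather than fall off the end. A minimal sketch of the pattern:

    def run(capture):
        if capture:
            return 'captured output'

        # An explicit None instead of an implicit fall-through, so every
        # exit of the function returns something on purpose.
        return None
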
+ 17 - 17
borgmatic/borg/flags.py

@@ -34,7 +34,7 @@ def make_flags_from_arguments(arguments, excludes=()):
             make_flags(name, value=getattr(arguments, name))
             for name in sorted(vars(arguments))
             if name not in excludes and not name.startswith('_')
-        )
+        ),
     )
 
 
@@ -50,7 +50,7 @@ def make_repository_flags(repository_path, local_borg_version):
     ) + (repository_path,)
 
 
-ARCHIVE_HASH_PATTERN = re.compile('[0-9a-fA-F]{8,}$')
+ARCHIVE_HASH_PATTERN = re.compile(r'[0-9a-fA-F]{8,}$')
 
 
 def make_repository_archive_flags(repository_path, archive, local_borg_version):
@@ -76,8 +76,8 @@ def make_repository_archive_flags(repository_path, archive, local_borg_version):
     )
 
 
-DEFAULT_ARCHIVE_NAME_FORMAT_WITHOUT_SERIES = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'  # noqa: FS003
-DEFAULT_ARCHIVE_NAME_FORMAT_WITH_SERIES = '{hostname}'  # noqa: FS003
+DEFAULT_ARCHIVE_NAME_FORMAT_WITHOUT_SERIES = '{hostname}-{now:%Y-%m-%dT%H:%M:%S.%f}'
+DEFAULT_ARCHIVE_NAME_FORMAT_WITH_SERIES = '{hostname}'
 
 
 def get_default_archive_name_format(local_borg_version):
@@ -90,7 +90,7 @@ def get_default_archive_name_format(local_borg_version):
     return DEFAULT_ARCHIVE_NAME_FORMAT_WITHOUT_SERIES
 
 
-def make_match_archives_flags(
+def make_match_archives_flags(  # noqa: PLR0911
     match_archives,
     archive_name_format,
     local_borg_version,
@@ -115,8 +115,8 @@ def make_match_archives_flags(
                 return ('--match-archives', f'aid:{match_archives}')
 
             return ('--match-archives', match_archives)
-        else:
-            return ('--glob-archives', re.sub(r'^sh:', '', match_archives))
+
+        return ('--glob-archives', re.sub(r'^sh:', '', match_archives))
 
     derived_match_archives = re.sub(
         r'\{(now|utcnow|pid)([:%\w\.-]*)\}',
@@ -131,8 +131,8 @@ def make_match_archives_flags(
 
     if feature.available(feature.Feature.MATCH_ARCHIVES, local_borg_version):
         return ('--match-archives', f'sh:{derived_match_archives}')
-    else:
-        return ('--glob-archives', f'{derived_match_archives}')
+
+    return ('--glob-archives', f'{derived_match_archives}')
 
 
 def warn_for_aggressive_archive_flags(json_command, json_output):
@@ -150,7 +150,7 @@ def warn_for_aggressive_archive_flags(json_command, json_output):
         if len(json.loads(json_output)['archives']) == 0:
             logger.warning('An archive filter was applied, but no matching archives were found.')
             logger.warning(
-                'Try adding --match-archives "*" or adjusting archive_name_format/match_archives in configuration.'
+                'Try adding --match-archives "*" or adjusting archive_name_format/match_archives in configuration.',
             )
     except json.JSONDecodeError as error:
         logger.debug(f'Cannot parse JSON output from archive command: {error}')
@@ -193,8 +193,8 @@ def omit_flag_and_value(arguments, flag):
     # its value.
     return tuple(
         argument
-        for (previous_argument, argument) in zip((None,) + arguments, arguments)
-        if flag not in (previous_argument, argument)
+        for (previous_argument, argument) in zip((None, *arguments), arguments)
+        if flag not in {previous_argument, argument}
         if not argument.startswith(f'{flag}=')
     )
 
@@ -209,7 +209,7 @@ def make_exclude_flags(config):
         itertools.chain.from_iterable(
             ('--exclude-if-present', if_present)
             for if_present in config.get('exclude_if_present', ())
-        )
+        ),
     )
     keep_exclude_tags_flags = ('--keep-exclude-tags',) if config.get('keep_exclude_tags') else ()
     exclude_nodump_flags = ('--exclude-nodump',) if config.get('exclude_nodump') else ()
@@ -229,10 +229,10 @@ def make_list_filter_flags(local_borg_version, dry_run):
     if feature.available(feature.Feature.EXCLUDED_FILES_MINUS, local_borg_version):
         if show_excludes or dry_run:
             return f'{base_flags}+-'
-        else:
-            return base_flags
+
+        return base_flags
 
     if show_excludes:
         return f'{base_flags}x-'
-    else:
-        return f'{base_flags}-'
+
+    return f'{base_flags}-'

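The omit_flag_and_value rewrite keeps the same pairwise-iteration trick, now spelled with `(None, *arguments)` unpacking: zipping the sequence against itself shifted by one yields (previous, current) pairs, so a flag and its following value can both be skipped. The function from the hunk above, exercised standalone with made-up flag values:

    def omit_flag_and_value(arguments, flag):
        # Pair each argument with its predecessor; drop an argument when it or
        # its predecessor is the flag, or when it's the --flag=value form.
        return tuple(
            argument
            for (previous_argument, argument) in zip((None, *arguments), arguments)
            if flag not in {previous_argument, argument}
            if not argument.startswith(f'{flag}=')
        )

    assert omit_flag_and_value(('--filter', 'AME', '--list'), '--filter') == ('--list',)
    assert omit_flag_and_value(('--filter=AME', '--list'), '--filter') == ('--list',)
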
+ 4 - 1
borgmatic/borg/info.py

@@ -55,7 +55,8 @@ def make_info_command(
             )
         )
         + flags.make_flags_from_arguments(
-            info_arguments, excludes=('repository', 'archive', 'prefix', 'match_archives')
+            info_arguments,
+            excludes=('repository', 'archive', 'prefix', 'match_archives'),
         )
         + flags.make_repository_flags(repository_path, local_borg_version)
     )
@@ -119,3 +120,5 @@ def display_archives_info(
         borg_local_path=local_path,
         borg_exit_codes=borg_exit_codes,
     )
+
+    return None

+ 14 - 9
borgmatic/borg/list.py

@@ -17,7 +17,8 @@ MAKE_FLAGS_EXCLUDES = (
     'archive',
     'paths',
     'find_paths',
-) + ARCHIVE_FILTER_FLAGS_MOVED_TO_REPO_LIST
+    *ARCHIVE_FILTER_FLAGS_MOVED_TO_REPO_LIST,
+)
 
 
 def make_list_command(
@@ -53,7 +54,9 @@ def make_list_command(
         + flags.make_flags_from_arguments(list_arguments, excludes=MAKE_FLAGS_EXCLUDES)
         + (
             flags.make_repository_archive_flags(
-                repository_path, list_arguments.archive, local_borg_version
+                repository_path,
+                list_arguments.archive,
+                local_borg_version,
             )
             if list_arguments.archive
             else flags.make_repository_flags(repository_path, local_borg_version)
@@ -115,10 +118,10 @@ def capture_archive_listing(
                 argparse.Namespace(
                     repository=repository_path,
                     archive=archive,
-                    paths=[path for path in list_paths] if list_paths else None,
+                    paths=list(list_paths) if list_paths else None,
                     find_paths=None,
                     json=None,
-                    format=path_format or '{path}{NUL}',  # noqa: FS003
+                    format=path_format or '{path}{NUL}',
                 ),
                 global_arguments,
                 local_path,
@@ -130,7 +133,7 @@ def capture_archive_listing(
             borg_exit_codes=config.get('borg_exit_codes'),
         )
         .strip('\0')
-        .split('\0')
+        .split('\0'),
     )
 
 
@@ -156,7 +159,7 @@ def list_archive(
     if not list_arguments.archive and not list_arguments.find_paths:
         if feature.available(feature.Feature.REPO_LIST, local_borg_version):
             logger.warning(
-                'Omitting the --archive flag on the list action is deprecated when using Borg 2.x+. Use the repo-list action instead.'
+                'Omitting the --archive flag on the list action is deprecated when using Borg 2.x+. Use the repo-list action instead.',
             )
 
         repo_list_arguments = argparse.Namespace(
@@ -184,12 +187,12 @@ def list_archive(
         for name in ARCHIVE_FILTER_FLAGS_MOVED_TO_REPO_LIST:
             if getattr(list_arguments, name, None):
                 logger.warning(
-                    f"The --{name.replace('_', '-')} flag on the list action is ignored when using the --archive flag."
+                    f"The --{name.replace('_', '-')} flag on the list action is ignored when using the --archive flag.",
                 )
 
     if list_arguments.json:
         raise ValueError(
-            'The --json flag on the list action is not supported when using the --archive/--find flags.'
+            'The --json flag on the list action is not supported when using the --archive/--find flags.',
         )
 
     borg_exit_codes = config.get('borg_exit_codes')
@@ -227,7 +230,7 @@ def list_archive(
                 borg_exit_codes=borg_exit_codes,
             )
             .strip('\n')
-            .splitlines()
+            .splitlines(),
         )
     else:
         archive_lines = (list_arguments.archive,)
@@ -262,3 +265,5 @@ def list_archive(
             borg_local_path=local_path,
             borg_exit_codes=borg_exit_codes,
         )
+
+    return None

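The `paths=list(list_paths)` change applies Ruff C416: a comprehension that only iterates can be the list() constructor. The two are equivalent for any iterable:

    list_paths = ('etc', 'home')

    assert [path for path in list_paths] == list(list_paths) == ['etc', 'home']
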
+ 10 - 10
borgmatic/borg/pattern.py

@@ -4,8 +4,6 @@ import logging
 import os
 import tempfile
 
-import borgmatic.borg.pattern
-
 logger = logging.getLogger(__name__)
 
 
@@ -59,9 +57,9 @@ Pattern = collections.namedtuple(
 
 def write_patterns_file(patterns, borgmatic_runtime_directory, patterns_file=None):
     '''
-    Given a sequence of patterns as borgmatic.borg.pattern.Pattern instances, write them to a named
-    temporary file in the given borgmatic runtime directory and return the file object so it can
-    continue to exist on disk as long as the caller needs it.
+    Given a sequence of patterns as Pattern instances, write them to a named temporary file in the
+    given borgmatic runtime directory and return the file object so it can continue to exist on disk
+    as long as the caller needs it.
 
     If an optional open pattern file is given, append to it instead of making a new temporary file.
     Return None if no patterns are provided.
@@ -70,7 +68,9 @@ def write_patterns_file(patterns, borgmatic_runtime_directory, patterns_file=Non
         return None
 
     if patterns_file is None:
-        patterns_file = tempfile.NamedTemporaryFile('w', dir=borgmatic_runtime_directory)
+        patterns_file = tempfile.NamedTemporaryFile(
+            'w', dir=borgmatic_runtime_directory, encoding='utf-8'
+        )
         operation_name = 'Writing'
     else:
         patterns_file.write('\n')
@@ -90,17 +90,17 @@ def write_patterns_file(patterns, borgmatic_runtime_directory, patterns_file=Non
 
 def check_all_root_patterns_exist(patterns):
     '''
-    Given a sequence of borgmatic.borg.pattern.Pattern instances, check that all root pattern
-    paths exist. If any don't, raise an exception.
+    Given a sequence of Pattern instances, check that all root pattern paths exist. If any don't,
+    raise an exception.
     '''
     missing_paths = [
         pattern.path
         for pattern in patterns
-        if pattern.type == borgmatic.borg.pattern.Pattern_type.ROOT
+        if pattern.type == Pattern_type.ROOT
         if not os.path.exists(pattern.path)
     ]
 
     if missing_paths:
         raise ValueError(
-            f"Source directories or root pattern paths do not exist: {', '.join(missing_paths)}"
+            f"Source directories or root pattern paths do not exist: {', '.join(missing_paths)}",
        )

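Adding `encoding='utf-8'` to NamedTemporaryFile pins the text encoding instead of inheriting the platform's locale default (Ruff's implicit-encoding rule), which keeps pattern files byte-identical across systems. A sketch with a made-up pattern line:

    import tempfile

    # 'w' opens in text mode; without an explicit encoding, the locale
    # default would apply and could vary between hosts.
    with tempfile.NamedTemporaryFile('w', encoding='utf-8') as patterns_file:
        patterns_file.write('R /home\n')
        patterns_file.flush()
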
+ 4 - 2
borgmatic/borg/recreate.py

@@ -38,7 +38,8 @@ def recreate_archive(
 
     # Write patterns to a temporary file and use that file with --patterns-from.
     patterns_file = write_patterns_file(
-        patterns, borgmatic.config.paths.get_working_directory(config)
+        patterns,
+        borgmatic.config.paths.get_working_directory(config),
     )
 
     recreate_command = (
@@ -80,7 +81,8 @@ def recreate_archive(
                 )
             )
             if borgmatic.borg.feature.available(
-                borgmatic.borg.feature.Feature.SEPARATE_REPOSITORY_ARCHIVE, local_borg_version
+                borgmatic.borg.feature.Feature.SEPARATE_REPOSITORY_ARCHIVE,
+                local_borg_version,
             )
             else (
                 flags.make_repository_archive_flags(repository, archive, local_borg_version)

+ 3 - 1
borgmatic/borg/rename.py

@@ -25,7 +25,9 @@ def make_rename_command(
         + borgmatic.borg.flags.make_flags('log-json', config.get('log_json'))
         + borgmatic.borg.flags.make_flags('lock-wait', config.get('lock_wait'))
         + borgmatic.borg.flags.make_repository_archive_flags(
-            repository_name, old_archive_name, local_borg_version
+            repository_name,
+            old_archive_name,
+            local_borg_version,
         )
         + (new_archive_name,)
     )

+ 3 - 3
borgmatic/borg/repo_create.py

@@ -49,13 +49,13 @@ def create_repository(
                 global_arguments,
                 local_path,
                 remote_path,
-            )
+            ),
         )
         repository_encryption_mode = info_data.get('encryption', {}).get('mode')
 
         if repository_encryption_mode != encryption_mode:
             raise ValueError(
-                f'Requested encryption mode "{encryption_mode}" does not match existing repository encryption mode "{repository_encryption_mode}"'
+                f'Requested encryption mode "{encryption_mode}" does not match existing repository encryption mode "{repository_encryption_mode}"',
             )
 
         logger.info('Repository already exists. Skipping creation.')
@@ -92,7 +92,7 @@ def create_repository(
     )
 
     if dry_run:
-        logging.info('Skipping repository creation (dry run)')
+        logger.info('Skipping repository creation (dry run)')
         return
 
     # Do not capture output here, so as to support interactive prompts.

+ 11 - 3
borgmatic/borg/repo_delete.py

@@ -9,6 +9,9 @@ import borgmatic.execute
 logger = logging.getLogger(__name__)
 
 
+FORCE_HARDER_FLAG_COUNT = 2
+
+
 def make_repo_delete_command(
     repository,
     config,
@@ -28,7 +31,8 @@ def make_repo_delete_command(
         + (
             ('repo-delete',)
             if borgmatic.borg.feature.available(
-                borgmatic.borg.feature.Feature.REPO_DELETE, local_borg_version
+                borgmatic.borg.feature.Feature.REPO_DELETE,
+                local_borg_version,
             )
             else ('delete',)
         )
@@ -41,12 +45,16 @@ def make_repo_delete_command(
         + borgmatic.borg.flags.make_flags('lock-wait', config.get('lock_wait'))
         + borgmatic.borg.flags.make_flags('list', config.get('list_details'))
         + (
-            (('--force',) + (('--force',) if repo_delete_arguments.force >= 2 else ()))
+            (
+                ('--force',)
+                + (('--force',) if repo_delete_arguments.force >= FORCE_HARDER_FLAG_COUNT else ())
+            )
             if repo_delete_arguments.force
             else ()
         )
         + borgmatic.borg.flags.make_flags_from_arguments(
-            repo_delete_arguments, excludes=('list_details', 'force', 'repository')
+            repo_delete_arguments,
+            excludes=('list_details', 'force', 'repository'),
         )
         + borgmatic.borg.flags.make_repository_flags(repository['path'], local_borg_version)
     )

+ 11 - 9
borgmatic/borg/repo_info.py

@@ -61,12 +61,14 @@ def display_repository_info(
             borg_local_path=local_path,
             borg_exit_codes=borg_exit_codes,
         )
-    else:
-        execute_command(
-            full_command,
-            output_log_level=logging.ANSWER,
-            environment=environment.make_environment(config),
-            working_directory=working_directory,
-            borg_local_path=local_path,
-            borg_exit_codes=borg_exit_codes,
-        )
+
+    execute_command(
+        full_command,
+        output_log_level=logging.ANSWER,
+        environment=environment.make_environment(config),
+        working_directory=working_directory,
+        borg_local_path=local_path,
+        borg_exit_codes=borg_exit_codes,
+    )
+
+    return None

+ 15 - 15
borgmatic/borg/repo_list.py

@@ -58,22 +58,20 @@ def get_latest_archive(
     '''
 
     full_command = (
+        local_path,
         (
-            local_path,
-            (
-                'repo-list'
-                if feature.available(feature.Feature.REPO_LIST, local_borg_version)
-                else 'list'
-            ),
-        )
-        + flags.make_flags('remote-path', remote_path)
-        + flags.make_flags('umask', config.get('umask'))
-        + flags.make_flags('log-json', config.get('log_json'))
-        + flags.make_flags('lock-wait', config.get('lock_wait'))
-        + flags.make_flags('consider-checkpoints', consider_checkpoints)
-        + flags.make_flags('last', 1)
-        + ('--json',)
-        + flags.make_repository_flags(repository_path, local_borg_version)
+            'repo-list'
+            if feature.available(feature.Feature.REPO_LIST, local_borg_version)
+            else 'list'
+        ),
+        *flags.make_flags('remote-path', remote_path),
+        *flags.make_flags('umask', config.get('umask')),
+        *flags.make_flags('log-json', config.get('log_json')),
+        *flags.make_flags('lock-wait', config.get('lock_wait')),
+        *flags.make_flags('consider-checkpoints', consider_checkpoints),
+        *flags.make_flags('last', 1),
+        '--json',
+        *flags.make_repository_flags(repository_path, local_borg_version),
     )
 
     json_output = execute_command_and_capture_output(
@@ -215,3 +213,5 @@ def list_repository(
         borg_local_path=local_path,
         borg_exit_codes=borg_exit_codes,
     )
+
+    return None

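The get_latest_archive rewrite flattens full_command with `*` unpacking instead of chained tuple concatenation; the elements and their order are unchanged. A toy equivalence, with a simplified stand-in for flags.make_flags:

    def make_flags(name, value):
        # Simplified: the real helper also normalizes names and values.
        return (f'--{name}', str(value)) if value else ()

    concatenated = ('borg', 'repo-list') + make_flags('last', 1) + ('--json',)
    unpacked = ('borg', 'repo-list', *make_flags('last', 1), '--json')

    assert concatenated == unpacked == ('borg', 'repo-list', '--last', '1', '--json')
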
+ 228 - 88
borgmatic/commands/arguments.py

@@ -60,7 +60,7 @@ def get_subactions_for_actions(action_parsers):
         action: tuple(
             subaction_name
             for group_action in action_parser._subparsers._group_actions
-            for subaction_name in group_action.choices.keys()
+            for subaction_name in group_action.choices
         )
         for action, action_parser in action_parsers.items()
         if action_parser._subparsers
@@ -77,21 +77,25 @@ def omit_values_colliding_with_action_names(unparsed_arguments, parsed_arguments
     '''
     remaining_arguments = list(unparsed_arguments)
 
-    for action_name, parsed in parsed_arguments.items():
+    for parsed in parsed_arguments.values():
         for value in vars(parsed).values():
             if isinstance(value, str):
-                if value in ACTION_ALIASES.keys() and value in remaining_arguments:
+                if value in ACTION_ALIASES and value in remaining_arguments:
                     remaining_arguments.remove(value)
             elif isinstance(value, list):
                 for item in value:
-                    if item in ACTION_ALIASES.keys() and item in remaining_arguments:
+                    if item in ACTION_ALIASES and item in remaining_arguments:
                         remaining_arguments.remove(item)
 
     return tuple(remaining_arguments)
 
 
 def parse_and_record_action_arguments(
-    unparsed_arguments, parsed_arguments, action_parser, action_name, canonical_name=None
+    unparsed_arguments,
+    parsed_arguments,
+    action_parser,
+    action_name,
+    canonical_name=None,
 ):
     '''
     Given unparsed arguments as a sequence of strings, parsed arguments as a dict from action name
@@ -102,7 +106,8 @@ def parse_and_record_action_arguments(
     given action doesn't apply to the given unparsed arguments.
     '''
     filtered_arguments = omit_values_colliding_with_action_names(
-        unparsed_arguments, parsed_arguments
+        unparsed_arguments,
+        parsed_arguments,
     )
 
     if action_name not in filtered_arguments:
@@ -186,12 +191,12 @@ def get_unparsable_arguments(remaining_action_arguments):
         itertools.chain.from_iterable(
             argument_group
             for argument_group in dict.fromkeys(
-                itertools.chain.from_iterable(grouped_action_arguments)
-            ).keys()
+                itertools.chain.from_iterable(grouped_action_arguments),
+            )
             if all(
                 argument_group in action_arguments for action_arguments in grouped_action_arguments
             )
-        )
+        ),
     )
 
 
@@ -244,7 +249,7 @@ def parse_arguments_for_actions(unparsed_arguments, action_parsers, global_parse
                             subaction_name,
                         )
                         if argument != action_name
-                    )
+                    ),
                 )
 
                 if subaction_name in arguments:
@@ -256,14 +261,18 @@ def parse_arguments_for_actions(unparsed_arguments, action_parsers, global_parse
                     sys.exit(0)
                 else:
                     raise ValueError(
-                        f"Missing sub-action after {action_name} action. Expected one of: {', '.join(get_subactions_for_actions(action_parsers)[action_name])}"
+                        f"Missing sub-action after {action_name} action. Expected one of: {', '.join(get_subactions_for_actions(action_parsers)[action_name])}",
                     )
         # Otherwise, parse with the main action parser.
         else:
             remaining_action_arguments.append(
                 parse_and_record_action_arguments(
-                    unparsed_arguments, arguments, action_parser, action_name, canonical_name
-                )
+                    unparsed_arguments,
+                    arguments,
+                    action_parser,
+                    action_name,
+                    canonical_name,
+                ),
             )

     # If no actions were explicitly requested, assume defaults.
@@ -272,11 +281,11 @@ def parse_arguments_for_actions(unparsed_arguments, action_parsers, global_parse
             default_action_parser = action_parsers[default_action_name]
             remaining_action_arguments.append(
                 parse_and_record_action_arguments(
-                    tuple(unparsed_arguments) + (default_action_name,),
+                    (*unparsed_arguments, default_action_name),
                     arguments,
                     default_action_parser,
                     default_action_name,
-                )
+                ),
             )

     arguments['global'], remaining = global_parser.parse_known_args(unparsed_arguments)
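
Note: the `(*unparsed_arguments, default_action_name)` change above is Ruff's RUF005 rewrite: building the tuple with iterable unpacking instead of converting and concatenating. Both forms produce the same tuple; a sketch with a hypothetical argv tail:

    # Iterable unpacking accepts any iterable directly, so the explicit
    # tuple() conversion before concatenation becomes unnecessary.
    unparsed_arguments = ['--verbosity', '1']  # hypothetical remaining arguments
    default_action_name = 'prune'

    assert (*unparsed_arguments, default_action_name) == tuple(unparsed_arguments) + (default_action_name,)
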
@@ -304,10 +313,10 @@ def make_argument_description(schema, flag_name):

     if '[0]' in flag_name:
         pieces.append(
-            ' To specify a different list element, replace the "[0]" with another array index ("[1]", "[2]", etc.).'
+            ' To specify a different list element, replace the "[0]" with another array index ("[1]", "[2]", etc.).',
         )

-    if example and schema_type in ('array', 'object'):
+    if example and schema_type in ('array', 'object'):  # noqa: PLR6201
         example_buffer = io.StringIO()
         yaml = ruamel.yaml.YAML(typ='safe')
         yaml.default_flow_style = True
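
Note: Ruff's PLR6201 rule prefers set literals for membership tests, as applied to {'store_true', 'store_false'} in the next hunk. The `# noqa: PLR6201` added above instead keeps the tuple, presumably because `schema_type` can itself be a list of type names here, and probing a set with an unhashable value raises. A sketch of that hazard, with illustrative values:

    # Membership in a set hashes the probe value, so an unhashable probe
    # (such as a list of schema types) raises TypeError; a tuple probe does not.
    schema_type = ['array', 'null']  # hypothetical multi-type schema value

    schema_type in ('array', 'object')  # False, no error
    # schema_type in {'array', 'object'}  # would raise TypeError: unhashable type: 'list'
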
@@ -387,7 +396,7 @@ def add_array_element_arguments(arguments_group, unparsed_arguments, flag_name):
         if not pattern.match(unparsed_flag_name) or unparsed_flag_name == existing_flag_name:
             continue

-        if action_registry_name in ('store_true', 'store_false'):
+        if action_registry_name in {'store_true', 'store_false'}:
             arguments_group.add_argument(
                 unparsed_flag_name,
                 action=action_registry_name,
@@ -408,7 +417,7 @@ def add_array_element_arguments(arguments_group, unparsed_arguments, flag_name):
             )


-def add_arguments_from_schema(arguments_group, schema, unparsed_arguments, names=None):
+def add_arguments_from_schema(arguments_group, schema, unparsed_arguments, names=None):  # noqa: PLR0912
     '''
     Given an argparse._ArgumentGroup instance, a configuration schema dict, and a sequence of
     unparsed argument strings, convert the entire schema into corresponding command-line flags and
@@ -466,7 +475,10 @@ def add_arguments_from_schema(arguments_group, schema, unparsed_arguments, names
         if properties:
             for name, child in properties.items():
                 add_arguments_from_schema(
-                    arguments_group, child, unparsed_arguments, names + (name,)
+                    arguments_group,
+                    child,
+                    unparsed_arguments,
+                    (*names, name),
                 )

             return
@@ -483,12 +495,15 @@ def add_arguments_from_schema(arguments_group, schema, unparsed_arguments, names
                     arguments_group,
                     child,
                     unparsed_arguments,
-                    names[:-1] + (f'{names[-1]}[0]',) + (name,),
+                    (*names[:-1], f'{names[-1]}[0]', name),
                 )
         # If there aren't any children, then this is an array of scalars. Recurse accordingly.
         else:
             add_arguments_from_schema(
-                arguments_group, items, unparsed_arguments, names[:-1] + (f'{names[-1]}[0]',)
+                arguments_group,
+                items,
+                unparsed_arguments,
+                (*names[:-1], f'{names[-1]}[0]'),
             )

     flag_name = '.'.join(names).replace('_', '-')
@@ -515,9 +530,9 @@ def add_arguments_from_schema(arguments_group, schema, unparsed_arguments, names
         )

         if names[-1].startswith('no_'):
-            no_flag_name = '.'.join(names[:-1] + (names[-1][len('no_') :],)).replace('_', '-')
+            no_flag_name = '.'.join((*names[:-1], names[-1][len('no_') :])).replace('_', '-')
         else:
-            no_flag_name = '.'.join(names[:-1] + ('no-' + names[-1],)).replace('_', '-')
+            no_flag_name = '.'.join((*names[:-1], 'no-' + names[-1])).replace('_', '-')

         arguments_group.add_argument(
             f'--{no_flag_name}',
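
Note: the two rewritten no_flag_name lines keep the same behavior: they derive the companion "--no-..." flag by editing the last name component, now spelled with iterable unpacking instead of tuple concatenation. A sketch of the derivation for a hypothetical option path:

    # For names ('create', 'no_checkpoints'), strip the 'no_' prefix;
    # for names ('create', 'checkpoints'), prepend 'no-'. Underscores become dashes.
    names = ('create', 'checkpoints')  # hypothetical schema path
    no_flag_name = '.'.join((*names[:-1], 'no-' + names[-1])).replace('_', '-')
    assert no_flag_name == 'create.no-checkpoints'
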
@@ -545,7 +560,7 @@ def add_arguments_from_schema(arguments_group, schema, unparsed_arguments, names
     add_array_element_arguments(arguments_group, unparsed_arguments, flag_name)


-def make_parsers(schema, unparsed_arguments):
+def make_parsers(schema, unparsed_arguments):  # noqa: PLR0915
     '''
     Given a configuration schema dict and unparsed arguments as a sequence of strings, build a
     global arguments parser, individual action parsers, and a combined parser containing both.
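
Note: nearly all of the remaining hunks in make_parsers are the same two mechanical Ruff fixes: a trailing comma added after the last argument (COM812), which in turn lets the formatter explode each add_argument call onto one argument per line. The resulting style, sketched on a hypothetical flag rather than one of borgmatic's:

    import argparse

    parser = argparse.ArgumentParser()
    example_group = parser.add_argument_group('example arguments')  # hypothetical group
    example_group.add_argument(
        '--example',
        metavar='N',
        help='An illustrative flag',  # the magic trailing comma keeps this layout stable
    )

One practical payoff: adding or removing an argument later produces a single-line diff instead of reflowing the whole call.
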
@@ -670,7 +685,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Create any missing parent directories of the repository directory [Borg 1.x only]',
     )
     repo_create_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     transfer_parser = action_parsers.add_parser(
@@ -712,7 +730,9 @@ def make_parsers(schema, unparsed_arguments):
         help='Only transfer archives with names, hashes, or series matching this pattern',
     )
     transfer_group.add_argument(
-        '--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
+        '--sort-by',
+        metavar='KEYS',
+        help='Comma-separated list of sorting keys',
     )
     transfer_group.add_argument(
         '--first',
@@ -720,7 +740,9 @@ def make_parsers(schema, unparsed_arguments):
         help='Only transfer first N archives after other filters are applied',
     )
     transfer_group.add_argument(
-        '--last', metavar='N', help='Only transfer last N archives after other filters are applied'
+        '--last',
+        metavar='N',
+        help='Only transfer last N archives after other filters are applied',
     )
     transfer_group.add_argument(
         '--oldest',
@@ -743,7 +765,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Transfer archives that are newer than the specified time range (e.g. 7d or 12m) from the current time [Borg 2.x+ only]',
     )
     transfer_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     prune_parser = action_parsers.add_parser(
@@ -833,7 +858,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Minimum saved space percentage threshold for compacting a segment, defaults to 10',
     )
     compact_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     create_parser = action_parsers.add_parser(
@@ -870,7 +898,11 @@ def make_parsers(schema, unparsed_arguments):
         help='Show per-file details',
     )
     create_group.add_argument(
-        '--json', dest='json', default=False, action='store_true', help='Output results as JSON'
+        '--json',
+        dest='json',
+        default=False,
+        action='store_true',
+        help='Output results as JSON',
     )
     create_group.add_argument(
         '--comment',
@@ -996,13 +1028,19 @@ def make_parsers(schema, unparsed_arguments):
         help='Only delete archives with names, hashes, or series matching this pattern',
     )
     delete_group.add_argument(
-        '--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
+        '--sort-by',
+        metavar='KEYS',
+        help='Comma-separated list of sorting keys',
     )
     delete_group.add_argument(
-        '--first', metavar='N', help='Delete first N archives after other filters are applied'
+        '--first',
+        metavar='N',
+        help='Delete first N archives after other filters are applied',
     )
     delete_group.add_argument(
-        '--last', metavar='N', help='Delete last N archives after other filters are applied'
+        '--last',
+        metavar='N',
+        help='Delete last N archives after other filters are applied',
     )
     delete_group.add_argument(
         '--oldest',
@@ -1039,7 +1077,9 @@ def make_parsers(schema, unparsed_arguments):
         help='Path of repository to extract, defaults to the configured repository if there is only one, quoted globs supported',
     )
     extract_group.add_argument(
-        '--archive', help='Name or hash of a single archive to extract (or "latest")', required=True
+        '--archive',
+        help='Name or hash of a single archive to extract (or "latest")',
+        required=True,
     )
     extract_group.add_argument(
         '--path',
@@ -1068,7 +1108,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Display progress for each file as it is extracted',
     )
     extract_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     config_parser = action_parsers.add_parser(
@@ -1093,7 +1136,7 @@ def make_parsers(schema, unparsed_arguments):
         add_help=False,
     )
     config_bootstrap_group = config_bootstrap_parser.add_argument_group(
-        'config bootstrap arguments'
+        'config bootstrap arguments',
     )
     config_bootstrap_group.add_argument(
         '--repository',
@@ -1148,7 +1191,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Command to use instead of "ssh"',
     )
     config_bootstrap_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     config_generate_parser = config_parsers.add_parser(
@@ -1178,7 +1224,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Whether to overwrite any existing destination file, defaults to false',
     )
     config_generate_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     config_validate_parser = config_parsers.add_parser(
@@ -1195,7 +1244,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Show the validated configuration after all include merging has occurred',
     )
     config_validate_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     export_tar_parser = action_parsers.add_parser(
@@ -1211,7 +1263,9 @@ def make_parsers(schema, unparsed_arguments):
         help='Path of repository to export from, defaults to the configured repository if there is only one, quoted globs supported',
     )
     export_tar_group.add_argument(
-        '--archive', help='Name or hash of a single archive to export (or "latest")', required=True
+        '--archive',
+        help='Name or hash of a single archive to export (or "latest")',
+        required=True,
     )
     export_tar_group.add_argument(
         '--path',
@@ -1228,7 +1282,8 @@ def make_parsers(schema, unparsed_arguments):
         required=True,
     )
     export_tar_group.add_argument(
-        '--tar-filter', help='Name of filter program to pipe data through'
+        '--tar-filter',
+        help='Name of filter program to pipe data through',
     )
     export_tar_group.add_argument(
         '--list',
@@ -1246,7 +1301,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Number of leading path components to remove from each exported path. Skip paths with fewer elements',
     )
     export_tar_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     mount_parser = action_parsers.add_parser(
@@ -1262,7 +1320,8 @@ def make_parsers(schema, unparsed_arguments):
         help='Path of repository to use, defaults to the configured repository if there is only one, quoted globs supported',
     )
     mount_group.add_argument(
-        '--archive', help='Name or hash of a single archive to mount (or "latest")'
+        '--archive',
+        help='Name or hash of a single archive to mount (or "latest")',
     )
     mount_group.add_argument(
         '--mount-point',
@@ -1291,7 +1350,9 @@ def make_parsers(schema, unparsed_arguments):
         help='Mount first N archives after other filters are applied',
     )
     mount_group.add_argument(
-        '--last', metavar='N', help='Mount last N archives after other filters are applied'
+        '--last',
+        metavar='N',
+        help='Mount last N archives after other filters are applied',
     )
     mount_group.add_argument(
         '--oldest',
@@ -1368,7 +1429,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Do not delete the local security info when deleting a repository',
     )
     repo_delete_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     restore_parser = action_parsers.add_parser(
@@ -1437,7 +1501,10 @@ def make_parsers(schema, unparsed_arguments):
         help='The name of the data source hook for the dump to restore, only necessary if you need to disambiguate dumps',
     )
     restore_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     repo_list_parser = action_parsers.add_parser(
@@ -1453,14 +1520,22 @@ def make_parsers(schema, unparsed_arguments):
         help='Path of repository to list, defaults to the configured repositories, quoted globs supported',
     )
     repo_list_group.add_argument(
-        '--short', default=False, action='store_true', help='Output only archive names'
+        '--short',
+        default=False,
+        action='store_true',
+        help='Output only archive names',
     )
     repo_list_group.add_argument('--format', help='Format for archive listing')
     repo_list_group.add_argument(
-        '--json', default=False, action='store_true', help='Output results as JSON'
+        '--json',
+        default=False,
+        action='store_true',
+        help='Output results as JSON',
     )
     repo_list_group.add_argument(
-        '-P', '--prefix', help='Deprecated. Only list archive names starting with this prefix'
+        '-P',
+        '--prefix',
+        help='Deprecated. Only list archive names starting with this prefix',
     )
     repo_list_group.add_argument(
         '-a',
@@ -1470,13 +1545,19 @@ def make_parsers(schema, unparsed_arguments):
         help='Only list archive names, hashes, or series matching this pattern',
     )
     repo_list_group.add_argument(
-        '--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
+        '--sort-by',
+        metavar='KEYS',
+        help='Comma-separated list of sorting keys',
     )
     repo_list_group.add_argument(
-        '--first', metavar='N', help='List first N archives after other filters are applied'
+        '--first',
+        metavar='N',
+        help='List first N archives after other filters are applied',
     )
     repo_list_group.add_argument(
-        '--last', metavar='N', help='List last N archives after other filters are applied'
+        '--last',
+        metavar='N',
+        help='List last N archives after other filters are applied',
     )
     repo_list_group.add_argument(
         '--oldest',
@@ -1505,7 +1586,10 @@ def make_parsers(schema, unparsed_arguments):
         help="List only deleted archives that haven't yet been compacted [Borg 2.x+ only]",
         help="List only deleted archives that haven't yet been compacted [Borg 2.x+ only]",
     )
     )
     repo_list_group.add_argument(
     repo_list_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     list_parser = action_parsers.add_parser(
@@ -1521,7 +1605,8 @@ def make_parsers(schema, unparsed_arguments):
         help='Path of repository containing archive to list, defaults to the configured repositories, quoted globs supported',
     )
     list_group.add_argument(
-        '--archive', help='Name or hash of a single archive to list (or "latest")'
+        '--archive',
+        help='Name or hash of a single archive to list (or "latest")',
     )
     list_group.add_argument(
         '--path',
@@ -1538,14 +1623,22 @@ def make_parsers(schema, unparsed_arguments):
         help='Partial path or pattern to search for and list across multiple archives, can specify flag multiple times',
     )
     list_group.add_argument(
-        '--short', default=False, action='store_true', help='Output only path names'
+        '--short',
+        default=False,
+        action='store_true',
+        help='Output only path names',
     )
     list_group.add_argument('--format', help='Format for file listing')
     list_group.add_argument(
-        '--json', default=False, action='store_true', help='Output results as JSON'
+        '--json',
+        default=False,
+        action='store_true',
+        help='Output results as JSON',
     )
     list_group.add_argument(
-        '-P', '--prefix', help='Deprecated. Only list archive names starting with this prefix'
+        '-P',
+        '--prefix',
+        help='Deprecated. Only list archive names starting with this prefix',
     )
     list_group.add_argument(
         '-a',
@@ -1555,19 +1648,30 @@ def make_parsers(schema, unparsed_arguments):
         help='Only list archive names matching this pattern',
     )
     list_group.add_argument(
-        '--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
+        '--sort-by',
+        metavar='KEYS',
+        help='Comma-separated list of sorting keys',
     )
     list_group.add_argument(
-        '--first', metavar='N', help='List first N archives after other filters are applied'
+        '--first',
+        metavar='N',
+        help='List first N archives after other filters are applied',
     )
     list_group.add_argument(
-        '--last', metavar='N', help='List last N archives after other filters are applied'
+        '--last',
+        metavar='N',
+        help='List last N archives after other filters are applied',
     )
     list_group.add_argument(
-        '-e', '--exclude', metavar='PATTERN', help='Exclude paths matching the pattern'
+        '-e',
+        '--exclude',
+        metavar='PATTERN',
+        help='Exclude paths matching the pattern',
     )
     list_group.add_argument(
-        '--exclude-from', metavar='FILENAME', help='Exclude paths from exclude file, one per line'
+        '--exclude-from',
+        metavar='FILENAME',
+        help='Exclude paths from exclude file, one per line',
     )
     list_group.add_argument('--pattern', help='Include or exclude paths matching a pattern')
     list_group.add_argument(
@@ -1590,10 +1694,17 @@ def make_parsers(schema, unparsed_arguments):
         help='Path of repository to show info for, defaults to the configured repository if there is only one, quoted globs supported',
     )
     repo_info_group.add_argument(
-        '--json', dest='json', default=False, action='store_true', help='Output results as JSON'
+        '--json',
+        dest='json',
+        default=False,
+        action='store_true',
+        help='Output results as JSON',
     )
     repo_info_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     info_parser = action_parsers.add_parser(
@@ -1609,10 +1720,15 @@ def make_parsers(schema, unparsed_arguments):
         help='Path of repository containing archive to show info for, defaults to the configured repository if there is only one, quoted globs supported',
     )
     info_group.add_argument(
-        '--archive', help='Archive name, hash, or series to show info for (or "latest")'
+        '--archive',
+        help='Archive name, hash, or series to show info for (or "latest")',
     )
     info_group.add_argument(
-        '--json', dest='json', default=False, action='store_true', help='Output results as JSON'
+        '--json',
+        dest='json',
+        default=False,
+        action='store_true',
+        help='Output results as JSON',
     )
     info_group.add_argument(
         '-P',
@@ -1627,7 +1743,9 @@ def make_parsers(schema, unparsed_arguments):
         help='Only show info for archive names, hashes, or series matching this pattern',
     )
     info_group.add_argument(
-        '--sort-by', metavar='KEYS', help='Comma-separated list of sorting keys'
+        '--sort-by',
+        metavar='KEYS',
+        help='Comma-separated list of sorting keys',
     )
     info_group.add_argument(
         '--first',
@@ -1635,7 +1753,9 @@ def make_parsers(schema, unparsed_arguments):
         help='Show info for first N archives after other filters are applied',
     )
     info_group.add_argument(
-        '--last', metavar='N', help='Show info for last N archives after other filters are applied'
+        '--last',
+        metavar='N',
+        help='Show info for last N archives after other filters are applied',
     )
     info_group.add_argument(
         '--oldest',
@@ -1672,7 +1792,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Path of repository to break the lock for, defaults to the configured repository if there is only one, quoted globs supported',
     )
     break_lock_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     key_parser = action_parsers.add_parser(
@@ -1717,7 +1840,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Path to export the key to, defaults to stdout (but be careful about dirtying the output with --verbosity)',
     )
     key_export_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     key_import_parser = key_parsers.add_parser(
@@ -1742,7 +1868,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Path to import the key from backup, defaults to stdin',
     )
     key_import_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     key_change_passphrase_parser = key_parsers.add_parser(
@@ -1752,14 +1881,17 @@ def make_parsers(schema, unparsed_arguments):
         add_help=False,
     )
     key_change_passphrase_group = key_change_passphrase_parser.add_argument_group(
-        'key change-passphrase arguments'
+        'key change-passphrase arguments',
     )
     key_change_passphrase_group.add_argument(
         '--repository',
         help='Path of repository to change the passphrase for, defaults to the configured repository if there is only one, quoted globs supported',
     )
     key_change_passphrase_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     recreate_parser = action_parsers.add_parser(
@@ -1809,7 +1941,10 @@ def make_parsers(schema, unparsed_arguments):
         help='Only consider archive names, hashes, or series matching this pattern [Borg 2.x+ only]',
     )
     recreate_group.add_argument(
-        '-h', '--help', action='help', help='Show this help message and exit'
+        '-h',
+        '--help',
+        action='help',
+        help='Show this help message and exit',
     )

     borg_parser = action_parsers.add_parser(
@@ -1825,7 +1960,8 @@ def make_parsers(schema, unparsed_arguments):
         help='Path of repository to pass to Borg, defaults to the configured repositories, quoted globs supported',
     )
     borg_group.add_argument(
-        '--archive', help='Archive name, hash, or series to pass to Borg (or "latest")'
+        '--archive',
+        help='Archive name, hash, or series to pass to Borg (or "latest")',
     )
     borg_group.add_argument(
         '--',
@@ -1839,6 +1975,9 @@ def make_parsers(schema, unparsed_arguments):
     return global_parser, action_parsers, global_plus_action_parser


+HIGHLANDER_ACTION_ARGUMENTS_COUNT = 2  # 1 for "global" + 1 for the action
+
+
 def parse_arguments(schema, *unparsed_arguments):
     '''
     Given a configuration schema dict and the command-line arguments with which this script was
@@ -1849,21 +1988,22 @@ def parse_arguments(schema, *unparsed_arguments):
     Raise SystemExit with an error code of 0 if "--help" was requested.
     '''
     global_parser, action_parsers, global_plus_action_parser = make_parsers(
-        schema, unparsed_arguments
+        schema,
+        unparsed_arguments,
     )
     arguments, remaining_action_arguments = parse_arguments_for_actions(
-        unparsed_arguments, action_parsers.choices, global_parser
+        unparsed_arguments,
+        action_parsers.choices,
+        global_parser,
     )

     if not arguments['global'].config_paths:
         arguments['global'].config_paths = collect.get_default_config_paths(expand_home=True)

     for action_name in ('bootstrap', 'generate', 'validate'):
-        if (
-            action_name in arguments.keys() and len(arguments.keys()) > 2
-        ):  # 2 = 1 for 'global' + 1 for the action
+        if action_name in arguments and len(arguments) > HIGHLANDER_ACTION_ARGUMENTS_COUNT:
             raise ValueError(
-                f'The {action_name} action cannot be combined with other actions. Please run it separately.'
+                f'The {action_name} action cannot be combined with other actions. Please run it separately.',
             )

     unknown_arguments = get_unparsable_arguments(remaining_action_arguments)
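
Note: the new HIGHLANDER_ACTION_ARGUMENTS_COUNT constant replaces a bare 2 that Ruff flags as a magic value (PLR2004); the old inline comment becomes the constant's comment. The check itself is unchanged, sketched here with illustrative parsed-argument namespaces:

    HIGHLANDER_ACTION_ARGUMENTS_COUNT = 2  # 1 for "global" + 1 for the action

    arguments = {'global': object(), 'bootstrap': object()}  # illustrative namespaces
    # Exactly the global namespace plus one action: the cannot-combine check passes.
    assert not ('bootstrap' in arguments and len(arguments) > HIGHLANDER_ACTION_ARGUMENTS_COUNT)
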
     unknown_arguments = get_unparsable_arguments(remaining_action_arguments)
@@ -1875,11 +2015,11 @@ def parse_arguments(schema, *unparsed_arguments):
         global_plus_action_parser.print_usage()
         raise ValueError(
-            f"Unrecognized argument{'s' if len(unknown_arguments) > 1 else ''}: {' '.join(unknown_arguments)}"
+            f"Unrecognized argument{'s' if len(unknown_arguments) > 1 else ''}: {' '.join(unknown_arguments)}",
         )

     if (
-        ('list' in arguments and 'repo-info' in arguments and arguments['list'].json)
+        ('list' in arguments and 'repo-info' in arguments and arguments['list'].json)  # noqa: PLR0916
         or ('list' in arguments and 'info' in arguments and arguments['list'].json)
         or ('repo-info' in arguments and 'info' in arguments and arguments['repo-info'].json)
     ):
@@ -1887,23 +2027,23 @@ def parse_arguments(schema, *unparsed_arguments):

     if 'list' in arguments and (arguments['list'].prefix and arguments['list'].match_archives):
         raise ValueError(
-            'With the list action, only one of --prefix or --match-archives flags can be used.'
+            'With the list action, only one of --prefix or --match-archives flags can be used.',
         )

     if 'repo-list' in arguments and (
         arguments['repo-list'].prefix and arguments['repo-list'].match_archives
     ):
         raise ValueError(
-            'With the repo-list action, only one of --prefix or --match-archives flags can be used.'
+            'With the repo-list action, only one of --prefix or --match-archives flags can be used.',
         )

-    if 'info' in arguments and (
+    if 'info' in arguments and (  # noqa: PLR0916
         (arguments['info'].archive and arguments['info'].prefix)
         or (arguments['info'].archive and arguments['info'].match_archives)
         or (arguments['info'].prefix and arguments['info'].match_archives)
     ):
         raise ValueError(
-            'With the info action, only one of --archive, --prefix, or --match-archives flags can be used.'
+            'With the info action, only one of --archive, --prefix, or --match-archives flags can be used.',
         )

     if 'borg' in arguments and arguments['global'].dry_run:
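
Note: the two `# noqa: PLR0916` comments above acknowledge rather than restructure Ruff's boolean-complexity warning: each condition legitimately needs several and/or operands. One alternative Ruff would accept, sketched as a hypothetical helper rather than borgmatic's actual approach, is naming the repeated sub-expression:

    # A hypothetical extraction; it reduces the operand count per expression.
    def json_requested(arguments, action_name):
        return action_name in arguments and getattr(arguments[action_name], 'json', False)
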
     if 'borg' in arguments and arguments['global'].dry_run:

borgmatic/commands/borgmatic.py (+125 -110)

@@ -96,7 +96,7 @@ class Monitoring_hooks:
         self.config = config
         self.dry_run = global_arguments.dry_run
         self.monitoring_log_level = verbosity_to_log_level(
-            get_verbosity({config_filename: config}, 'monitoring_verbosity')
+            get_verbosity({config_filename: config}, 'monitoring_verbosity'),
         )
         self.monitoring_hooks_are_activated = (
             using_primary_action and self.monitoring_log_level != DISABLED
@@ -182,7 +182,7 @@ class Monitoring_hooks:
         )


-def run_configuration(config_filename, config, config_paths, arguments):
+def run_configuration(config_filename, config, config_paths, arguments):  # noqa: PLR0912, PLR0915
     '''
     Given a config filename, the corresponding parsed config dict, a sequence of loaded
     configuration paths, and command-line arguments as a dict from subparser name to a namespace of
@@ -206,12 +206,13 @@ def run_configuration(config_filename, config, config_paths, arguments):

     if skip_actions:
         logger.debug(
-            f"Skipping {'/'.join(skip_actions)} action{'s' if len(skip_actions) > 1 else ''} due to configured skip_actions"
+            f"Skipping {'/'.join(skip_actions)} action{'s' if len(skip_actions) > 1 else ''} due to configured skip_actions",
         )

-    try:
-        with Monitoring_hooks(config_filename, config, arguments, global_arguments):
-            with borgmatic.hooks.command.Before_after_hooks(
+    try:  # noqa: PLR1702
+        with (
+            Monitoring_hooks(config_filename, config, arguments, global_arguments),
+            borgmatic.hooks.command.Before_after_hooks(
                 command_hooks=config.get('commands'),
                 before_after='configuration',
                 umask=config.get('umask'),
@@ -220,75 +221,77 @@ def run_configuration(config_filename, config, config_paths, arguments):
                 action_names=arguments.keys(),
                 configuration_filename=config_filename,
                 log_file=config.get('log_file', ''),
-            ):
-                try:
-                    local_borg_version = borg_version.local_borg_version(config, local_path)
-                    logger.debug(f'Borg {local_borg_version}')
-                except (OSError, CalledProcessError, ValueError) as error:
-                    yield from log_error_records(
-                        f'{config_filename}: Error getting local Borg version', error
-                    )
-                    raise
+            ),
+        ):
+            try:
+                local_borg_version = borg_version.local_borg_version(config, local_path)
+                logger.debug(f'Borg {local_borg_version}')
+            except (OSError, CalledProcessError, ValueError) as error:
+                yield from log_error_records(
+                    f'{config_filename}: Error getting local Borg version',
+                    error,
+                )
+                raise
 
-                for repo in config['repositories']:
-                    repo_queue.put(
-                        (repo, 0),
-                    )
+            for repo in config['repositories']:
+                repo_queue.put(
+                    (repo, 0),
+                )
 
-                while not repo_queue.empty():
-                    repository, retry_num = repo_queue.get()
-
-                    with Log_prefix(repository.get('label', repository['path'])):
-                        logger.debug('Running actions for repository')
-                        timeout = retry_num * retry_wait
-
-                        if timeout:
-                            logger.warning(f'Sleeping {timeout}s before next retry')
-                            time.sleep(timeout)
-
-                        try:
-                            yield from run_actions(
-                                arguments=arguments,
-                                config_filename=config_filename,
-                                config=config,
-                                config_paths=config_paths,
-                                local_path=local_path,
-                                remote_path=remote_path,
-                                local_borg_version=local_borg_version,
-                                repository=repository,
+            while not repo_queue.empty():
+                repository, retry_num = repo_queue.get()
+
+                with Log_prefix(repository.get('label', repository['path'])):
+                    logger.debug('Running actions for repository')
+                    timeout = retry_num * retry_wait
+
+                    if timeout:
+                        logger.warning(f'Sleeping {timeout}s before next retry')
+                        time.sleep(timeout)
+
+                    try:
+                        yield from run_actions(
+                            arguments=arguments,
+                            config_filename=config_filename,
+                            config=config,
+                            config_paths=config_paths,
+                            local_path=local_path,
+                            remote_path=remote_path,
+                            local_borg_version=local_borg_version,
+                            repository=repository,
+                        )
+                    except (OSError, CalledProcessError, ValueError) as error:
+                        if retry_num < retries:
+                            repo_queue.put(
+                                (repository, retry_num + 1),
                             )
-                        except (OSError, CalledProcessError, ValueError) as error:
-                            if retry_num < retries:
-                                repo_queue.put(
-                                    (repository, retry_num + 1),
-                                )
-                                tuple(  # Consume the generator so as to trigger logging.
-                                    log_error_records(
-                                        'Error running actions for repository',
-                                        error,
-                                        levelno=logging.WARNING,
-                                        log_command_error_output=True,
-                                    )
-                                )
-                                logger.warning(f'Retrying... attempt {retry_num + 1}/{retries}')
-                                continue
-
-                            if command.considered_soft_failure(error):
-                                continue
-
-                            yield from log_error_records(
-                                'Error running actions for repository',
-                                error,
+                            tuple(  # Consume the generator so as to trigger logging.
+                                log_error_records(
+                                    'Error running actions for repository',
+                                    error,
+                                    levelno=logging.WARNING,
+                                    log_command_error_output=True,
+                                ),
                             )
-                            encountered_error = error
-                            error_repository = repository
+                            logger.warning(f'Retrying... attempt {retry_num + 1}/{retries}')
+                            continue
+
+                        if command.considered_soft_failure(error):
+                            continue
+
+                        yield from log_error_records(
+                            'Error running actions for repository',
+                            error,
+                        )
+                        encountered_error = error
+                        error_repository = repository
 
-                # Re-raise any error, so that the Monitoring_hooks context manager wrapping this
-                # code can see the error and act accordingly. Do this here rather than as soon as
-                # the error is encountered so that an error with one repository doesn't prevent
-                # other repositories from running.
-                if encountered_error:
-                    raise encountered_error
+            # Re-raise any error, so that the Monitoring_hooks context manager wrapping this
+            # code can see the error and act accordingly. Do this here rather than as soon as
+            # the error is encountered so that an error with one repository doesn't prevent
+            # other repositories from running.
+            if encountered_error:
+                raise encountered_error

     except (OSError, CalledProcessError, ValueError) as error:
         yield from log_error_records('Error running configuration')
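
Note: the large rewrite above is Ruff's SIM117 fix: two nested with statements become a single parenthesized with (supported since Python 3.10), which removes one indentation level from everything inside, hence the many re-indented lines. The shape of the change, sketched with stand-in context managers rather than borgmatic's real hooks:

    import contextlib

    @contextlib.contextmanager
    def managed(name):  # stand-in for Monitoring_hooks / Before_after_hooks
        yield name

    # Before: nested `with` blocks, one indent level each.
    with managed('monitoring'):
        with managed('command hooks'):
            pass

    # After: one parenthesized `with`, one less indent level for the body.
    with (
        managed('monitoring'),
        managed('command hooks'),
    ):
        pass
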
@@ -323,7 +326,7 @@ def run_configuration(config_filename, config, config_paths, arguments):
         yield from log_error_records(f'{config_filename}: Error running after error hook', error)


-def run_actions(
+def run_actions(  # noqa: PLR0912, PLR0915
     *,
     arguments,
     config_filename,
@@ -641,9 +644,9 @@ def load_configurations(config_filenames, arguments, overrides=None, resolve_env
                         levelno=logging.DEBUG,
                         levelname='DEBUG',
                         msg=f'{config_filename}: Loading configuration file',
-                    )
+                    ),
                 ),
-            ]
+            ],
         )
         try:
             configs[config_filename], paths, parse_logs = validate.parse_configuration(
@@ -663,9 +666,9 @@ def load_configurations(config_filenames, arguments, overrides=None, resolve_env
                             levelno=logging.WARNING,
                             levelname='WARNING',
                             msg=f'{config_filename}: Insufficient permissions to read configuration file',
-                        )
+                        ),
                     ),
-                ]
+                ],
             )
         except (ValueError, OSError, validate.Validation_error) as error:
             logs.extend(
@@ -675,12 +678,12 @@ def load_configurations(config_filenames, arguments, overrides=None, resolve_env
                             levelno=logging.CRITICAL,
                             levelname='CRITICAL',
                             msg=f'{config_filename}: Error parsing configuration file',
-                        )
+                        ),
                     ),
                     logging.makeLogRecord(
-                        dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=str(error))
+                        dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=str(error)),
                     ),
-                ]
+                ],
             )

     return (configs, sorted(config_paths), logs)
@@ -703,7 +706,10 @@ BORG_REPOSITORY_ACCESS_ABORTED_EXIT_CODE = 62


 def log_error_records(
-    message, error=None, levelno=logging.CRITICAL, log_command_error_output=False
+    message,
+    error=None,
+    levelno=logging.CRITICAL,
+    log_command_error_output=False,
 ):
     '''
     Given error message text, an optional exception object, an optional log level, and whether to
@@ -721,14 +727,14 @@ def log_error_records(

     try:
         raise error
-    except CalledProcessError as error:
+    except CalledProcessError as called_process_error:
         yield log_record(levelno=levelno, levelname=level_name, msg=str(message))

-        if error.output:
+        if called_process_error.output:
             try:
-                output = error.output.decode('utf-8')
+                output = called_process_error.output.decode('utf-8')
             except (UnicodeDecodeError, AttributeError):
-                output = error.output
+                output = called_process_error.output
 
 
             # Suppress these logs for now and save the error output for the log summary at the end.
             # Suppress these logs for now and save the error output for the log summary at the end.
             # Log a separate record per line, as some errors can be really verbose and overflow the
             # Log a separate record per line, as some errors can be really verbose and overflow the
@@ -741,17 +747,17 @@ def log_error_records(
                     suppress_log=True,
                     suppress_log=True,
                 )
                 )
 
 
-        yield log_record(levelno=levelno, levelname=level_name, msg=str(error))
+        yield log_record(levelno=levelno, levelname=level_name, msg=str(called_process_error))
 
 
-        if error.returncode == BORG_REPOSITORY_ACCESS_ABORTED_EXIT_CODE:
+        if called_process_error.returncode == BORG_REPOSITORY_ACCESS_ABORTED_EXIT_CODE:
             yield log_record(
             yield log_record(
                 levelno=levelno,
                 levelno=levelno,
                 levelname=level_name,
                 levelname=level_name,
                 msg='\nTo work around this, set either the "relocated_repo_access_is_ok" or "unknown_unencrypted_repo_access_is_ok" option to "true", as appropriate.',
                 msg='\nTo work around this, set either the "relocated_repo_access_is_ok" or "unknown_unencrypted_repo_access_is_ok" option to "true", as appropriate.',
             )
             )
-    except (ValueError, OSError) as error:
+    except (ValueError, OSError) as other_error:
         yield log_record(levelno=levelno, levelname=level_name, msg=str(message))
         yield log_record(levelno=levelno, levelname=level_name, msg=str(message))
-        yield log_record(levelno=levelno, levelname=level_name, msg=str(error))
+        yield log_record(levelno=levelno, levelname=level_name, msg=str(other_error))
     except:  # noqa: E722, S110
     except:  # noqa: E722, S110
         # Raising above only as a means of determining the error type. Swallow the exception here
         # Raising above only as a means of determining the error type. Swallow the exception here
         # because we don't want the exception to propagate out of this function.
         # because we don't want the exception to propagate out of this function.
@@ -783,7 +789,8 @@ def collect_highlander_action_summary_logs(configs, arguments, configuration_par
         try:
         try:
             # No configuration file is needed for bootstrap.
             # No configuration file is needed for bootstrap.
             local_borg_version = borg_version.local_borg_version(
             local_borg_version = borg_version.local_borg_version(
-                {}, arguments['bootstrap'].local_path
+                {},
+                arguments['bootstrap'].local_path,
             )
             )
         except (OSError, CalledProcessError, ValueError) as error:
         except (OSError, CalledProcessError, ValueError) as error:
             yield from log_error_records('Error getting local Borg version', error)
             yield from log_error_records('Error getting local Borg version', error)
@@ -791,14 +798,16 @@ def collect_highlander_action_summary_logs(configs, arguments, configuration_par
 
 
         try:
         try:
             borgmatic.actions.config.bootstrap.run_bootstrap(
             borgmatic.actions.config.bootstrap.run_bootstrap(
-                arguments['bootstrap'], arguments['global'], local_borg_version
+                arguments['bootstrap'],
+                arguments['global'],
+                local_borg_version,
             )
             )
             yield logging.makeLogRecord(
             yield logging.makeLogRecord(
                 dict(
                 dict(
                     levelno=logging.ANSWER,
                     levelno=logging.ANSWER,
                     levelname='ANSWER',
                     levelname='ANSWER',
                     msg='Bootstrap successful',
                     msg='Bootstrap successful',
-                )
+                ),
             )
             )
         except (
         except (
             CalledProcessError,
             CalledProcessError,
@@ -812,14 +821,15 @@ def collect_highlander_action_summary_logs(configs, arguments, configuration_par
     if 'generate' in arguments:
     if 'generate' in arguments:
         try:
         try:
             borgmatic.actions.config.generate.run_generate(
             borgmatic.actions.config.generate.run_generate(
-                arguments['generate'], arguments['global']
+                arguments['generate'],
+                arguments['global'],
             )
             )
             yield logging.makeLogRecord(
             yield logging.makeLogRecord(
                 dict(
                 dict(
                     levelno=logging.ANSWER,
                     levelno=logging.ANSWER,
                     levelname='ANSWER',
                     levelname='ANSWER',
                     msg='Generate successful',
                     msg='Generate successful',
-                )
+                ),
             )
             )
         except (
         except (
             CalledProcessError,
             CalledProcessError,
@@ -837,7 +847,7 @@ def collect_highlander_action_summary_logs(configs, arguments, configuration_par
                     levelno=logging.CRITICAL,
                     levelno=logging.CRITICAL,
                     levelname='CRITICAL',
                     levelname='CRITICAL',
                     msg='Configuration validation failed',
                     msg='Configuration validation failed',
-                )
+                ),
             )
             )
 
 
             return
             return
@@ -850,7 +860,7 @@ def collect_highlander_action_summary_logs(configs, arguments, configuration_par
                     levelno=logging.ANSWER,
                     levelno=logging.ANSWER,
                     levelname='ANSWER',
                     levelname='ANSWER',
                     msg='All configuration files are valid',
                     msg='All configuration files are valid',
-                )
+                ),
             )
             )
         except (
         except (
             CalledProcessError,
             CalledProcessError,
@@ -862,7 +872,7 @@ def collect_highlander_action_summary_logs(configs, arguments, configuration_par
         return
         return
 
 
 
 
-def collect_configuration_run_summary_logs(configs, config_paths, arguments, log_file_path):
+def collect_configuration_run_summary_logs(configs, config_paths, arguments, log_file_path):  # noqa: PLR0912
     '''
     '''
     Given a dict of configuration filename to corresponding parsed configuration, a sequence of
     Given a dict of configuration filename to corresponding parsed configuration, a sequence of
     loaded configuration paths, parsed command-line arguments as a dict from subparser name to a
     loaded configuration paths, parsed command-line arguments as a dict from subparser name to a
@@ -875,9 +885,9 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments, log
     # Run cross-file validation checks.
     # Run cross-file validation checks.
     repository = None
     repository = None
 
 
-    for action_name, action_arguments in arguments.items():
+    for action_arguments in arguments.values():
         if hasattr(action_arguments, 'repository'):
         if hasattr(action_arguments, 'repository'):
-            repository = getattr(action_arguments, 'repository')
+            repository = action_arguments.repository
             break
             break
 
 
     try:
     try:
@@ -942,7 +952,7 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments, log
                         levelno=logging.INFO,
                         levelno=logging.INFO,
                         levelname='INFO',
                         levelname='INFO',
                         msg=f'{config_filename}: Successfully ran configuration file',
                         msg=f'{config_filename}: Successfully ran configuration file',
-                    )
+                    ),
                 )
                 )
                 if results:
                 if results:
                     json_results.extend(results)
                     json_results.extend(results)
@@ -1031,17 +1041,17 @@ def get_singular_option_value(configs, option_name):
         configure_logging(logging.CRITICAL)
         configure_logging(logging.CRITICAL)
         joined_values = ', '.join(str(value) for value in distinct_values)
         joined_values = ', '.join(str(value) for value in distinct_values)
         logger.critical(
         logger.critical(
-            f'The {option_name} option has conflicting values across configuration files: {joined_values}'
+            f'The {option_name} option has conflicting values across configuration files: {joined_values}',
         )
         )
         exit_with_help_link()
         exit_with_help_link()
 
 
     try:
     try:
-        return tuple(distinct_values)[0]
-    except IndexError:
+        return next(iter(distinct_values))
+    except StopIteration:
         return None
         return None
 
 
 
 
-def main(extra_summary_logs=[]):  # pragma: no cover
+def main(extra_summary_logs=()):  # pragma: no cover
     configure_signals()
     configure_signals()
     configure_delayed_logging()
     configure_delayed_logging()
     schema_filename = validate.schema_filename()
     schema_filename = validate.schema_filename()
@@ -1070,15 +1080,15 @@ def main(extra_summary_logs=[]):  # pragma: no cover
     global_arguments = arguments['global']
     global_arguments = arguments['global']
 
 
     if global_arguments.version:
     if global_arguments.version:
-        print(importlib.metadata.version('borgmatic'))
+        print(importlib.metadata.version('borgmatic'))  # noqa: T201
         sys.exit(0)
         sys.exit(0)
 
 
     if global_arguments.bash_completion:
     if global_arguments.bash_completion:
-        print(borgmatic.commands.completion.bash.bash_completion())
+        print(borgmatic.commands.completion.bash.bash_completion())  # noqa: T201
         sys.exit(0)
         sys.exit(0)
 
 
     if global_arguments.fish_completion:
     if global_arguments.fish_completion:
-        print(borgmatic.commands.completion.fish.fish_completion())
+        print(borgmatic.commands.completion.fish.fish_completion())  # noqa: T201
         sys.exit(0)
         sys.exit(0)
 
 
     config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths))
     config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths))
@@ -1117,18 +1127,23 @@ def main(extra_summary_logs=[]):  # pragma: no cover
         exit_with_help_link()
         exit_with_help_link()
 
 
     summary_logs = (
     summary_logs = (
-        extra_summary_logs
+        list(extra_summary_logs)
         + parse_logs
         + parse_logs
         + (
         + (
             list(
             list(
                 collect_highlander_action_summary_logs(
                 collect_highlander_action_summary_logs(
-                    configs, arguments, configuration_parse_errors
-                )
+                    configs,
+                    arguments,
+                    configuration_parse_errors,
+                ),
             )
             )
             or list(
             or list(
                 collect_configuration_run_summary_logs(
                 collect_configuration_run_summary_logs(
-                    configs, config_paths, arguments, log_file_path
-                )
+                    configs,
+                    config_paths,
+                    arguments,
+                    log_file_path,
+                ),
             )
             )
         )
         )
     )
     )
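
The renames above are worth pausing on: `except CalledProcessError as error:` would rebind the function's `error` parameter, and Python also unbinds an `except ... as name` target when the block exits, so reusing the name is an easy way to lose the original value. A minimal sketch of the pattern (describe_failure() is a hypothetical stand-in, not borgmatic code):

    import subprocess

    def describe_failure(message, error=None):
        # Re-raising the passed-in exception and catching it again is how
        # log_error_records() dispatches on its type. Binding each caught
        # exception to a distinct name avoids clobbering the "error"
        # parameter, which Python would otherwise unbind at block exit.
        try:
            raise error
        except subprocess.CalledProcessError as called_process_error:
            return f'{message}: command failed with code {called_process_error.returncode}'
        except (ValueError, OSError) as other_error:
            return f'{message}: {other_error}'

    print(describe_failure('Oops', subprocess.CalledProcessError(2, 'borg')))

Relatedly, changing `main(extra_summary_logs=[])` to `main(extra_summary_logs=())` fixes the classic mutable-default-argument pitfall, and wrapping the value as `list(extra_summary_logs)` keeps the later `+` concatenation working.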

+ 4 - 4
borgmatic/commands/completion/actions.py

@@ -22,15 +22,15 @@ def available_actions(subparsers, current_action=None):
     action of "config" but not "list".
     '''
     action_to_subactions = borgmatic.commands.arguments.get_subactions_for_actions(
-        subparsers.choices
+        subparsers.choices,
    )
     current_subactions = action_to_subactions.get(current_action)

     if current_subactions:
         return current_subactions

-    all_subactions = set(
+    all_subactions = {
         subaction for subactions in action_to_subactions.values() for subaction in subactions
-    )
+    }

-    return tuple(action for action in subparsers.choices.keys() if action not in all_subactions)
+    return tuple(action for action in subparsers.choices if action not in all_subactions)
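
The `all_subactions` change swaps `set(generator)` for a set comprehension (C401 in Ruff's rule set), which builds the set directly; iterating `subparsers.choices` instead of `.keys()` is the same idea for dicts. A small self-contained sketch with made-up data:

    # Illustrative mapping only; the real one comes from argparse subparsers.
    action_to_subactions = {'config': ('bootstrap', 'generate', 'validate'), 'list': ()}

    all_subactions = {
        subaction
        for subactions in action_to_subactions.values()
        for subaction in subactions
    }
    print(all_subactions == {'bootstrap', 'generate', 'validate'})  # True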

+ 8 - 9
borgmatic/commands/completion/bash.py

@@ -23,7 +23,7 @@ def bash_completion():
     borgmatic's command-line argument parsers.
     '''
     (
-        unused_global_parser,
+        _,
         action_parsers,
         global_plus_action_parser,
     ) = borgmatic.commands.arguments.make_parsers(
@@ -33,6 +33,7 @@ def bash_completion():
     global_flags = parser_flags(global_plus_action_parser)

     # Avert your eyes.
+    # fmt: off
     return '\n'.join(
         (
             'check_version() {',
@@ -47,24 +48,22 @@ def bash_completion():
             '    fi',
             '}',
             'complete_borgmatic() {',
-        )
-        + tuple(
+            *tuple(
             '''    if [[ " ${COMP_WORDS[*]} " =~ " %s " ]]; then
        COMPREPLY=($(compgen -W "%s %s %s" -- "${COMP_WORDS[COMP_CWORD]}"))
        return 0
-    fi'''
+    fi'''  # noqa: UP031
             % (
                 action,
                 parser_flags(action_parser),
                 ' '.join(
-                    borgmatic.commands.completion.actions.available_actions(action_parsers, action)
+                    borgmatic.commands.completion.actions.available_actions(action_parsers, action),
                 ),
                 global_flags,
             )
             for action, action_parser in reversed(action_parsers.choices.items())
-        )
-        + (
-            '    COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))'  # noqa: FS003
+        ),
+            '    COMPREPLY=($(compgen -W "%s %s" -- "${COMP_WORDS[COMP_CWORD]}"))'  # noqa: UP031
             % (
                 ' '.join(borgmatic.commands.completion.actions.available_actions(action_parsers)),
                 global_flags,
@@ -72,5 +71,5 @@ def bash_completion():
             '    (check_version &)',
             '}',
             '\ncomplete -o bashdefault -o default -F complete_borgmatic borgmatic',
-        )
+        ),
     )
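
Two things are going on here: `# fmt: off` tells the Ruff formatter (like Black) to leave the hand-aligned shell template alone, and the `+`-concatenated tuples are collapsed into one literal using `*` unpacking. A minimal sketch of the unpacking idiom, with placeholder strings:

    parts = (
        'header',
        # Splice generated lines into the literal instead of concatenating tuples.
        *tuple(f'section {name}' for name in ('a', 'b')),
        'footer',
    )
    print('\n'.join(parts))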

+ 23 - 22
borgmatic/commands/completion/fish.py

@@ -11,10 +11,10 @@ def has_file_options(action: Action):
     '''
     Given an argparse.Action instance, return True if it takes a file argument.
     '''
-    return action.metavar in (
+    return action.metavar in {
         'FILENAME',
         'PATH',
-    ) or action.dest in ('config_paths',)
+    } or action.dest in {'config_paths'}


 def has_choice_options(action: Action):
@@ -36,11 +36,11 @@ def has_unknown_required_param_options(action: Action):
     return (
         action.required is True
         or action.nargs
-        in (
+        in {
             '+',
             '*',
-        )
-        or action.metavar in ('PATTERN', 'KEYS', 'N')
+        }
+        or action.metavar in {'PATTERN', 'KEYS', 'N'}
         or (action.type is not None and action.default is None)
     )

@@ -77,7 +77,7 @@ def exact_options_completion(action: Action):
         return f'''\ncomplete -c borgmatic -x -n "__borgmatic_current_arg {args}"'''

     raise ValueError(
-        f'Unexpected action: {action} passes has_exact_options but has no choices produced'
+        f'Unexpected action: {action} passes has_exact_options but has no choices produced',
     )


@@ -96,7 +96,7 @@ def fish_completion():
     borgmatic's command-line argument parsers.
     '''
     (
-        unused_global_parser,
+        _,
         action_parsers,
         global_plus_action_parser,
     ) = borgmatic.commands.arguments.make_parsers(
@@ -104,7 +104,7 @@ def fish_completion():
         unparsed_arguments=(),
     )

-    all_action_parsers = ' '.join(action for action in action_parsers.choices.keys())
+    all_action_parsers = ' '.join(action for action in action_parsers.choices)

     exact_option_args = tuple(
         ' '.join(action.option_strings)
@@ -119,8 +119,9 @@ def fish_completion():
     )

     # Avert your eyes.
-    return '\n'.join(
-        dedent_strip_as_tuple(
+    # fmt: off
+    return '\n'.join((
+        *dedent_strip_as_tuple(
             f'''
             function __borgmatic_check_version
                 set -fx this_filename (status current-filename)
@@ -157,27 +158,27 @@ def fish_completion():

             set --local action_parser_condition "not __fish_seen_subcommand_from {all_action_parsers}"
             set --local exact_option_condition "not __borgmatic_current_arg {' '.join(exact_option_args)}"
-            '''
-        )
-        + ('\n# action_parser completions',)
-        + tuple(
+            ''',
+        ),
+        '\n# action_parser completions',
+        *tuple(
             f'''complete -c borgmatic -f -n "$action_parser_condition" -n "$exact_option_condition" -a '{action_name}' -d {shlex.quote(action_parser.description)}'''
             for action_name, action_parser in action_parsers.choices.items()
-        )
-        + ('\n# global flags',)
-        + tuple(
+        ),
+        '\n# global flags',
+        *tuple(
             # -n is checked in order, so put faster / more likely to be true checks first
             f'''complete -c borgmatic -f -n "$exact_option_condition" -a '{' '.join(action.option_strings)}' -d {shlex.quote(action.help)}{exact_options_completion(action)}'''
             for action in global_plus_action_parser._actions
             # ignore the noargs action, as this is an impossible completion for fish
             if len(action.option_strings) > 0
             if 'Deprecated' not in action.help
-        )
-        + ('\n# action_parser flags',)
-        + tuple(
+        ),
+        '\n# action_parser flags',
+        *tuple(
             f'''complete -c borgmatic -f -n "$exact_option_condition" -a '{' '.join(action.option_strings)}' -d {shlex.quote(action.help)} -n "__fish_seen_subcommand_from {action_name}"{exact_options_completion(action)}'''
             for action_name, action_parser in action_parsers.choices.items()
             for action in action_parser._actions
             if 'Deprecated' not in (action.help or ())
-        )
-    )
+        ),
+    ))
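
The membership tests now use set literals (`in {...}`) rather than tuples; for literal constants, CPython can compile the set into a frozenset constant, and a set signals "unordered collection of options" to readers. A hypothetical standalone version of the predicate:

    def has_file_options(metavar, dest):
        # Simplified signature for illustration; the real function takes an
        # argparse.Action instance.
        return metavar in {'FILENAME', 'PATH'} or dest in {'config_paths'}

    print(has_file_options('PATH', 'verbosity'))  # True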

+ 1 - 1
borgmatic/commands/completion/flag.py

@@ -5,7 +5,7 @@ def variants(flag_name):
     "--foo[9].bar".
     '''
     if '[0]' in flag_name:
-        for index in range(0, 10):
+        for index in range(10):
             yield flag_name.replace('[0]', f'[{index}]')

         return
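
`range(10)` is the idiomatic spelling of `range(0, 10)`; the redundant start argument is all that changes here. A runnable sketch of the generator (the `yield flag_name` fallback is assumed from the docstring, not shown in this hunk):

    def variants(flag_name):
        # Expand an indexed flag like "--foo[0].bar" into its first ten variants.
        if '[0]' in flag_name:
            for index in range(10):
                yield flag_name.replace('[0]', f'[{index}]')
            return

        yield flag_name

    print(list(variants('--foo[0].bar'))[:3])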

+ 2 - 2
borgmatic/commands/generate_config.py

@@ -10,8 +10,8 @@ def main():
             levelno=logging.WARNING,
             levelname='WARNING',
             msg='generate-borgmatic-config is deprecated and will be removed from a future release. Please use "borgmatic config generate" instead.',
-        )
+        ),
     )

-    sys.argv = ['borgmatic', 'config', 'generate'] + sys.argv[1:]
+    sys.argv = ['borgmatic', 'config', 'generate', *sys.argv[1:]]
     borgmatic.commands.borgmatic.main([warning_log])
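
Replacing list concatenation with in-place unpacking is Ruff's RUF005 rewrite: the new list is built in one pass, without an intermediate temporary. A tiny sketch with a stand-in for `sys.argv[1:]`:

    forwarded = ['--verbosity', '1']  # stand-in for sys.argv[1:]
    argv = ['borgmatic', 'config', 'generate', *forwarded]
    print(argv)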

+ 2 - 2
borgmatic/commands/validate_config.py

@@ -10,8 +10,8 @@ def main():
             levelno=logging.WARNING,
             levelname='WARNING',
             msg='validate-borgmatic-config is deprecated and will be removed from a future release. Please use "borgmatic config validate" instead.',
-        )
+        ),
     )

-    sys.argv = ['borgmatic', 'config', 'validate'] + sys.argv[1:]
+    sys.argv = ['borgmatic', 'config', 'validate', *sys.argv[1:]]
     borgmatic.commands.borgmatic.main([warning_log])

+ 1 - 1
borgmatic/config/arguments.py

@@ -155,7 +155,7 @@ def prepare_arguments_for_config(global_arguments, schema):
             (
                 keys,
                 convert_value_type(value, option_type),
-            )
+            ),
         )

     return tuple(prepared_values)
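
Most of the single-character changes in this commit add a trailing comma after the last argument (flake8-commas' COM812, as implemented by Ruff). Beyond consistency, the "magic trailing comma" tells the formatter to keep the call expanded one argument per line rather than collapsing it. A sketch with illustrative values:

    prepared_values = []
    prepared_values.append(
        (
            ('repositories', 0, 'path'),  # illustrative keys tuple
            '/backups/repo.borg',  # illustrative converted value
        ),
    )
    print(prepared_values)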

+ 1 - 1
borgmatic/config/collect.py

@@ -49,6 +49,6 @@ def collect_config_filenames(config_paths):

         for filename in sorted(os.listdir(path)):
             full_filename = os.path.join(path, filename)
-            matching_filetype = full_filename.endswith('.yaml') or full_filename.endswith('.yml')
+            matching_filetype = full_filename.endswith(('.yaml', '.yml'))
             if matching_filetype and not os.path.isdir(full_filename):
                 yield os.path.abspath(full_filename)
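
`str.endswith()` accepts a tuple of suffixes, so one call replaces the chained `or` test:

    for name in ('config.yaml', 'config.yml', 'notes.txt'):
        print(name, name.endswith(('.yaml', '.yml')))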

+ 12 - 12
borgmatic/config/constants.py

@@ -1,3 +1,4 @@
+import contextlib
 import shlex


@@ -6,18 +7,18 @@ def coerce_scalar(value):
     Given a configuration value, coerce it to an integer or a boolean as appropriate and return the
     result.
     '''
-    try:
+    with contextlib.suppress(TypeError, ValueError):
         return int(value)
-    except (TypeError, ValueError):
-        pass
-
-    if value == 'true' or value == 'True':
-        return True
-
-    if value == 'false' or value == 'False':
-        return False

-    return value
+    try:
+        return {
+            'true': True,
+            'True': True,
+            'false': False,
+            'False': False,
+        }.get(value, value)
+    except TypeError:  # e.g. for an unhashable type
+        return value


 def apply_constants(value, constants, shell_escape=False):
@@ -56,8 +57,7 @@ def apply_constants(value, constants, shell_escape=False):
                 constants,
                 shell_escape=(
                     shell_escape
-                    or option_name.startswith('before_')
-                    or option_name.startswith('after_')
+                    or option_name.startswith(('before_', 'after_'))
                     or option_name == 'on_error'
                 ),
             )
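
`contextlib.suppress()` replaces a try/except whose handler is just `pass`, and the dict lookup replaces the chain of equality tests. A simplified sketch of the resulting control flow (only lowercase keys shown; the real table also maps 'True' and 'False'):

    import contextlib

    def coerce(value):
        # If int() fails, suppress the error and fall through to the lookup.
        with contextlib.suppress(TypeError, ValueError):
            return int(value)

        return {'true': True, 'false': False}.get(value, value)

    print(coerce('5'), coerce('true'), coerce('other'))  # 5 True other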

+ 1 - 1
borgmatic/config/environment.py

@@ -2,7 +2,7 @@ import os
 import re

 VARIABLE_PATTERN = re.compile(
-    r'(?P<escape>\\)?(?P<variable>\$\{(?P<name>[A-Za-z0-9_]+)((:?-)(?P<default>[^}]+))?\})'
+    r'(?P<escape>\\)?(?P<variable>\$\{(?P<name>[A-Za-z0-9_]+)((:?-)(?P<default>[^}]+))?\})',
 )


+ 45 - 29
borgmatic/config/generate.py

@@ -1,4 +1,5 @@
 import collections
+import contextlib
 import io
 import os
 import re
@@ -18,7 +19,8 @@ def insert_newline_before_comment(config, field_name):
     field and its comments.
     '''
     config.ca.items[field_name][1].insert(
-        0, ruamel.yaml.tokens.CommentToken('\n', ruamel.yaml.error.CommentMark(0), None)
+        0,
+        ruamel.yaml.tokens.CommentToken('\n', ruamel.yaml.error.CommentMark(0), None),
     )


@@ -40,13 +42,17 @@ def schema_to_sample_configuration(schema, source_config=None, level=0, parent_i
         config = ruamel.yaml.comments.CommentedSeq(
             example
             if borgmatic.config.schema.compare_types(
-                schema['items'].get('type'), SCALAR_SCHEMA_TYPES
+                schema['items'].get('type'),
+                SCALAR_SCHEMA_TYPES,
             )
             else [
                 schema_to_sample_configuration(
-                    schema['items'], source_config, level, parent_is_sequence=True
-                )
-            ]
+                    schema['items'],
+                    source_config,
+                    level,
+                    parent_is_sequence=True,
+                ),
+            ],
         )
         add_comments_to_configuration_sequence(config, schema, indent=(level * INDENT))
     elif borgmatic.config.schema.compare_types(schema_type, {'object'}):
@@ -59,19 +65,25 @@ def schema_to_sample_configuration(schema, source_config=None, level=0, parent_i
                     (
                         field_name,
                         schema_to_sample_configuration(
-                            sub_schema, (source_config or {}).get(field_name, {}), level + 1
+                            sub_schema,
+                            (source_config or {}).get(field_name, {}),
+                            level + 1,
                         ),
                     )
                     for field_name, sub_schema in borgmatic.config.schema.get_properties(
-                        schema
+                        schema,
                     ).items()
-                ]
+                ],
             )
             or example
         )
         indent = (level * INDENT) + (SEQUENCE_INDENT if parent_is_sequence else 0)
         add_comments_to_configuration_object(
-            config, schema, source_config, indent=indent, skip_first_field=parent_is_sequence
+            config,
+            schema,
+            source_config,
+            indent=indent,
+            skip_first_field=parent_is_sequence,
         )
     elif borgmatic.config.schema.compare_types(schema_type, SCALAR_SCHEMA_TYPES, match=all):
         return example
@@ -121,12 +133,8 @@ def comment_out_optional_configuration(rendered_config):
             indent_characters_at_sentinel = indent_characters
             continue

-        # Hit a blank line, so reset commenting.
-        if not line.strip():
-            optional = False
-            indent_characters_at_sentinel = None
-        # Dedented, so reset commenting.
-        elif (
+        # Hit a blank line or dedented, so reset commenting.
+        if not line.strip() or (
             indent_characters_at_sentinel is not None
             and indent_characters < indent_characters_at_sentinel
         ):
@@ -158,15 +166,13 @@ def write_configuration(config_filename, rendered_config, mode=0o600, overwrite=
     '''
     if not overwrite and os.path.exists(config_filename):
         raise FileExistsError(
-            f'{config_filename} already exists. Aborting. Use --overwrite to replace the file.'
+            f'{config_filename} already exists. Aborting. Use --overwrite to replace the file.',
         )

-    try:
+    with contextlib.suppress(FileExistsError, FileNotFoundError):
         os.makedirs(os.path.dirname(config_filename), mode=0o700)
-    except (FileExistsError, FileNotFoundError):
-        pass

-    with open(config_filename, 'w') as config_file:
+    with open(config_filename, 'w', encoding='utf-8') as config_file:
         config_file.write(rendered_config)

     os.chmod(config_filename, mode)
@@ -191,7 +197,7 @@ def add_comments_to_configuration_sequence(config, schema, indent=0):
     if schema['items'].get('type') != 'object':
         return

-    for field_name in config[0].keys():
+    for field_name in config[0]:
         field_schema = borgmatic.config.schema.get_properties(schema['items']).get(field_name, {})
         description = field_schema.get('description')

@@ -211,7 +217,11 @@ COMMENTED_OUT_SENTINEL = 'COMMENT_OUT'


 def add_comments_to_configuration_object(
-    config, schema, source_config=None, indent=0, skip_first_field=False
+    config,
+    schema,
+    source_config=None,
+    indent=0,
+    skip_first_field=False,
 ):
     '''
     Using descriptions from a schema as a source, add those descriptions as comments to the given
@@ -239,7 +249,7 @@ def add_comments_to_configuration_object(
             source_config is None or field_name not in source_config
         ):
             description = (
-                '\n'.join((description, COMMENTED_OUT_SENTINEL))
+                f'{description}\n{COMMENTED_OUT_SENTINEL}'
                 if description
                 else COMMENTED_OUT_SENTINEL
             )
@@ -275,7 +285,8 @@ def merge_source_configuration_into_destination(destination_config, source_confi
         # This is a mapping. Recurse for this key/value.
         if isinstance(source_value, collections.abc.Mapping):
             destination_config[field_name] = merge_source_configuration_into_destination(
-                destination_config[field_name], source_value
+                destination_config[field_name],
+                source_value,
             )
             continue

@@ -289,18 +300,22 @@ def merge_source_configuration_into_destination(destination_config, source_confi
                         source_item,
                     )
                     for index, source_item in enumerate(source_value)
-                ]
+                ],
             )
             continue

         # This is some sort of scalar. Set it into the destination.
-        destination_config[field_name] = source_config[field_name]
+        destination_config[field_name] = source_value

     return destination_config


 def generate_sample_configuration(
-    dry_run, source_filename, destination_filename, schema_filename, overwrite=False
+    dry_run,
+    source_filename,
+    destination_filename,
+    schema_filename,
+    overwrite=False,
 ):
     '''
     Given an optional source configuration filename, and a required destination configuration
@@ -309,7 +324,7 @@ def generate_sample_configuration(
     schema. If a source filename is provided, merge the parsed contents of that configuration into
     the generated configuration.
     '''
-    schema = ruamel.yaml.YAML(typ='safe').load(open(schema_filename))
+    schema = ruamel.yaml.YAML(typ='safe').load(open(schema_filename, encoding='utf-8'))
     source_config = None

     if source_filename:
@@ -323,7 +338,8 @@ def generate_sample_configuration(
             del source_config['bootstrap']

     destination_config = merge_source_configuration_into_destination(
-        schema_to_sample_configuration(schema, source_config), source_config
+        schema_to_sample_configuration(schema, source_config),
+        source_config,
     )

     if dry_run:
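
Opening text files with an explicit `encoding` pins behavior across platforms; without it, Python falls back to the locale's preferred encoding, which need not be UTF-8 (this is what Ruff's PLW1514 flags). For example, with a hypothetical path:

    with open('example.yaml', 'w', encoding='utf-8') as config_file:
        config_file.write('option: value\n')

    with open('example.yaml', encoding='utf-8') as config_file:
        print(config_file.read())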

+ 21 - 16
borgmatic/config/load.py

@@ -31,7 +31,7 @@ def probe_and_include_file(filename, include_directories, config_paths):
             return load_configuration(candidate_filename, config_paths)

     raise FileNotFoundError(
-        f'Could not find include {filename} at {" or ".join(candidate_filenames)}'
+        f'Could not find include {filename} at {" or ".join(candidate_filenames)}',
     )


@@ -69,7 +69,7 @@ def include_configuration(loader, filename_node, include_directory, config_paths
         ]

     raise ValueError(
-        'The value given for the !include tag is invalid; use a single filename or a list of filenames instead'
+        'The value given for the !include tag is invalid; use a single filename or a list of filenames instead',
     )


@@ -85,7 +85,7 @@ def raise_retain_node_error(loader, node):
     '''
     if isinstance(node, (ruamel.yaml.nodes.MappingNode, ruamel.yaml.nodes.SequenceNode)):
         raise ValueError(
-            'The !retain tag may only be used within a configuration file containing a merged !include tag.'
+            'The !retain tag may only be used within a configuration file containing a merged !include tag.',
         )

     raise ValueError('The !retain tag may only be used on a mapping or list.')
@@ -100,7 +100,7 @@ def raise_omit_node_error(loader, node):
     tags are handled by deep_merge_nodes() below.
     '''
     raise ValueError(
-        'The !omit tag may only be used on a scalar (e.g., string) or list element within a configuration file containing a merged !include tag.'
+        'The !omit tag may only be used on a scalar (e.g., string) or list element within a configuration file containing a merged !include tag.',
     )


@@ -111,9 +111,13 @@ class Include_constructor(ruamel.yaml.SafeConstructor):
     '''

     def __init__(
-        self, preserve_quotes=None, loader=None, include_directory=None, config_paths=None
+        self,
+        preserve_quotes=None,
+        loader=None,
+        include_directory=None,
+        config_paths=None,
     ):
-        super(Include_constructor, self).__init__(preserve_quotes, loader)
+        super().__init__(preserve_quotes, loader)
         self.add_constructor(
             '!include',
             functools.partial(
@@ -147,7 +151,7 @@ class Include_constructor(ruamel.yaml.SafeConstructor):
         representer = ruamel.yaml.representer.SafeRepresenter()

         for index, (key_node, value_node) in enumerate(node.value):
-            if key_node.tag == u'tag:yaml.org,2002:merge' and value_node.tag == '!include':
+            if key_node.tag == 'tag:yaml.org,2002:merge' and value_node.tag == '!include':
                 # Replace the merge include with a sequence of included configuration nodes ready
                 # for merging. The construct_object() call here triggers include_configuration()
                 # among other constructors.
@@ -157,7 +161,7 @@ class Include_constructor(ruamel.yaml.SafeConstructor):
                 )

         # This super().flatten_mapping() call actually performs "<<" merges.
-        super(Include_constructor, self).flatten_mapping(node)
+        super().flatten_mapping(node)

         node.value = deep_merge_nodes(node.value)

@@ -179,7 +183,7 @@ def load_configuration(filename, config_paths=None):
     # because yaml.Constructor has to be an actual class.)
     class Include_constructor_with_extras(Include_constructor):
         def __init__(self, preserve_quotes=None, loader=None):
-            super(Include_constructor_with_extras, self).__init__(
+            super().__init__(
                 preserve_quotes,
                 loader,
                 include_directory=os.path.dirname(filename),
@@ -190,7 +194,7 @@ def load_configuration(filename, config_paths=None):
     yaml.Constructor = Include_constructor_with_extras
     config_paths.add(filename)

-    with open(filename) as file:
+    with open(filename, encoding='utf-8') as file:
         return yaml.load(file.read())


@@ -318,17 +322,18 @@ def deep_merge_nodes(nodes):

     # Bucket the nodes by their keys. Then merge all of the values sharing the same key.
     for key_name, grouped_nodes in itertools.groupby(
-        sorted(nodes, key=get_node_key_name), get_node_key_name
+        sorted(nodes, key=get_node_key_name),
+        get_node_key_name,
     ):
-        grouped_nodes = list(grouped_nodes)
+        grouped_nodes = list(grouped_nodes)  # noqa: PLW2901

         # The merged node inherits its attributes from the final node in the group.
         (last_node_key, last_node_value) = grouped_nodes[-1]
-        value_types = set(type(value) for (_, value) in grouped_nodes)
+        value_types = {type(value) for (_, value) in grouped_nodes}

         if len(value_types) > 1:
             raise ValueError(
-                f'Incompatible types found when trying to merge "{key_name}:" values across configuration files: {", ".join(value_type.id for value_type in value_types)}'
+                f'Incompatible types found when trying to merge "{key_name}:" values across configuration files: {", ".join(value_type.id for value_type in value_types)}',
             )

         # If we're dealing with MappingNodes, recurse and merge its values as well.
@@ -351,7 +356,7 @@ def deep_merge_nodes(nodes):
                             comment=last_node_value.comment,
                             anchor=last_node_value.anchor,
                         ),
-                    )
+                    ),
                 )

             continue
@@ -374,7 +379,7 @@ def deep_merge_nodes(nodes):
                             comment=last_node_value.comment,
                             anchor=last_node_value.anchor,
                         ),
-                    )
+                    ),
                 )

             continue
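
The `super(Class, self)` calls collapse to Python 3's zero-argument `super()` (Ruff's UP008), which resolves the class and instance implicitly:

    class Base:
        def __init__(self, preserve_quotes=None):
            self.preserve_quotes = preserve_quotes

    class Child(Base):
        def __init__(self, preserve_quotes=None):
            # Equivalent to super(Child, self).__init__(preserve_quotes).
            super().__init__(preserve_quotes)

    print(Child(True).preserve_quotes)  # True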

+ 44 - 50
borgmatic/config/normalize.py

@@ -25,12 +25,12 @@ def normalize_sections(config_filename, config):
         and location.get('prefix') != consistency.get('prefix')
         and location.get('prefix') != consistency.get('prefix')
     ):
     ):
         raise ValueError(
         raise ValueError(
-            'The retention prefix and the consistency prefix cannot have different values (unless one is not set).'
+            'The retention prefix and the consistency prefix cannot have different values (unless one is not set).',
         )
         )
 
 
     if storage.get('umask') and hooks.get('umask') and storage.get('umask') != hooks.get('umask'):
     if storage.get('umask') and hooks.get('umask') and storage.get('umask') != hooks.get('umask'):
         raise ValueError(
         raise ValueError(
-            'The storage umask and the hooks umask cannot have different values (unless one is not set).'
+            'The storage umask and the hooks umask cannot have different values (unless one is not set).',
         )
         )
 
 
     any_section_upgraded = False
     any_section_upgraded = False
@@ -51,8 +51,8 @@ def normalize_sections(config_filename, config):
                     levelno=logging.WARNING,
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     levelname='WARNING',
                     msg=f'{config_filename}: Configuration sections (like location:, storage:, retention:, consistency:, and hooks:) are deprecated and support will be removed from a future release. To prepare for this, move your options out of sections to the global scope.',
                     msg=f'{config_filename}: Configuration sections (like location:, storage:, retention:, consistency:, and hooks:) are deprecated and support will be removed from a future release. To prepare for this, move your options out of sections to the global scope.',
-                )
-            )
+                ),
+            ),
         ]
         ]
 
 
     return []
     return []
@@ -68,7 +68,7 @@ def make_command_hook_deprecation_log(config_filename, option_name):  # pragma:
             levelno=logging.WARNING,
             levelno=logging.WARNING,
             levelname='WARNING',
             levelname='WARNING',
             msg=f'{config_filename}: {option_name} is deprecated and support will be removed from a future release. Use commands: instead.',
             msg=f'{config_filename}: {option_name} is deprecated and support will be removed from a future release. Use commands: instead.',
-        )
+        ),
     )
     )
 
 
 
 
@@ -90,7 +90,7 @@ def normalize_commands(config_filename, config):
                 {
                 {
                     preposition: 'repository',
                     preposition: 'repository',
                     'run': commands,
                     'run': commands,
-                }
+                },
             )
             )
 
 
     # Normalize "before_backup", "before_prune", "after_backup", "after_prune", etc.
     # Normalize "before_backup", "before_prune", "after_backup", "after_prune", etc.
@@ -108,7 +108,7 @@ def normalize_commands(config_filename, config):
                     preposition: 'action',
                     preposition: 'action',
                     'when': [action_name],
                     'when': [action_name],
                     'run': commands,
                     'run': commands,
-                }
+                },
             )
             )
 
 
     # Normalize "on_error".
     # Normalize "on_error".
@@ -121,7 +121,7 @@ def normalize_commands(config_filename, config):
                 'after': 'error',
                 'after': 'error',
                 'when': ['create', 'prune', 'compact', 'check'],
                 'when': ['create', 'prune', 'compact', 'check'],
                 'run': commands,
                 'run': commands,
-            }
+            },
         )
         )
 
 
     # Normalize "before_everything" and "after_everything".
     # Normalize "before_everything" and "after_everything".
@@ -136,13 +136,13 @@ def normalize_commands(config_filename, config):
                     preposition: 'everything',
                     preposition: 'everything',
                     'when': ['create'],
                     'when': ['create'],
                     'run': commands,
                     'run': commands,
-                }
+                },
             )
             )
 
 
     return logs
     return logs
 
 
 
 
-def normalize(config_filename, config):
+def normalize(config_filename, config):  # noqa: PLR0912, PLR0915
     '''
     '''
     Given a configuration filename and a configuration dict of its loaded contents, apply particular
     Given a configuration filename and a configuration dict of its loaded contents, apply particular
     hard-coded rules to normalize the configuration to adhere to the current schema. Return any log
     hard-coded rules to normalize the configuration to adhere to the current schema. Return any log
@@ -160,8 +160,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     levelname='WARNING',
                     msg=f'{config_filename}: The borgmatic_source_directory option is deprecated and will be removed from a future release. Use user_runtime_directory and user_state_directory instead.',
                     msg=f'{config_filename}: The borgmatic_source_directory option is deprecated and will be removed from a future release. Use user_runtime_directory and user_state_directory instead.',
-                )
-            )
+                ),
+            ),
         )
         )
 
 
     # Upgrade exclude_if_present from a string to a list.
     # Upgrade exclude_if_present from a string to a list.
@@ -173,8 +173,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     levelname='WARNING',
                     msg=f'{config_filename}: The exclude_if_present option now expects a list value. String values for this option are deprecated and support will be removed from a future release.',
                     msg=f'{config_filename}: The exclude_if_present option now expects a list value. String values for this option are deprecated and support will be removed from a future release.',
-                )
-            )
+                ),
+            ),
         )
         config['exclude_if_present'] = [exclude_if_present]
 
@@ -191,8 +191,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     msg=f'{config_filename}: The store_config_files option has moved under the bootstrap hook. Specifying store_config_files at the global scope is deprecated and support will be removed from a future release.',
-                )
-            )
+                ),
+            ),
         )
         del config['store_config_files']
         config['bootstrap']['store_config_files'] = store_config_files
@@ -206,8 +206,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     msg=f'{config_filename}: The healthchecks hook now expects a key/value pair with "ping_url" as a key. String values for this option are deprecated and support will be removed from a future release.',
-                )
-            )
+                ),
+            ),
         )
         config['healthchecks'] = {'ping_url': healthchecks}
 
@@ -219,8 +219,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     msg=f'{config_filename}: The healthchecks hook now expects key/value pairs. String values for this option are deprecated and support will be removed from a future release.',
-                )
-            )
+                ),
+            ),
         )
         config['cronitor'] = {'ping_url': cronitor}
 
@@ -232,8 +232,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     msg=f'{config_filename}: The healthchecks hook now expects key/value pairs. String values for this option are deprecated and support will be removed from a future release.',
-                )
-            )
+                ),
+            ),
         )
         config['pagerduty'] = {'integration_key': pagerduty}
 
@@ -245,8 +245,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     msg=f'{config_filename}: The healthchecks hook now expects key/value pairs. String values for this option are deprecated and support will be removed from a future release.',
-                )
-            )
+                ),
+            ),
         )
         config['cronhub'] = {'ping_url': cronhub}
 
@@ -259,8 +259,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     msg=f'{config_filename}: The checks option now expects a list of key/value pairs. Lists of strings for this option are deprecated and support will be removed from a future release.',
-                )
-            )
+                ),
+            ),
         )
         config['checks'] = [{'name': check_type} for check_type in checks]
 
@@ -273,8 +273,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     msg=f'{config_filename}: The numeric_owner option has been renamed to numeric_ids. numeric_owner is deprecated and support will be removed from a future release.',
-                )
-            )
+                ),
+            ),
         )
         config['numeric_ids'] = numeric_owner
 
@@ -286,8 +286,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     msg=f'{config_filename}: The bsd_flags option has been renamed to flags. bsd_flags is deprecated and support will be removed from a future release.',
-                )
-            )
+                ),
+            ),
         )
         config['flags'] = bsd_flags
 
@@ -299,8 +299,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     msg=f'{config_filename}: The remote_rate_limit option has been renamed to upload_rate_limit. remote_rate_limit is deprecated and support will be removed from a future release.',
-                )
-            )
+                ),
+            ),
         )
         config['upload_rate_limit'] = remote_rate_limit
 
@@ -314,8 +314,8 @@ def normalize(config_filename, config):
                         levelno=logging.WARNING,
                         levelname='WARNING',
                         msg=f'{config_filename}: The repositories option now expects a list of key/value pairs. Lists of strings for this option are deprecated and support will be removed from a future release.',
-                    )
-                )
+                    ),
+                ),
             )
             config['repositories'] = [
                 {'path': repository} if isinstance(repository, str) else repository
@@ -338,28 +338,22 @@ def normalize(config_filename, config):
                             levelno=logging.WARNING,
                             levelname='WARNING',
                             msg=f'{config_filename}: Repository paths containing "~" are deprecated in borgmatic and support will be removed from a future release.',
-                        )
-                    )
+                        ),
+                    ),
                 )
 
             if ':' in repository_path:
                 if repository_path.startswith('file://'):
                     updated_repository_path = os.path.abspath(
-                        repository_path.partition('file://')[-1]
+                        repository_path.partition('file://')[-1],
                     )
                     config['repositories'].append(
                         dict(
                             repository_dict,
                             path=updated_repository_path,
-                        )
+                        ),
                     )
-                elif (
-                    repository_path.startswith('ssh://')
-                    or repository_path.startswith('sftp://')
-                    or repository_path.startswith('rclone:')
-                    or repository_path.startswith('s3:')
-                    or repository_path.startswith('b2:')
-                ):
+                elif repository_path.startswith(('ssh://', 'sftp://', 'rclone:', 's3:', 'b2:')):
                     config['repositories'].append(repository_dict)
                 else:
                     rewritten_repository_path = f"ssh://{repository_path.replace(':~', '/~').replace(':/', '/').replace(':', '/./')}"
@@ -369,14 +363,14 @@ def normalize(config_filename, config):
                                 levelno=logging.WARNING,
                                 levelname='WARNING',
                                 msg=f'{config_filename}: Remote repository paths without ssh://, sftp://, rclone:, s3:, or b2:, syntax are deprecated and support will be removed from a future release. Interpreting "{repository_path}" as "{rewritten_repository_path}"',
-                            )
-                        )
+                            ),
+                        ),
                     )
                     config['repositories'].append(
                         dict(
                             repository_dict,
                             path=rewritten_repository_path,
-                        )
+                        ),
                     )
             else:
                 config['repositories'].append(repository_dict)
@@ -388,8 +382,8 @@ def normalize(config_filename, config):
                     levelno=logging.WARNING,
                     levelname='WARNING',
                     msg=f'{config_filename}: The prefix option is deprecated and support will be removed from a future release. Use archive_name_format or match_archives instead.',
-                )
-            )
+                ),
+            ),
         )
 
     return logs
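
Note on the startswith() hunk above: str.startswith() accepts a tuple of prefixes, so the chained or-expression collapses to a single call, which is the kind of simplification Ruff flags. A minimal sketch of the idiom (hypothetical path value, not from this commit):

    path = 'sftp://user@host/./repo'  # hypothetical example value
    # One call replaces five chained path.startswith(...) checks.
    if path.startswith(('ssh://', 'sftp://', 'rclone:', 's3:', 'b2:')):
        print('already a recognized remote repository path')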

+ 15 - 15
borgmatic/config/override.py

@@ -18,7 +18,7 @@ def set_values(config, keys, value):
     if len(keys) == 1:
         if isinstance(config, list):
             raise ValueError(
-                'When overriding a list option, the value must use list syntax (e.g., "[foo, bar]" or "[{key: value}]" as appropriate)'
+                'When overriding a list option, the value must use list syntax (e.g., "[foo, bar]" or "[{key: value}]" as appropriate)',
             )
 
         config[first_key] = value
@@ -69,11 +69,11 @@ def type_for_option(schema, option_keys):
     '''
     option_schema = schema
 
-    for key in option_keys:
-        try:
+    try:
+        for key in option_keys:
             option_schema = option_schema['properties'][key]
-        except KeyError:
-            return None
+    except KeyError:
+        return None
 
     try:
         return option_schema['type']
@@ -103,8 +103,8 @@ def parse_overrides(raw_overrides, schema):
 
     parsed_overrides = []
 
-    for raw_override in raw_overrides:
-        try:
+    try:
+        for raw_override in raw_overrides:
             raw_keys, value = raw_override.split('=', 1)
             keys = tuple(raw_keys.split('.'))
             option_type = type_for_option(schema, keys)
@@ -113,14 +113,14 @@ def parse_overrides(raw_overrides, schema):
                 (
                     keys,
                     convert_value_type(value, option_type),
-                )
+                ),
             )
-        except ValueError:
-            raise ValueError(
-                f"Invalid override '{raw_override}'. Make sure you use the form: OPTION=VALUE or OPTION.SUBOPTION=VALUE"
-            )
-        except ruamel.yaml.error.YAMLError as error:
-            raise ValueError(f"Invalid override '{raw_override}': {error.problem}")
+    except ValueError:
+        raise ValueError(
+            f"Invalid override '{raw_override}'. Make sure you use the form: OPTION=VALUE or OPTION.SUBOPTION=VALUE",
+        )
+    except ruamel.yaml.error.YAMLError as error:
+        raise ValueError(f"Invalid override '{raw_override}': {error.problem}")
 
     return tuple(parsed_overrides)
 
@@ -139,7 +139,7 @@ def apply_overrides(config, schema, raw_overrides):
 
     if overrides:
         logger.warning(
-            "The --override flag is deprecated and will be removed from a future release. Instead, use a command-line flag corresponding to the configuration option you'd like to set."
+            "The --override flag is deprecated and will be removed from a future release. Instead, use a command-line flag corresponding to the configuration option you'd like to set.",
         )
 
     for keys, value in overrides:
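
Note on the type_for_option() and parse_overrides() hunks above: hoisting try/except out of the for loop avoids re-entering the handler machinery on every iteration (the same motivation as the # noqa: PERF203 seen later in this commit). It appears behavior-preserving here because every handler returns or re-raises, so the first failure ends the loop either way. A minimal sketch of the transformation, with hypothetical values:

    raw_overrides = ['log_json=true', 'verbosity=2']  # hypothetical input

    # Before: try/except sat inside the loop body. After: one handler wraps it.
    try:
        for raw_override in raw_overrides:
            keys, value = raw_override.split('=', 1)  # raises ValueError if no '='
    except ValueError:
        raise ValueError(f"Invalid override '{raw_override}'")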

+ 8 - 8
borgmatic/config/paths.py

@@ -1,3 +1,4 @@
+import contextlib
 import logging
 import os
 import tempfile
@@ -34,7 +35,8 @@ TEMPORARY_DIRECTORY_PREFIX = 'borgmatic-'
 
 
 def replace_temporary_subdirectory_with_glob(
-    path, temporary_directory_prefix=TEMPORARY_DIRECTORY_PREFIX
+    path,
+    temporary_directory_prefix=TEMPORARY_DIRECTORY_PREFIX,
 ):
     '''
     Given an absolute temporary directory path and an optional temporary directory prefix, look for
@@ -124,7 +126,7 @@ class Runtime_directory:
                 base_path if final_directory == 'borgmatic' else runtime_directory,
                 '.',  # Borg 1.4+ "slashdot" hack.
                 'borgmatic',
-            )
+            ),
         )
         os.makedirs(self.runtime_path, mode=0o700, exist_ok=True)
 
@@ -141,13 +143,11 @@
         Delete any temporary directory that was created as part of initialization.
         '''
         if self.temporary_directory:
-            try:
-                self.temporary_directory.cleanup()
             # The cleanup() call errors if, for instance, there's still a
             # mounted filesystem within the temporary directory. There's
             # nothing we can do about that here, so swallow the error.
-            except OSError:
-                pass
+            with contextlib.suppress(OSError):
+                self.temporary_directory.cleanup()
 
 
 def make_runtime_directory_glob(borgmatic_runtime_directory):
@@ -160,7 +160,7 @@ def make_runtime_directory_glob(borgmatic_runtime_directory):
         *(
             '*' if subdirectory.startswith(TEMPORARY_DIRECTORY_PREFIX) else subdirectory
             for subdirectory in os.path.normpath(borgmatic_runtime_directory).split(os.path.sep)
-        )
+        ),
     )
 
 
@@ -177,5 +177,5 @@ def get_borgmatic_state_directory(config):
             or os.environ.get('STATE_DIRECTORY')  # Set by systemd if configured.
             or '~/.local/state',
             'borgmatic',
-        )
+        ),
     )
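
Note on the cleanup() hunk above: a try/except/pass becomes contextlib.suppress(), the standard-library equivalent that Ruff's flake8-simplify checks recommend; the intent (ignore this one error) is stated in a single line. A standalone sketch with a hypothetical path:

    import contextlib
    import os

    # Equivalent to: try: os.remove(...) / except FileNotFoundError: pass
    with contextlib.suppress(FileNotFoundError):
        os.remove('/tmp/borgmatic-example-file')  # hypothetical path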

+ 4 - 10
borgmatic/config/schema.py

@@ -14,8 +14,8 @@ def get_properties(schema):
             item
             for item in itertools.chain(
                 *itertools.zip_longest(
-                    *[sub_schema['properties'].items() for sub_schema in schema['oneOf']]
-                )
+                    *[sub_schema['properties'].items() for sub_schema in schema['oneOf']],
+                ),
             )
             if item is not None
         )
@@ -61,12 +61,6 @@ def compare_types(schema_type, target_types, match=any):
     list must be in the target types.
     '''
     if isinstance(schema_type, list):
-        if match(element_schema_type in target_types for element_schema_type in schema_type):
-            return True
+        return match(element_schema_type in target_types for element_schema_type in schema_type)
 
-        return False
-
-    if schema_type in target_types:
-        return True
-
-    return False
+    return schema_type in target_types
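
Note on the compare_types() hunk above: an if that only returns True or False reduces to returning the boolean expression itself, which is the simplification Ruff applies here. A small hypothetical example of the same rewrite:

    def is_local(repository_path):
        # Instead of: if ':' not in repository_path: return True / return False
        return ':' not in repository_path

    print(is_local('/mnt/backups'), is_local('ssh://host/./repo'))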

+ 15 - 8
borgmatic/config/validate.py

@@ -17,7 +17,7 @@ def schema_filename():
     '''
     schema_path = os.path.join(os.path.dirname(borgmatic.config.__file__), 'schema.yaml')
 
-    with open(schema_path):
+    with open(schema_path, encoding='utf-8'):
         return schema_path
 
 
@@ -97,7 +97,11 @@ def apply_logical_validation(config_filename, parsed_configuration):
 
 
 def parse_configuration(
-    config_filename, schema_filename, arguments, overrides=None, resolve_env=True
+    config_filename,
+    schema_filename,
+    arguments,
+    overrides=None,
+    resolve_env=True,
 ):
     '''
     Given the path to a config filename in YAML format, the path to a schema filename in a YAML
@@ -147,7 +151,8 @@ def parse_configuration(
 
     if validation_errors:
         raise Validation_error(
-            config_filename, tuple(format_json_error(error) for error in validation_errors)
+            config_filename,
+            tuple(format_json_error(error) for error in validation_errors),
         )
 
     apply_logical_validation(config_filename, config)
@@ -166,13 +171,14 @@ def normalize_repository_path(repository, base=None):
         return (
             os.path.abspath(os.path.join(base, repository)) if base else os.path.abspath(repository)
         )
-    elif repository.startswith('file://'):
+
+    if repository.startswith('file://'):
         local_path = repository.partition('file://')[-1]
         return (
             os.path.abspath(os.path.join(base, local_path)) if base else os.path.abspath(local_path)
         )
-    else:
-        return repository
+
+    return repository
 
 
 def glob_match(first, second):
@@ -199,7 +205,8 @@ def repositories_match(first, second):
         second = {'path': second, 'label': second}
 
     return glob_match(first.get('label'), second.get('label')) or glob_match(
-        normalize_repository_path(first.get('path')), normalize_repository_path(second.get('path'))
+        normalize_repository_path(first.get('path')),
+        normalize_repository_path(second.get('path')),
    )
 
 
@@ -220,7 +227,7 @@ def guard_configuration_contains_repository(repository, configurations):
             for config in configurations.values()
             for config_repository in config['repositories']
             if repositories_match(config_repository, repository)
-        )
+        ),
     )
 
     if count == 0:
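
Note on the open() hunks in this file (and in the credential hooks further down): each call gains an explicit encoding='utf-8', per Ruff's unspecified-encoding check; without the argument, open() falls back to the platform's locale encoding, which can differ across systems. A sketch with a hypothetical file name:

    # Flagged form: open('config.yaml').read() uses the locale's default encoding.
    with open('config.yaml', encoding='utf-8') as config_file:  # hypothetical file
        text = config_file.read()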

+ 30 - 18
borgmatic/execute.py

@@ -43,12 +43,13 @@ def interpret_exit_code(command, exit_code, borg_local_path=None, borg_exit_code
 
 
                 if treat_as == 'error':
                     logger.error(
-                        f'Treating exit code {exit_code} as an error, as per configuration'
+                        f'Treating exit code {exit_code} as an error, as per configuration',
                     )
                     return Exit_status.ERROR
-                elif treat_as == 'warning':
+
+                if treat_as == 'warning':
                     logger.warning(
-                        f'Treating exit code {exit_code} as a warning, as per configuration'
+                        f'Treating exit code {exit_code} as a warning, as per configuration',
                     )
                     return Exit_status.WARNING
 
@@ -103,7 +104,7 @@ def append_last_lines(last_lines, captured_output, line, output_log_level):
         logger.log(output_log_level, line)
 
 
-def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, borg_exit_codes):
+def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, borg_exit_codes):  # noqa: PLR0912
     '''
     Given a sequence of subprocess.Popen() instances for multiple processes, log the output for each
     process with the requested log level. Additionally, raise a CalledProcessError if a process
@@ -132,7 +133,7 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
     still_running = True
 
     # Log output for each process until they all exit.
-    while True:
+    while True:  # noqa: PLR1702
         if output_buffers:
             (ready_buffers, _, _) = select.select(output_buffers, [], [])
 
@@ -182,7 +183,7 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
             command = process.args.split(' ') if isinstance(process.args, str) else process.args
             exit_status = interpret_exit_code(command, exit_code, borg_local_path, borg_exit_codes)
 
-            if exit_status in (Exit_status.ERROR, Exit_status.WARNING):
+            if exit_status in {Exit_status.ERROR, Exit_status.WARNING}:
                 # If an error occurs, include its output in the raised exception so that we don't
                 # inadvertently hide error output.
                 output_buffer = output_buffer_for_process(process, exclude_stdouts)
@@ -195,7 +196,10 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
                         break
 
                     append_last_lines(
-                        last_lines, captured_outputs[process], line, output_log_level=logging.ERROR
+                        last_lines,
+                        captured_outputs[process],
+                        line,
+                        output_log_level=logging.ERROR,
                     )
 
                 if len(last_lines) == ERROR_OUTPUT_MAX_LINE_COUNT:
@@ -210,7 +214,9 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
 
                 if exit_status == Exit_status.ERROR:
                     raise subprocess.CalledProcessError(
-                        exit_code, command_for_process(process), '\n'.join(last_lines)
+                        exit_code,
+                        command_for_process(process),
+                        '\n'.join(last_lines),
                     )
 
                 still_running = False
@@ -221,6 +227,8 @@ def log_outputs(processes, exclude_stdouts, output_log_level, borg_local_path, b
             process: '\n'.join(output_lines) for process, output_lines in captured_outputs.items()
         }
 
+    return None
+
 
 SECRET_COMMAND_FLAG_NAMES = {'--password'}
 
@@ -256,19 +264,19 @@ def log_command(full_command, input_file=None, output_file=None, environment=Non
             ' '.join(
                 tuple(
                     f'{key}=***'
-                    for key in (environment or {}).keys()
+                    for key in (environment or {})
                     if any(
                         key.startswith(prefix)
                         for prefix in PREFIXES_OF_ENVIRONMENT_VARIABLES_TO_LOG
                     )
                 )
-                + mask_command_secrets(full_command)
+                + mask_command_secrets(full_command),
             ),
             width=MAX_LOGGED_COMMAND_LENGTH,
             placeholder=' ...',
         )
         + (f" < {getattr(input_file, 'name', input_file)}" if input_file else '')
-        + (f" > {getattr(output_file, 'name', output_file)}" if output_file else '')
+        + (f" > {getattr(output_file, 'name', output_file)}" if output_file else ''),
     )
 
 
@@ -309,12 +317,12 @@ def execute_command(
     do_not_capture = bool(output_file is DO_NOT_CAPTURE)
     command = ' '.join(full_command) if shell else full_command
 
-    process = subprocess.Popen(
+    process = subprocess.Popen(  # noqa: S603
         command,
         stdin=input_file,
         stdout=None if do_not_capture else (output_file or subprocess.PIPE),
         stderr=None if do_not_capture else (subprocess.PIPE if output_file else subprocess.STDOUT),
-        shell=shell,  # noqa: S602
+        shell=shell,
         env=environment,
         cwd=working_directory,
         close_fds=close_fds,
@@ -331,6 +339,8 @@
             borg_exit_codes,
         )
 
+    return None
+
 
 def execute_command_and_capture_output(
     full_command,
@@ -360,11 +370,11 @@
     command = ' '.join(full_command) if shell else full_command
 
     try:
-        output = subprocess.check_output(
+        output = subprocess.check_output(  # noqa: S603
             command,
             stdin=input_file,
             stderr=subprocess.STDOUT if capture_stderr else None,
-            shell=shell,  # noqa: S602
+            shell=shell,
             env=environment,
             cwd=working_directory,
             close_fds=close_fds,
@@ -418,14 +428,14 @@ def execute_command_with_processes(
     command = ' '.join(full_command) if shell else full_command
 
     try:
-        command_process = subprocess.Popen(
+        command_process = subprocess.Popen(  # noqa: S603
            command,
            stdin=input_file,
            stdout=None if do_not_capture else (output_file or subprocess.PIPE),
            stderr=(
                None if do_not_capture else (subprocess.PIPE if output_file else subprocess.STDOUT)
            ),
-            shell=shell,  # noqa: S602
+            shell=shell,
            env=environment,
            cwd=working_directory,
            close_fds=close_fds,
@@ -442,7 +452,7 @@
 
     with borgmatic.logger.Log_prefix(None):  # Log command output without any prefix.
         captured_outputs = log_outputs(
-            tuple(processes) + (command_process,),
+            (*processes, command_process),
             (input_file, output_file),
             output_log_level,
             borg_local_path,
@@ -451,3 +461,5 @@
 
     if output_log_level is None:
         return captured_outputs.get(command_process)
+
+    return None
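
Note on this file: the subprocess suppressions move from the shell=shell argument (S602) onto the call itself (S603), the branch and nesting counts are acknowledged with PLR0912/PLR1702, and two recurring rewrites appear: tuple concatenation becomes iterable unpacking, and membership tests against a fixed set of options use a set literal. A minimal sketch with hypothetical values:

    processes = ('borg', 'pg_dump')  # hypothetical values
    command_process = 'mariadb-dump'

    # Before: tuple(processes) + (command_process,)
    combined = (*processes, command_process)
    assert combined == ('borg', 'pg_dump', 'mariadb-dump')

    # Before: status in (ERROR, WARNING); a set literal reads as "one of these".
    assert 'error' in {'error', 'warning'}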

+ 4 - 4
borgmatic/hooks/command.py

@@ -42,7 +42,7 @@ def interpolate_context(hook_description, command, context):
         # be a Borg placeholder, as Borg should hopefully consume it.
         if unsupported_variable not in BORG_PLACEHOLDERS:
             logger.warning(
-                f'Variable "{unsupported_variable}" is not supported in the {hook_description} hook'
+                f'Variable "{unsupported_variable}" is not supported in the {hook_description} hook',
             )
 
     return command
@@ -86,7 +86,7 @@ def filter_hooks(command_hooks, before=None, after=None, action_names=None, stat
     )
 
 
-def execute_hooks(command_hooks, umask, working_directory, dry_run, **context):
+def execute_hooks(command_hooks, umask, working_directory, dry_run, **context):  # noqa: PLR0912
     '''
     Given a sequence of command hook dicts from configuration, a umask to execute with (or None), a
     working directory to execute with, and whether this is a dry run, run the commands for each
@@ -139,12 +139,12 @@ def execute_hooks(command_hooks, umask, working_directory, dry_run, **context):
                 if dry_run:
                     continue
 
-                borgmatic.execute.execute_command(
+                borgmatic.execute.execute_command(  # noqa: S604
                     [command],
                     output_log_level=(
                         logging.ERROR if hook_config.get('after') == 'error' else logging.ANSWER
                     ),
-                    shell=True,  # noqa: S604
+                    shell=True,
                     environment=make_environment(os.environ),
                     working_directory=working_directory,
                 )

+ 2 - 1
borgmatic/hooks/credential/container.py

@@ -34,7 +34,8 @@ def load_credential(hook_config, config, credential_parameters):
                 config.get('working_directory', ''),
                 (hook_config or {}).get('secrets_directory', DEFAULT_SECRETS_DIRECTORY),
                 secret_name,
-            )
+            ),
+            encoding='utf-8',
         ) as secret_file:
             return secret_file.read().rstrip(os.linesep)
     except (FileNotFoundError, OSError) as error:

+ 2 - 1
borgmatic/hooks/credential/file.py

@@ -23,7 +23,8 @@ def load_credential(hook_config, config, credential_parameters):
 
 
     try:
         with open(
-            os.path.join(config.get('working_directory', ''), expanded_credential_path)
+            os.path.join(config.get('working_directory', ''), expanded_credential_path),
+            encoding='utf-8',
         ) as credential_file:
             return credential_file.read().rstrip(os.linesep)
     except (FileNotFoundError, OSError) as error:

+ 4 - 1
borgmatic/hooks/credential/parse.py

@@ -120,5 +120,8 @@ def resolve_credential(value, config):
         raise ValueError(f'Cannot load credential with invalid syntax "{value}"')
 
     return borgmatic.hooks.dispatch.call_hook(
-        'load_credential', config, hook_name, tuple(credential_parameters)
+        'load_credential',
+        config,
+        hook_name,
+        tuple(credential_parameters),
     )

+ 4 - 2
borgmatic/hooks/credential/systemd.py

@@ -28,14 +28,16 @@ def load_credential(hook_config, config, credential_parameters):
 
 
     if not credentials_directory:
         raise ValueError(
-            f'Cannot load credential "{credential_name}" because the systemd CREDENTIALS_DIRECTORY environment variable is not set'
+            f'Cannot load credential "{credential_name}" because the systemd CREDENTIALS_DIRECTORY environment variable is not set',
         )
 
     if not CREDENTIAL_NAME_PATTERN.match(credential_name):
         raise ValueError(f'Cannot load invalid credential name "{credential_name}"')
 
     try:
-        with open(os.path.join(credentials_directory, credential_name)) as credential_file:
+        with open(
+            os.path.join(credentials_directory, credential_name), encoding='utf-8'
+        ) as credential_file:
             return credential_file.read().rstrip(os.linesep)
     except (FileNotFoundError, OSError) as error:
         logger.warning(error)

+ 15 - 12
borgmatic/hooks/data_source/bootstrap.py

@@ -1,3 +1,4 @@
+import contextlib
 import glob
 import importlib
 import json
@@ -38,7 +39,9 @@ def dump_data_sources(
         return []
 
     borgmatic_manifest_path = os.path.join(
-        borgmatic_runtime_directory, 'bootstrap', 'manifest.json'
+        borgmatic_runtime_directory,
+        'bootstrap',
+        'manifest.json',
     )
 
     if dry_run:
@@ -46,7 +49,7 @@ def dump_data_sources(
 
     os.makedirs(os.path.dirname(borgmatic_manifest_path), exist_ok=True)
 
-    with open(borgmatic_manifest_path, 'w') as manifest_file:
+    with open(borgmatic_manifest_path, 'w', encoding='utf-8') as manifest_file:
         json.dump(
             {
                 'borgmatic_version': importlib.metadata.version('borgmatic'),
@@ -57,7 +60,8 @@ def dump_data_sources(
 
     patterns.extend(
         borgmatic.borg.pattern.Pattern(
-            config_path, source=borgmatic.borg.pattern.Pattern_source.HOOK
+            config_path,
+            source=borgmatic.borg.pattern.Pattern_source.HOOK,
         )
         for config_path in config_paths
     )
@@ -65,7 +69,7 @@ def dump_data_sources(
         borgmatic.borg.pattern.Pattern(
             os.path.join(borgmatic_runtime_directory, 'bootstrap'),
             source=borgmatic.borg.pattern.Pattern_source.HOOK,
-        )
+        ),
     )
 
     return []
@@ -86,7 +90,7 @@ def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, d
         'bootstrap',
     )
     logger.debug(
-        f'Looking for bootstrap manifest files to remove in {manifest_glob}{dry_run_label}'
+        f'Looking for bootstrap manifest files to remove in {manifest_glob}{dry_run_label}',
    )
 
     for manifest_directory in glob.glob(manifest_glob):
@@ -96,19 +100,18 @@ def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, d
         if dry_run:
             continue
 
-        try:
+        with contextlib.suppress(FileNotFoundError):
             os.remove(manifest_file_path)
-        except FileNotFoundError:
-            pass
 
-        try:
+        with contextlib.suppress(FileNotFoundError):
             os.rmdir(manifest_directory)
-        except FileNotFoundError:
-            pass
 
 
 def make_data_source_dump_patterns(
-    hook_config, config, borgmatic_runtime_directory, name=None
+    hook_config,
+    config,
+    borgmatic_runtime_directory,
+    name=None,
 ):  # pragma: no cover
     '''
     Restores are implemented via the separate, purpose-specific "bootstrap" action rather than the

+ 32 - 24
borgmatic/hooks/data_source/btrfs.py

@@ -31,8 +31,8 @@ def get_contained_subvolume_paths(btrfs_command, subvolume_path):
     '''
     try:
         btrfs_output = borgmatic.execute.execute_command_and_capture_output(
-            tuple(btrfs_command.split(' '))
-            + (
+            (
+                *btrfs_command.split(' '),
                 'subvolume',
                 'list',
                 subvolume_path,
@@ -41,15 +41,18 @@
         )
     except subprocess.CalledProcessError as error:
         logger.debug(
-            f'Ignoring Btrfs subvolume {subvolume_path} because of error listing its subvolumes: {error}'
+            f'Ignoring Btrfs subvolume {subvolume_path} because of error listing its subvolumes: {error}',
        )
 
         return ()
 
-    return (subvolume_path,) + tuple(
-        os.path.join(subvolume_path, line.split(' ')[-1])
-        for line in btrfs_output.splitlines()
-        if line.strip()
+    return (
+        subvolume_path,
+        *tuple(
+            os.path.join(subvolume_path, line.split(' ')[-1])
+            for line in btrfs_output.splitlines()
+            if line.strip()
+        ),
    )
 
 
@@ -62,8 +65,8 @@ def get_all_subvolume_paths(btrfs_command, findmnt_command):
     system.
     '''
     findmnt_output = borgmatic.execute.execute_command_and_capture_output(
-        tuple(findmnt_command.split(' '))
-        + (
+        (
+            *findmnt_command.split(' '),
            '-t',  # Filesystem type.
            'btrfs',
            '--json',
@@ -88,8 +91,8 @@
                        else (filesystem['target'],)
                    )
                    for filesystem in json.loads(findmnt_output)['filesystems']
-                )
-            )
+                ),
+            ),
        )
     except json.JSONDecodeError as error:
         raise ValueError(f'Invalid {findmnt_command} JSON output: {error}')
@@ -108,8 +111,8 @@ def get_subvolume_property(btrfs_command, subvolume_path, property_name):
     Raise subprocess.CalledProcessError if the btrfs command errors.
     '''
     output = borgmatic.execute.execute_command_and_capture_output(
-        tuple(btrfs_command.split(' '))
-        + (
+        (
+            *btrfs_command.split(' '),
            'property',
            'get',
            '-t',  # Type.
@@ -145,9 +148,9 @@ def omit_read_only_subvolume_paths(btrfs_command, subvolume_paths):
                logger.debug(f'Ignoring Btrfs subvolume {subvolume_path} because it is read-only')
            else:
                retained_subvolume_paths.append(subvolume_path)
-        except subprocess.CalledProcessError as error:
+        except subprocess.CalledProcessError as error:  # noqa: PERF203
            logger.debug(
-                f'Error determining read-only status of Btrfs subvolume {subvolume_path}: {error}'
+                f'Error determining read-only status of Btrfs subvolume {subvolume_path}: {error}',
            )
 
     return tuple(retained_subvolume_paths)
@@ -174,14 +177,16 @@ def get_subvolumes(btrfs_command, findmnt_command, patterns=None):
     # this process, so no two subvolumes end up with the same contained patterns.)
     for subvolume_path in reversed(
        omit_read_only_subvolume_paths(
-            btrfs_command, get_all_subvolume_paths(btrfs_command, findmnt_command)
-        )
+            btrfs_command,
+            get_all_subvolume_paths(btrfs_command, findmnt_command),
+        ),
     ):
        subvolumes.extend(
            Subvolume(subvolume_path, contained_patterns)
            for contained_patterns in (
                borgmatic.hooks.data_source.snapshot.get_contained_patterns(
-                    subvolume_path, candidate_patterns
+                    subvolume_path,
+                    candidate_patterns,
                ),
            )
            if patterns is None
@@ -282,8 +287,8 @@ def snapshot_subvolume(btrfs_command, subvolume_path, snapshot_path):  # pragma:
     os.makedirs(os.path.dirname(snapshot_path), mode=0o700, exist_ok=True)
 
     borgmatic.execute.execute_command(
-        tuple(btrfs_command.split(' '))
-        + (
+        (
+            *btrfs_command.split(' '),
            'subvolume',
            'snapshot',
            '-r',  # Read-only.
@@ -356,8 +361,8 @@ def delete_snapshot(btrfs_command, snapshot_path):  # pragma: no cover
     Given a Btrfs command to run and the name of a snapshot path, delete it.
     '''
     borgmatic.execute.execute_command(
-        tuple(btrfs_command.split(' '))
-        + (
+        (
+            *btrfs_command.split(' '),
            'subvolume',
            'delete',
            snapshot_path,
@@ -399,7 +404,7 @@ def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, d
        )
 
        logger.debug(
-            f'Looking for snapshots to remove in {subvolume_snapshots_glob}{dry_run_label}'
+            f'Looking for snapshots to remove in {subvolume_snapshots_glob}{dry_run_label}',
        )
 
        for snapshot_path in glob.glob(subvolume_snapshots_glob):
@@ -429,7 +434,10 @@
 
 
 def make_data_source_dump_patterns(
-    hook_config, config, borgmatic_runtime_directory, name=None
+    hook_config,
+    config,
+    borgmatic_runtime_directory,
+    name=None,
 ):  # pragma: no cover
     '''
     Restores aren't implemented, because stored files can be extracted directly with "extract".
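
Note on the hook modules above and below: the same unpacking rewrite is applied to every subprocess argument tuple, replacing tuple(command.split(' ')) + (...) concatenation with a single tuple literal. A sketch with a hypothetical configured command:

    btrfs_command = '/usr/bin/btrfs -q'  # hypothetical configured command

    # Before: tuple(btrfs_command.split(' ')) + ('subvolume', 'list', '/mnt')
    full_command = (*btrfs_command.split(' '), 'subvolume', 'list', '/mnt')
    assert full_command == ('/usr/bin/btrfs', '-q', 'subvolume', 'list', '/mnt')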

+ 3 - 1
borgmatic/hooks/data_source/dump.py

@@ -27,7 +27,9 @@ def make_data_source_dump_filename(dump_path, name, hostname=None, port=None):
         raise ValueError(f'Invalid data source name {name}')
 
     return os.path.join(
-        dump_path, (hostname or 'localhost') + ('' if port is None else f':{port}'), name
+        dump_path,
+        (hostname or 'localhost') + ('' if port is None else f':{port}'),
+        name,
     )
 
 
 
 

+ 36 - 25
borgmatic/hooks/data_source/lvm.py

@@ -24,7 +24,8 @@ def use_streaming(hook_config, config):  # pragma: no cover
 
 
 BORGMATIC_SNAPSHOT_PREFIX = 'borgmatic-'
 BORGMATIC_SNAPSHOT_PREFIX = 'borgmatic-'
 Logical_volume = collections.namedtuple(
 Logical_volume = collections.namedtuple(
-    'Logical_volume', ('name', 'device_path', 'mount_point', 'contained_patterns')
+    'Logical_volume',
+    ('name', 'device_path', 'mount_point', 'contained_patterns'),
 )
 )
 
 
 
 
@@ -44,15 +45,15 @@ def get_logical_volumes(lsblk_command, patterns=None):
         devices_info = json.loads(
         devices_info = json.loads(
             borgmatic.execute.execute_command_and_capture_output(
             borgmatic.execute.execute_command_and_capture_output(
                 # Use lsblk instead of lvs here because lvs can't show active mounts.
                 # Use lsblk instead of lvs here because lvs can't show active mounts.
-                tuple(lsblk_command.split(' '))
-                + (
+                (
+                    *lsblk_command.split(' '),
                     '--output',
                     '--output',
                     'name,path,mountpoint,type',
                     'name,path,mountpoint,type',
                     '--json',
                     '--json',
                     '--list',
                     '--list',
                 ),
                 ),
                 close_fds=True,
                 close_fds=True,
-            )
+            ),
         )
         )
     except json.JSONDecodeError as error:
     except json.JSONDecodeError as error:
         raise ValueError(f'Invalid {lsblk_command} JSON output: {error}')
         raise ValueError(f'Invalid {lsblk_command} JSON output: {error}')
@@ -73,7 +74,8 @@ def get_logical_volumes(lsblk_command, patterns=None):
             if device['mountpoint'] and device['type'] == 'lvm'
             if device['mountpoint'] and device['type'] == 'lvm'
             for contained_patterns in (
             for contained_patterns in (
                 borgmatic.hooks.data_source.snapshot.get_contained_patterns(
-                    device['mountpoint'], candidate_patterns
+                    device['mountpoint'],
+                    candidate_patterns,
                 ),
             )
             if not patterns
@@ -98,8 +100,8 @@ def snapshot_logical_volume(
     snapshot, and a snapshot size string, create a new LVM snapshot.
     '''
     borgmatic.execute.execute_command(
-        tuple(lvcreate_command.split(' '))
-        + (
+        (
+            *lvcreate_command.split(' '),
             '--snapshot',
             ('--extents' if '%' in snapshot_size else '--size'),
             snapshot_size,
@@ -123,8 +125,8 @@ def mount_snapshot(mount_command, snapshot_device, snapshot_mount_path):  # prag
     os.makedirs(snapshot_mount_path, mode=0o700, exist_ok=True)
 
     borgmatic.execute.execute_command(
-        tuple(mount_command.split(' '))
-        + (
+        (
+            *mount_command.split(' '),
             '-o',
             'ro',
             snapshot_device,
@@ -162,7 +164,7 @@ def make_borg_snapshot_pattern(pattern, logical_volume, normalized_runtime_direc
         # /var/spool would result in overlapping snapshot patterns and therefore colliding mount
         # attempts.
         hashlib.shake_256(logical_volume.mount_point.encode('utf-8')).hexdigest(
-            MOUNT_POINT_HASH_LENGTH
+            MOUNT_POINT_HASH_LENGTH,
         ),
         '.',  # Borg 1.4+ "slashdot" hack.
         # Included so that the source directory ends up in the Borg archive at its "original" path.
@@ -218,7 +220,7 @@ def dump_data_sources(
     for logical_volume in requested_logical_volumes:
         snapshot_name = f'{logical_volume.name}_{snapshot_suffix}'
         logger.debug(
-            f'Creating LVM snapshot {snapshot_name} of {logical_volume.mount_point}{dry_run_label}'
+            f'Creating LVM snapshot {snapshot_name} of {logical_volume.mount_point}{dry_run_label}',
         )
 
         if not dry_run:
@@ -233,7 +235,8 @@ def dump_data_sources(
         if not dry_run:
             try:
                 snapshot = get_snapshots(
-                    hook_config.get('lvs_command', 'lvs'), snapshot_name=snapshot_name
+                    hook_config.get('lvs_command', 'lvs'),
+                    snapshot_name=snapshot_name,
                 )[0]
             except IndexError:
                 raise ValueError(f'Cannot find LVM snapshot {snapshot_name}')
@@ -244,25 +247,29 @@ def dump_data_sources(
             normalized_runtime_directory,
             'lvm_snapshots',
             hashlib.shake_256(logical_volume.mount_point.encode('utf-8')).hexdigest(
-                MOUNT_POINT_HASH_LENGTH
+                MOUNT_POINT_HASH_LENGTH,
             ),
             logical_volume.mount_point.lstrip(os.path.sep),
         )
 
         logger.debug(
-            f'Mounting LVM snapshot {snapshot_name} at {snapshot_mount_path}{dry_run_label}'
+            f'Mounting LVM snapshot {snapshot_name} at {snapshot_mount_path}{dry_run_label}',
         )
 
         if dry_run:
             continue
 
         mount_snapshot(
-            hook_config.get('mount_command', 'mount'), snapshot.device_path, snapshot_mount_path
+            hook_config.get('mount_command', 'mount'),
+            snapshot.device_path,
+            snapshot_mount_path,
         )
 
         for pattern in logical_volume.contained_patterns:
             snapshot_pattern = make_borg_snapshot_pattern(
-                pattern, logical_volume, normalized_runtime_directory
+                pattern,
+                logical_volume,
+                normalized_runtime_directory,
             )
 
             # Attempt to update the pattern in place, since pattern order matters to Borg.
@@ -279,7 +286,7 @@ def unmount_snapshot(umount_command, snapshot_mount_path):  # pragma: no cover
     Given a umount command to run and the mount path of a snapshot, unmount it.
     '''
     borgmatic.execute.execute_command(
-        tuple(umount_command.split(' ')) + (snapshot_mount_path,),
+        (*umount_command.split(' '), snapshot_mount_path),
         output_log_level=logging.DEBUG,
         close_fds=True,
     )
@@ -290,8 +297,8 @@ def remove_snapshot(lvremove_command, snapshot_device_path):  # pragma: no cover
     Given an lvremove command to run and the device path of a snapshot, remove it.
     '''
     borgmatic.execute.execute_command(
-        tuple(lvremove_command.split(' '))
-        + (
+        (
+            *lvremove_command.split(' '),
             '--force',  # Suppress an interactive "are you sure?" type prompt.
             snapshot_device_path,
         ),
@@ -316,8 +323,8 @@ def get_snapshots(lvs_command, snapshot_name=None):
         snapshot_info = json.loads(
             borgmatic.execute.execute_command_and_capture_output(
                 # Use lvs instead of lsblk here because lsblk can't filter to just snapshots.
-                tuple(lvs_command.split(' '))
-                + (
+                (
+                    *lvs_command.split(' '),
                     '--report-format',
                     'json',
                     '--options',
@@ -326,7 +333,7 @@ def get_snapshots(lvs_command, snapshot_name=None):
                     'lv_attr =~ ^s',  # Filter to just snapshots.
                 ),
                 close_fds=True,
-            )
+            ),
         )
     except json.JSONDecodeError as error:
         raise ValueError(f'Invalid {lvs_command} JSON output: {error}')
@@ -343,7 +350,7 @@ def get_snapshots(lvs_command, snapshot_name=None):
         raise ValueError(f'Invalid {lvs_command} output: Missing key "{error}"')
 
 
-def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, dry_run):
+def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, dry_run):  # noqa: PLR0912
     '''
     Given an LVM configuration dict, a configuration dict, the borgmatic runtime directory, and
     whether this is a dry run, unmount and delete any LVM snapshots created by borgmatic. If this is
@@ -381,7 +388,8 @@ def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, d
 
         for logical_volume in logical_volumes:
             snapshot_mount_path = os.path.join(
-                snapshots_directory, logical_volume.mount_point.lstrip(os.path.sep)
+                snapshots_directory,
+                logical_volume.mount_point.lstrip(os.path.sep),
             )
 
             # If the snapshot mount path is empty, this is probably just a "shadow" of a nested
@@ -440,7 +448,10 @@ def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, d
 
 
 def make_data_source_dump_patterns(
-    hook_config, config, borgmatic_runtime_directory, name=None
+    hook_config,
+    config,
+    borgmatic_runtime_directory,
+    name=None,
 ):  # pragma: no cover
     '''
     Restores aren't implemented, because stored files can be extracted directly with "extract".

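A note on the rewrite that repeats throughout this file: building the argument tuple as
(*command.split(' '), ...) instead of tuple(command.split(' ')) + (...) is Ruff's RUF005
(collection-literal-concatenation) fix. Both spellings are equivalent; the unpacked
literal just skips the intermediate tuple() call. A minimal sketch, with an illustrative
umount command string:

    umount_command = 'umount -l'

    # Before: concatenate two tuples.
    args_concatenated = tuple(umount_command.split(' ')) + ('/mnt/snapshot',)

    # After: unpack the split command directly into one tuple literal.
    args_unpacked = (*umount_command.split(' '), '/mnt/snapshot')

    assert args_concatenated == args_unpacked == ('umount', '-l', '/mnt/snapshot')
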
+ 34 - 16
borgmatic/hooks/data_source/mariadb.py

@@ -24,7 +24,7 @@ def make_dump_path(base_directory):  # pragma: no cover
     return dump.make_data_source_dump_path(base_directory, 'mariadb_databases')
 
 
-DEFAULTS_EXTRA_FILE_FLAG_PATTERN = re.compile('^--defaults-extra-file=(?P<filename>.*)$')
+DEFAULTS_EXTRA_FILE_FLAG_PATTERN = re.compile(r'^--defaults-extra-file=(?P<filename>.*)$')
 
 
 def parse_extra_options(extra_options):
@@ -71,7 +71,7 @@ def make_defaults_file_options(username=None, password=None, defaults_extra_file
         (
             (f'user={username}' if username is not None else ''),
             (f'password="{escaped_password}"' if escaped_password is not None else ''),
-        )
+        ),
     ).strip()
 
     if not values:
@@ -94,7 +94,7 @@ def make_defaults_file_options(username=None, password=None, defaults_extra_file
     include = f'!include {defaults_extra_filename}\n' if defaults_extra_filename else ''
 
     read_file_descriptor, write_file_descriptor = os.pipe()
-    os.write(write_file_descriptor, f'{include}[client]\n{values}'.encode('utf-8'))
+    os.write(write_file_descriptor, f'{include}[client]\n{values}'.encode())
     os.close(write_file_descriptor)
 
     # This plus subprocess.Popen(..., close_fds=False) in execute.py is necessary for the database
@@ -182,7 +182,7 @@ def execute_dump_command(
 
     if os.path.exists(dump_filename):
         logger.warning(
-            f'Skipping duplicate dump of MariaDB database "{database_name}" to {dump_filename}'
+            f'Skipping duplicate dump of MariaDB database "{database_name}" to {dump_filename}',
         )
         return None
 
@@ -263,10 +263,12 @@ def dump_data_sources(
     for database in databases:
         dump_path = make_dump_path(borgmatic_runtime_directory)
         username = borgmatic.hooks.credential.parse.resolve_credential(
-            database.get('username'), config
+            database.get('username'),
+            config,
         )
         password = borgmatic.hooks.credential.parse.resolve_credential(
-            database.get('password'), config
+            database.get('password'),
+            config,
         )
         environment = dict(
             os.environ,
@@ -277,7 +279,12 @@ def dump_data_sources(
             ),
         )
         dump_database_names = database_names_to_dump(
-            database, config, username, password, environment, dry_run
+            database,
+            config,
+            username,
+            password,
+            environment,
+            dry_run,
         )
 
         if not dump_database_names:
@@ -301,7 +308,7 @@ def dump_data_sources(
                         environment,
                         dry_run,
                         dry_run_label,
-                    )
+                    ),
                 )
         else:
             processes.append(
@@ -315,7 +322,7 @@ def dump_data_sources(
                     environment,
                     dry_run,
                     dry_run_label,
-                )
+                ),
             )
 
     if not dry_run:
@@ -323,14 +330,17 @@ def dump_data_sources(
             borgmatic.borg.pattern.Pattern(
                 os.path.join(borgmatic_runtime_directory, 'mariadb_databases'),
                 source=borgmatic.borg.pattern.Pattern_source.HOOK,
-            )
+            ),
         )
 
     return [process for process in processes if process]
 
 
 def remove_data_source_dumps(
-    databases, config, borgmatic_runtime_directory, dry_run
+    databases,
+    config,
+    borgmatic_runtime_directory,
+    dry_run,
 ):  # pragma: no cover
     '''
     Remove all database dump files for this hook regardless of the given databases. Use the
@@ -341,7 +351,10 @@ def remove_data_source_dumps(
 
 
 def make_data_source_dump_patterns(
-    databases, config, borgmatic_runtime_directory, name=None
+    databases,
+    config,
+    borgmatic_runtime_directory,
+    name=None,
 ):  # pragma: no cover
     '''
     Given a sequence of configuration dicts, a configuration dict, the borgmatic runtime directory,
@@ -353,10 +366,14 @@ def make_data_source_dump_patterns(
     return (
         dump.make_data_source_dump_filename(make_dump_path('borgmatic'), name, hostname='*'),
         dump.make_data_source_dump_filename(
-            make_dump_path(borgmatic_runtime_directory), name, hostname='*'
+            make_dump_path(borgmatic_runtime_directory),
+            name,
+            hostname='*',
         ),
         dump.make_data_source_dump_filename(
-            make_dump_path(borgmatic_source_directory), name, hostname='*'
+            make_dump_path(borgmatic_source_directory),
+            name,
+            hostname='*',
        ),
     )
 
@@ -378,10 +395,11 @@ def restore_data_source_dump(
     '''
     dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
     hostname = connection_params['hostname'] or data_source.get(
-        'restore_hostname', data_source.get('hostname')
+        'restore_hostname',
+        data_source.get('hostname'),
     )
     port = str(
-        connection_params['port'] or data_source.get('restore_port', data_source.get('port', ''))
+        connection_params['port'] or data_source.get('restore_port', data_source.get('port', '')),
     )
     tls = data_source.get('restore_tls', data_source.get('tls'))
     username = borgmatic.hooks.credential.parse.resolve_credential(

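Two smaller cleanups in mariadb.py ride along with the comma changes: the regex constant
gains an r'' prefix (raw strings are the conventional spelling for patterns, even when,
as here, the pattern contains no backslashes), and .encode('utf-8') loses its argument,
since UTF-8 is already the default for str.encode(), per Ruff's UP012
(unnecessary-encode-utf8) fix. A quick check of both behaviors:

    import re

    # Same pattern as the constant above; raw and non-raw are identical here.
    pattern = re.compile(r'^--defaults-extra-file=(?P<filename>.*)$')
    match = pattern.match('--defaults-extra-file=/tmp/extra.cnf')
    assert match.group('filename') == '/tmp/extra.cnf'

    # UP012: the explicit encoding argument is redundant.
    assert 'password'.encode() == 'password'.encode('utf-8')
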
+ 29 - 13
borgmatic/hooks/data_source/mongodb.py

@@ -78,7 +78,7 @@ def dump_data_sources(
         else:
             dump.create_named_pipe_for_dump(dump_filename)
             processes.append(
-                execute_command(command, shell=True, run_to_completion=False)  # noqa: S604
+                execute_command(command, shell=True, run_to_completion=False),  # noqa: S604
             )
 
     if not dry_run:
@@ -86,7 +86,7 @@ def dump_data_sources(
             borgmatic.borg.pattern.Pattern(
                 os.path.join(borgmatic_runtime_directory, 'mongodb_databases'),
                 source=borgmatic.borg.pattern.Pattern_source.HOOK,
-            )
+            ),
         )
 
     return processes
@@ -104,7 +104,7 @@ def make_password_config_file(password):
     logger.debug('Writing MongoDB password to configuration file pipe')
 
     read_file_descriptor, write_file_descriptor = os.pipe()
-    os.write(write_file_descriptor, f'password: {password}'.encode('utf-8'))
+    os.write(write_file_descriptor, f'password: {password}'.encode())
     os.close(write_file_descriptor)
 
     # This plus subprocess.Popen(..., close_fds=False) in execute.py is necessary for the database
@@ -135,8 +135,9 @@ def build_dump_command(database, config, dump_filename, dump_format):
                 '--username',
                 shlex.quote(
                     borgmatic.hooks.credential.parse.resolve_credential(
-                        database['username'], config
-                    )
+                        database['username'],
+                        config,
+                    ),
                 ),
             )
             if 'username' in database
@@ -159,7 +160,10 @@ def build_dump_command(database, config, dump_filename, dump_format):
 
 
 def remove_data_source_dumps(
-    databases, config, borgmatic_runtime_directory, dry_run
+    databases,
+    config,
+    borgmatic_runtime_directory,
+    dry_run,
 ):  # pragma: no cover
     '''
     Remove all database dump files for this hook regardless of the given databases. Use the
@@ -170,7 +174,10 @@ def remove_data_source_dumps(
 
 
 def make_data_source_dump_patterns(
-    databases, config, borgmatic_runtime_directory, name=None
+    databases,
+    config,
+    borgmatic_runtime_directory,
+    name=None,
 ):  # pragma: no cover
     '''
     Given a sequence of configuration dicts, a configuration dict, the borgmatic runtime directory,
@@ -182,10 +189,14 @@ def make_data_source_dump_patterns(
     return (
         dump.make_data_source_dump_filename(make_dump_path('borgmatic'), name, hostname='*'),
         dump.make_data_source_dump_filename(
-            make_dump_path(borgmatic_runtime_directory), name, hostname='*'
+            make_dump_path(borgmatic_runtime_directory),
+            name,
+            hostname='*',
         ),
         dump.make_data_source_dump_filename(
-            make_dump_path(borgmatic_source_directory), name, hostname='*'
+            make_dump_path(borgmatic_source_directory),
+            name,
+            hostname='*',
        ),
     )
 
@@ -216,7 +227,11 @@ def restore_data_source_dump(
         data_source.get('hostname'),
     )
     restore_command = build_restore_command(
-        extract_process, data_source, config, dump_filename, connection_params
+        extract_process,
+        data_source,
+        config,
+        dump_filename,
+        connection_params,
     )
 
     logger.debug(f"Restoring MongoDB database {data_source['name']}{dry_run_label}")
@@ -238,7 +253,8 @@ def build_restore_command(extract_process, database, config, dump_filename, conn
     Return the custom mongorestore_command from a single database configuration.
     '''
     hostname = connection_params['hostname'] or database.get(
-        'restore_hostname', database.get('hostname')
+        'restore_hostname',
+        database.get('hostname'),
     )
     port = str(connection_params['port'] or database.get('restore_port', database.get('port', '')))
     username = borgmatic.hooks.credential.parse.resolve_credential(
@@ -256,10 +272,10 @@ def build_restore_command(extract_process, database, config, dump_filename, conn
         config,
     )
 
-    command = list(
+    command = [
         shlex.quote(part)
         for part in shlex.split(database.get('mongorestore_command') or 'mongorestore')
-    )
+    ]
 
     if extract_process:
         command.append('--archive')

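The command = list(...) to list-comprehension change in build_restore_command is Ruff's
C400 (unnecessary-generator-list) fix: wrapping a generator expression in list() builds
the same list as a comprehension, with one extra call. A minimal sketch using shlex, as
the hook does:

    import shlex

    mongorestore_command = 'mongorestore --drop'

    # Before: list() around a generator expression.
    command = list(shlex.quote(part) for part in shlex.split(mongorestore_command))

    # After: a plain list comprehension with the same result.
    command = [shlex.quote(part) for part in shlex.split(mongorestore_command)]

    assert command == ['mongorestore', '--drop']
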
+ 40 - 16
borgmatic/hooks/data_source/mysql.py

@@ -51,7 +51,9 @@ def database_names_to_dump(database, config, username, password, environment, dr
         mysql_show_command
         + (
             borgmatic.hooks.data_source.mariadb.make_defaults_file_options(
-                username, password, defaults_extra_filename
+                username,
+                password,
+                defaults_extra_filename,
             )
             if password_transport == 'pipe'
             else ()
@@ -106,7 +108,7 @@ def execute_dump_command(
 
     if os.path.exists(dump_filename):
         logger.warning(
-            f'Skipping duplicate dump of MySQL database "{database_name}" to {dump_filename}'
+            f'Skipping duplicate dump of MySQL database "{database_name}" to {dump_filename}',
         )
         return None
 
@@ -121,7 +123,9 @@ def execute_dump_command(
         mysql_dump_command
         + (
             borgmatic.hooks.data_source.mariadb.make_defaults_file_options(
-                username, password, defaults_extra_filename
+                username,
+                password,
+                defaults_extra_filename,
             )
             if password_transport == 'pipe'
             else ()
@@ -190,10 +194,12 @@ def dump_data_sources(
     for database in databases:
         dump_path = make_dump_path(borgmatic_runtime_directory)
         username = borgmatic.hooks.credential.parse.resolve_credential(
-            database.get('username'), config
+            database.get('username'),
+            config,
        )
         password = borgmatic.hooks.credential.parse.resolve_credential(
-            database.get('password'), config
+            database.get('password'),
+            config,
        )
         environment = dict(
             os.environ,
@@ -204,7 +210,12 @@ def dump_data_sources(
             ),
         )
         dump_database_names = database_names_to_dump(
-            database, config, username, password, environment, dry_run
+            database,
+            config,
+            username,
+            password,
+            environment,
+            dry_run,
         )
 
         if not dump_database_names:
@@ -228,7 +239,7 @@ def dump_data_sources(
                         environment,
                         dry_run,
                         dry_run_label,
-                    )
+                    ),
                 )
         else:
             processes.append(
@@ -242,7 +253,7 @@ def dump_data_sources(
                     environment,
                     dry_run,
                     dry_run_label,
-                )
+                ),
             )
 
     if not dry_run:
@@ -250,14 +261,17 @@ def dump_data_sources(
             borgmatic.borg.pattern.Pattern(
                 os.path.join(borgmatic_runtime_directory, 'mysql_databases'),
                 source=borgmatic.borg.pattern.Pattern_source.HOOK,
-            )
+            ),
         )
 
     return [process for process in processes if process]
 
 
 def remove_data_source_dumps(
-    databases, config, borgmatic_runtime_directory, dry_run
+    databases,
+    config,
+    borgmatic_runtime_directory,
+    dry_run,
 ):  # pragma: no cover
     '''
     Remove all database dump files for this hook regardless of the given databases. Use the
@@ -268,7 +282,10 @@ def remove_data_source_dumps(
 
 
 def make_data_source_dump_patterns(
-    databases, config, borgmatic_runtime_directory, name=None
+    databases,
+    config,
+    borgmatic_runtime_directory,
+    name=None,
 ):  # pragma: no cover
     '''
     Given a sequence of configuration dicts, a configuration dict, the borgmatic runtime directory,
@@ -280,10 +297,14 @@ def make_data_source_dump_patterns(
     return (
         dump.make_data_source_dump_filename(make_dump_path('borgmatic'), name, hostname='*'),
         dump.make_data_source_dump_filename(
-            make_dump_path(borgmatic_runtime_directory), name, hostname='*'
+            make_dump_path(borgmatic_runtime_directory),
+            name,
+            hostname='*',
         ),
         dump.make_data_source_dump_filename(
-            make_dump_path(borgmatic_source_directory), name, hostname='*'
+            make_dump_path(borgmatic_source_directory),
+            name,
+            hostname='*',
        ),
     )
 
@@ -305,10 +326,11 @@ def restore_data_source_dump(
     '''
     dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
     hostname = connection_params['hostname'] or data_source.get(
-        'restore_hostname', data_source.get('hostname')
+        'restore_hostname',
+        data_source.get('hostname'),
    )
     port = str(
-        connection_params['port'] or data_source.get('restore_port', data_source.get('port', ''))
+        connection_params['port'] or data_source.get('restore_port', data_source.get('port', '')),
    )
     tls = data_source.get('restore_tls', data_source.get('tls'))
     username = borgmatic.hooks.credential.parse.resolve_credential(
@@ -337,7 +359,9 @@ def restore_data_source_dump(
         mysql_restore_command
         + (
             borgmatic.hooks.data_source.mariadb.make_defaults_file_options(
-                username, password, defaults_extra_filename
+                username,
+                password,
+                defaults_extra_filename,
             )
             if password_transport == 'pipe'
             else ()

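Most of the mysql.py churn is one mechanical change: the last argument of a multi-line
call gains a trailing comma. Under Ruff's formatter (as with Black), that "magic
trailing comma" pins the call in its exploded one-argument-per-line layout rather than
letting the formatter collapse it back onto a single line. A sketch of the contract,
with a stub standing in for the real helper:

    def make_defaults_file_options(username=None, password=None, defaults_extra_filename=None):
        return ()  # Stub body, for illustration only.

    # The comma after the final argument keeps this call formatted vertically.
    options = make_defaults_file_options(
        'backup_user',
        'secret',
        '/tmp/extra.cnf',
    )
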
+ 34 - 19
borgmatic/hooks/data_source/postgresql.py

@@ -43,7 +43,8 @@ def make_environment(database, config, restore_connection_params=None):
             )
         else:
             environment['PGPASSWORD'] = borgmatic.hooks.credential.parse.resolve_credential(
-                database['password'], config
+                database['password'],
+                config,
             )
     except (AttributeError, KeyError):
         pass
@@ -179,7 +180,7 @@ def dump_data_sources(
             )
             if os.path.exists(dump_filename):
                 logger.warning(
-                    f'Skipping duplicate dump of PostgreSQL database "{database_name}" to {dump_filename}'
+                    f'Skipping duplicate dump of PostgreSQL database "{database_name}" to {dump_filename}',
                 )
                 continue
 
@@ -197,8 +198,9 @@ def dump_data_sources(
                         '--username',
                         shlex.quote(
                             borgmatic.hooks.credential.parse.resolve_credential(
-                                database['username'], config
-                            )
+                                database['username'],
+                                config,
+                            ),
                         ),
                     )
                     if 'username' in database
@@ -221,27 +223,27 @@ def dump_data_sources(
             )
 
             logger.debug(
-                f'Dumping PostgreSQL database "{database_name}" to {dump_filename}{dry_run_label}'
+                f'Dumping PostgreSQL database "{database_name}" to {dump_filename}{dry_run_label}',
             )
             if dry_run:
                 continue
 
             if dump_format == 'directory':
                 dump.create_parent_directory_for_dump(dump_filename)
-                execute_command(
+                execute_command(  # noqa: S604
                     command,
-                    shell=True,  # noqa: S604
+                    shell=True,
                     environment=environment,
                 )
             else:
                 dump.create_named_pipe_for_dump(dump_filename)
                 processes.append(
-                    execute_command(
+                    execute_command(  # noqa: S604
                         command,
-                        shell=True,  # noqa: S604
+                        shell=True,
                         environment=environment,
                         run_to_completion=False,
-                    )
+                    ),
                 )
 
     if not dry_run:
@@ -249,14 +251,17 @@ def dump_data_sources(
             borgmatic.borg.pattern.Pattern(
                 os.path.join(borgmatic_runtime_directory, 'postgresql_databases'),
                 source=borgmatic.borg.pattern.Pattern_source.HOOK,
-            )
+            ),
         )
 
     return processes
 
 
 def remove_data_source_dumps(
-    databases, config, borgmatic_runtime_directory, dry_run
+    databases,
+    config,
+    borgmatic_runtime_directory,
+    dry_run,
 ):  # pragma: no cover
     '''
     Remove all database dump files for this hook regardless of the given databases. Use the
@@ -264,12 +269,17 @@ def remove_data_source_dumps(
     actually remove anything.
     '''
     dump.remove_data_source_dumps(
-        make_dump_path(borgmatic_runtime_directory), 'PostgreSQL', dry_run
+        make_dump_path(borgmatic_runtime_directory),
+        'PostgreSQL',
+        dry_run,
    )
 
 
 def make_data_source_dump_patterns(
-    databases, config, borgmatic_runtime_directory, name=None
+    databases,
+    config,
+    borgmatic_runtime_directory,
+    name=None,
 ):  # pragma: no cover
     '''
     Given a sequence of configuration dicts, a configuration dict, the borgmatic runtime directory,
@@ -281,10 +291,14 @@ def make_data_source_dump_patterns(
     return (
         dump.make_data_source_dump_filename(make_dump_path('borgmatic'), name, hostname='*'),
         dump.make_data_source_dump_filename(
-            make_dump_path(borgmatic_runtime_directory), name, hostname='*'
+            make_dump_path(borgmatic_runtime_directory),
+            name,
+            hostname='*',
         ),
         dump.make_data_source_dump_filename(
-            make_dump_path(borgmatic_source_directory), name, hostname='*'
+            make_dump_path(borgmatic_source_directory),
+            name,
+            hostname='*',
        ),
     )
 
@@ -313,10 +327,11 @@ def restore_data_source_dump(
     '''
     dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
     hostname = connection_params['hostname'] or data_source.get(
-        'restore_hostname', data_source.get('hostname')
+        'restore_hostname',
+        data_source.get('hostname'),
    )
     port = str(
-        connection_params['port'] or data_source.get('restore_port', data_source.get('port', ''))
+        connection_params['port'] or data_source.get('restore_port', data_source.get('port', '')),
    )
     username = borgmatic.hooks.credential.parse.resolve_credential(
         (
@@ -372,7 +387,7 @@ def restore_data_source_dump(
         + tuple(
             itertools.chain.from_iterable(('--schema', schema) for schema in data_source['schemas'])
             if data_source.get('schemas')
-            else ()
+            else (),
         )
     )
 

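The execute_command hunks above also relocate the # noqa: S604 suppression. A noqa
comment silences diagnostics reported on its own physical line, and with the call split
across several lines this commit moves the comment from the shell=True argument line up
to the line that opens the call, which is where Ruff now reports S604 (function call
with shell=True). A schematic sketch with a stand-in function:

    def execute_command(command, shell=False, environment=None):
        # Stand-in for borgmatic.execute.execute_command, for illustration only.
        print(command, shell, environment)

    command = 'pg_dump mydb > /tmp/mydb.dump'
    execute_command(  # noqa: S604
        command,
        shell=True,
        environment=None,
    )
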
+ 24 - 12
borgmatic/hooks/data_source/sqlite.py

@@ -60,7 +60,7 @@ def dump_data_sources(
 
         if not os.path.exists(database_path):
             logger.warning(
-                f'No SQLite database at {database_path}; an empty database will be created and dumped'
+                f'No SQLite database at {database_path}; an empty database will be created and dumped',
             )
 
         dump_path = make_dump_path(borgmatic_runtime_directory)
@@ -68,14 +68,15 @@ def dump_data_sources(
 
         if os.path.exists(dump_filename):
             logger.warning(
-                f'Skipping duplicate dump of SQLite database at {database_path} to {dump_filename}'
+                f'Skipping duplicate dump of SQLite database at {database_path} to {dump_filename}',
             )
             continue
 
         sqlite_command = tuple(
             shlex.quote(part) for part in shlex.split(database.get('sqlite_command') or 'sqlite3')
         )
-        command = sqlite_command + (
+        command = (
+            *sqlite_command,
             shlex.quote(database_path),
             '.dump',
             '>',
@@ -83,14 +84,14 @@ def dump_data_sources(
         )
 
         logger.debug(
-            f'Dumping SQLite database at {database_path} to {dump_filename}{dry_run_label}'
+            f'Dumping SQLite database at {database_path} to {dump_filename}{dry_run_label}',
        )
         if dry_run:
             continue
 
         dump.create_named_pipe_for_dump(dump_filename)
         processes.append(
-            execute_command(command, shell=True, run_to_completion=False)  # noqa: S604
+            execute_command(command, shell=True, run_to_completion=False),  # noqa: S604
         )
 
     if not dry_run:
@@ -98,14 +99,17 @@ def dump_data_sources(
             borgmatic.borg.pattern.Pattern(
                 os.path.join(borgmatic_runtime_directory, 'sqlite_databases'),
                 source=borgmatic.borg.pattern.Pattern_source.HOOK,
-            )
+            ),
         )
 
     return processes
 
 
 def remove_data_source_dumps(
-    databases, config, borgmatic_runtime_directory, dry_run
+    databases,
+    config,
+    borgmatic_runtime_directory,
+    dry_run,
 ):  # pragma: no cover
     '''
     Remove all database dump files for this hook regardless of the given databases. Use the
@@ -116,7 +120,10 @@ def remove_data_source_dumps(
 
 
 def make_data_source_dump_patterns(
-    databases, config, borgmatic_runtime_directory, name=None
+    databases,
+    config,
+    borgmatic_runtime_directory,
+    name=None,
 ):  # pragma: no cover
     '''
     Given a sequence of configuration dicts, a configuration dict, the borgmatic runtime directory,
@@ -128,10 +135,14 @@ def make_data_source_dump_patterns(
     return (
         dump.make_data_source_dump_filename(make_dump_path('borgmatic'), name, hostname='*'),
         dump.make_data_source_dump_filename(
-            make_dump_path(borgmatic_runtime_directory), name, hostname='*'
+            make_dump_path(borgmatic_runtime_directory),
+            name,
+            hostname='*',
         ),
         dump.make_data_source_dump_filename(
-            make_dump_path(borgmatic_source_directory), name, hostname='*'
+            make_dump_path(borgmatic_source_directory),
+            name,
+            hostname='*',
        ),
     )
 
@@ -153,7 +164,8 @@ def restore_data_source_dump(
     '''
     dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
     database_path = connection_params['restore_path'] or data_source.get(
-        'restore_path', data_source.get('path')
+        'restore_path',
+        data_source.get('path'),
    )
 
     logger.debug(f'Restoring SQLite database at {database_path}{dry_run_label}')
@@ -170,7 +182,7 @@ def restore_data_source_dump(
         shlex.quote(part)
         for part in shlex.split(data_source.get('sqlite_restore_command') or 'sqlite3')
     )
-    restore_command = sqlite_restore_command + (shlex.quote(database_path),)
+    restore_command = (*sqlite_restore_command, shlex.quote(database_path))
     # Don't give Borg local path so as to error on warnings, as "borg extract" only gives a warning
     # if the restore paths don't exist in the archive.
     execute_command_with_processes(

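sqlite.py builds its dump command from shlex.split and shlex.quote pieces plus a '>'
redirect and then runs it through the shell, which is why the execute_command call
carries # noqa: S604. A minimal sketch of that quoting pattern (the paths here are
illustrative):

    import shlex

    sqlite_command = tuple(
        shlex.quote(part) for part in shlex.split('sqlite3 -readonly')
    )
    command = (
        *sqlite_command,
        shlex.quote('/var/db/app.sqlite3'),
        '.dump',
        '>',
        shlex.quote('/run/user/1000/app.sqlite3'),
    )

    assert ' '.join(command) == 'sqlite3 -readonly /var/db/app.sqlite3 .dump > /run/user/1000/app.sqlite3'
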
+ 32 - 24
borgmatic/hooks/data_source/zfs.py

@@ -45,8 +45,8 @@ def get_datasets_to_backup(zfs_command, patterns):
     Return the result as a sequence of Dataset instances, sorted by mount point.
     '''
     list_output = borgmatic.execute.execute_command_and_capture_output(
-        tuple(zfs_command.split(' '))
-        + (
+        (
+            *zfs_command.split(' '),
             'list',
             '-H',
             '-t',
@@ -103,7 +103,8 @@ def get_datasets_to_backup(zfs_command, patterns):
                             else ()
                         )
                         + borgmatic.hooks.data_source.snapshot.get_contained_patterns(
-                            dataset.mount_point, candidate_patterns
+                            dataset.mount_point,
+                            candidate_patterns,
                         )
                     ),
                 )
@@ -115,7 +116,7 @@ def get_datasets_to_backup(zfs_command, patterns):
                 )
             ),
             key=lambda dataset: dataset.mount_point,
-        )
+        ),
     )
 
 
@@ -124,8 +125,8 @@ def get_all_dataset_mount_points(zfs_command):
     Given a ZFS command to run, return all ZFS datasets as a sequence of sorted mount points.
     '''
     list_output = borgmatic.execute.execute_command_and_capture_output(
-        tuple(zfs_command.split(' '))
-        + (
+        (
+            *zfs_command.split(' '),
             'list',
             '-H',
             '-t',
@@ -143,8 +144,8 @@ def get_all_dataset_mount_points(zfs_command):
                 for line in list_output.splitlines()
                 for mount_point in (line.rstrip(),)
                 if mount_point != 'none'
-            }
-        )
+            },
+        ),
     )
 
 
@@ -154,8 +155,8 @@ def snapshot_dataset(zfs_command, full_snapshot_name):  # pragma: no cover
     snapshot.
     '''
     borgmatic.execute.execute_command(
-        tuple(zfs_command.split(' '))
-        + (
+        (
+            *zfs_command.split(' '),
             'snapshot',
             full_snapshot_name,
         ),
@@ -173,8 +174,8 @@ def mount_snapshot(mount_command, full_snapshot_name, snapshot_mount_path):  # p
     os.makedirs(snapshot_mount_path, mode=0o700, exist_ok=True)
 
     borgmatic.execute.execute_command(
-        tuple(mount_command.split(' '))
-        + (
+        (
+            *mount_command.split(' '),
             '-t',
             'zfs',
             '-o',
@@ -265,7 +266,7 @@ def dump_data_sources(
     for dataset in requested_datasets:
         full_snapshot_name = f'{dataset.name}@{snapshot_name}'
         logger.debug(
-            f'Creating ZFS snapshot {full_snapshot_name} of {dataset.mount_point}{dry_run_label}'
+            f'Creating ZFS snapshot {full_snapshot_name} of {dataset.mount_point}{dry_run_label}',
        )
 
         if not dry_run:
@@ -277,25 +278,29 @@ def dump_data_sources(
             normalized_runtime_directory,
             'zfs_snapshots',
             hashlib.shake_256(dataset.mount_point.encode('utf-8')).hexdigest(
-                MOUNT_POINT_HASH_LENGTH
+                MOUNT_POINT_HASH_LENGTH,
             ),
             dataset.mount_point.lstrip(os.path.sep),
         )
 
         logger.debug(
-            f'Mounting ZFS snapshot {full_snapshot_name} at {snapshot_mount_path}{dry_run_label}'
+            f'Mounting ZFS snapshot {full_snapshot_name} at {snapshot_mount_path}{dry_run_label}',
        )
 
         if dry_run:
             continue
 
         mount_snapshot(
-            hook_config.get('mount_command', 'mount'), full_snapshot_name, snapshot_mount_path
+            hook_config.get('mount_command', 'mount'),
+            full_snapshot_name,
+            snapshot_mount_path,
        )
 
         for pattern in dataset.contained_patterns:
             snapshot_pattern = make_borg_snapshot_pattern(
-                pattern, dataset, normalized_runtime_directory
+                pattern,
+                dataset,
+                normalized_runtime_directory,
            )
 
             # Attempt to update the pattern in place, since pattern order matters to Borg.
@@ -312,7 +317,7 @@ def unmount_snapshot(umount_command, snapshot_mount_path):  # pragma: no cover
     Given a umount command to run and the mount path of a snapshot, unmount it.
     '''
     borgmatic.execute.execute_command(
-        tuple(umount_command.split(' ')) + (snapshot_mount_path,),
+        (*umount_command.split(' '), snapshot_mount_path),
         output_log_level=logging.DEBUG,
         close_fds=True,
     )
@@ -324,8 +329,8 @@ def destroy_snapshot(zfs_command, full_snapshot_name):  # pragma: no cover
     it.
     '''
     borgmatic.execute.execute_command(
-        tuple(zfs_command.split(' '))
-        + (
+        (
+            *tuple(zfs_command.split(' ')),
             'destroy',
             full_snapshot_name,
         ),
@@ -340,8 +345,8 @@ def get_all_snapshots(zfs_command):
     form "dataset@snapshot".
     '''
     list_output = borgmatic.execute.execute_command_and_capture_output(
-        tuple(zfs_command.split(' '))
-        + (
+        (
+            *tuple(zfs_command.split(' ')),
             'list',
             '-H',
             '-t',
@@ -355,7 +360,7 @@ def get_all_snapshots(zfs_command):
     return tuple(line.rstrip() for line in list_output.splitlines())
 
 
-def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, dry_run):
+def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, dry_run):  # noqa: PLR0912
     '''
     Given a ZFS configuration dict, a configuration dict, the borgmatic runtime directory, and
     whether this is a dry run, unmount and destroy any ZFS snapshots created by borgmatic. If this
@@ -444,7 +449,10 @@ def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, d
 
 
 def make_data_source_dump_patterns(
-    hook_config, config, borgmatic_runtime_directory, name=None
+    hook_config,
+    config,
+    borgmatic_runtime_directory,
+    name=None,
 ):  # pragma: no cover
     '''
     Restores aren't implemented, because stored files can be extracted directly with "extract".

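Both the LVM and ZFS hooks tag remove_data_source_dumps with # noqa: PLR0912, Ruff's
pylint-derived too-many-branches rule. The cleanup path legitimately branches a lot
(dry runs, missing snapshot directories, "shadow" mount points, and so on), so the
commit suppresses the rule on the definition line instead of splitting the function. A
toy illustration of where the suppression attaches:

    def cleanup_snapshots(snapshots, dry_run):  # noqa: PLR0912
        # Branch-heavy by design; the noqa sits on the def line that Ruff flags.
        for snapshot in snapshots:
            if dry_run:
                continue
            if snapshot.get('mounted'):
                print('unmounting', snapshot['name'])
            if snapshot.get('stale'):
                print('destroying', snapshot['name'])

    cleanup_snapshots([{'name': 'pool/data@borgmatic-1234', 'mounted': True}], dry_run=False)
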
+ 2 - 2
borgmatic/hooks/dispatch.py

@@ -86,7 +86,7 @@ def call_hooks(function_name, config, hook_type, *args, **kwargs):
     return {
         hook_name: call_hook(function_name, config, hook_name, *args, **kwargs)
         for hook_name in get_submodule_names(
-            importlib.import_module(f'borgmatic.hooks.{hook_type.value}')
+            importlib.import_module(f'borgmatic.hooks.{hook_type.value}'),
         )
         if hook_name in config or f'{hook_name}_databases' in config
     }
@@ -105,6 +105,6 @@ def call_hooks_even_if_unconfigured(function_name, config, hook_type, *args, **k
     return {
         hook_name: call_hook(function_name, config, hook_name, *args, **kwargs)
         for hook_name in get_submodule_names(
-            importlib.import_module(f'borgmatic.hooks.{hook_type.value}')
+            importlib.import_module(f'borgmatic.hooks.{hook_type.value}'),
         )
     }

+ 9 - 7
borgmatic/hooks/monitoring/apprise.py

@@ -28,8 +28,10 @@ def initialize_monitor(hook_config, config, config_filename, monitoring_log_leve
 
     borgmatic.hooks.monitoring.logs.add_handler(
         borgmatic.hooks.monitoring.logs.Forgetful_buffering_handler(
-            HANDLER_IDENTIFIER, logs_size_limit, monitoring_log_level
-        )
+            HANDLER_IDENTIFIER,
+            logs_size_limit,
+            monitoring_log_level,
+        ),
     )
 
 
@@ -39,8 +41,8 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     entries. If this is a dry run, then don't actually ping anything.
     '''
     try:
-        import apprise
-        from apprise import NotifyFormat, NotifyType
+        import apprise  # noqa: PLC0415
+        from apprise import NotifyFormat, NotifyType  # noqa: PLC0415
     except ImportError:  # pragma: no cover
         logger.warning('Unable to import Apprise in monitoring hook')
         return
@@ -81,13 +83,13 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
 
     body = state_config.get('body')
 
-    if state in (
+    if state in {
         borgmatic.hooks.monitoring.monitor.State.FINISH,
         borgmatic.hooks.monitoring.monitor.State.FAIL,
         borgmatic.hooks.monitoring.monitor.State.LOG,
-    ):
+    }:
         formatted_logs = borgmatic.hooks.monitoring.logs.format_buffered_logs_for_payload(
-            HANDLER_IDENTIFIER
+            HANDLER_IDENTIFIER,
        )
         if formatted_logs:
             body += f'\n\n{formatted_logs}'

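Two more recurring patterns show up in the apprise.py hunks: imports inside a function body get a `# noqa: PLC0415` marker (the lazy import is deliberate, since Apprise is an optional dependency), and membership tests move from tuple literals to set literals. The set form signals intent and gives hashed, constant-time lookups, which is presumably why the linter prefers it. A small self-contained example with invented states, not borgmatic's own `State` enum:

```python
import enum


class State(enum.Enum):
    START = 1
    FINISH = 2
    FAIL = 3


state = State.FINISH

# A set literal makes the membership intent explicit and hashes the lookup
# instead of scanning a tuple element by element:
if state in {State.FINISH, State.FAIL}:
    print('ping the monitor')
```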
+ 5 - 3
borgmatic/hooks/monitoring/cronhub.py

@@ -15,12 +15,15 @@ TIMEOUT_SECONDS = 10


 def initialize_monitor(
-    ping_url, config, config_filename, monitoring_log_level, dry_run
+    ping_url,
+    config,
+    config_filename,
+    monitoring_log_level,
+    dry_run,
 ):  # pragma: no cover
     '''
     No initialization is necessary for this monitor.
     '''
-    pass


 def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
@@ -57,4 +60,3 @@ def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''
-    pass

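The `pass` statements removed here (and in the other monitoring hooks below) were always redundant: a docstring is itself a complete function body, which is the redundancy Ruff's flake8-pie rules flag. A quick sketch:

```python
def initialize_monitor(ping_url, config, config_filename, monitoring_log_level, dry_run):
    '''
    No initialization is necessary for this monitor.
    '''
    # No trailing "pass" needed: the docstring alone is a valid body.


# Calling it works as before and simply returns None:
initialize_monitor(None, {}, 'test.yaml', 0, dry_run=True)
```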
+ 5 - 3
borgmatic/hooks/monitoring/cronitor.py

@@ -15,12 +15,15 @@ TIMEOUT_SECONDS = 10


 def initialize_monitor(
-    ping_url, config, config_filename, monitoring_log_level, dry_run
+    ping_url,
+    config,
+    config_filename,
+    monitoring_log_level,
+    dry_run,
 ):  # pragma: no cover
     '''
     No initialization is necessary for this monitor.
     '''
-    pass


 def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
@@ -52,4 +55,3 @@ def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''
-    pass

+ 6 - 4
borgmatic/hooks/monitoring/healthchecks.py

@@ -37,8 +37,10 @@ def initialize_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):

     borgmatic.hooks.monitoring.logs.add_handler(
         borgmatic.hooks.monitoring.logs.Forgetful_buffering_handler(
-            HANDLER_IDENTIFIER, ping_body_limit, monitoring_log_level
-        )
+            HANDLER_IDENTIFIER,
+            ping_body_limit,
+            monitoring_log_level,
+        ),
     )


@@ -74,9 +76,9 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
     logger.info(f'Pinging Healthchecks {state.name.lower()}{dry_run_label}')
     logger.debug(f'Using Healthchecks ping URL {ping_url}')

-    if state in (monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG):
+    if state in {monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG}:
         payload = borgmatic.hooks.monitoring.logs.format_buffered_logs_for_payload(
-            HANDLER_IDENTIFIER
+            HANDLER_IDENTIFIER,
         )
     else:
         payload = ''

+ 2 - 3
borgmatic/hooks/monitoring/logs.py

@@ -1,3 +1,4 @@
+import contextlib
 import logging

 IS_A_HOOK = False
@@ -88,9 +89,7 @@ def remove_handler(identifier):
     '''
     logger = logging.getLogger()

-    try:
+    with contextlib.suppress(ValueError):
         logger.removeHandler(get_handler(identifier))
-    except ValueError:
-        pass

     logger.setLevel(min(handler.level for handler in logger.handlers))

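`contextlib.suppress` is the standard library's one-line replacement for a try/except whose handler is just `pass`, which is exactly the rewrite in the hunk above. A standalone demonstration:

```python
import contextlib

handlers = ['console']

# Equivalent to: try: handlers.remove('syslog') / except ValueError: pass.
# list.remove() raises ValueError for a missing element, and suppress()
# swallows exactly that exception type:
with contextlib.suppress(ValueError):
    handlers.remove('syslog')

print(handlers)  # ['console'] -- no traceback, list left intact
```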
+ 6 - 4
borgmatic/hooks/monitoring/loki.py

@@ -71,7 +71,10 @@ class Loki_log_buffer:

         try:
             result = requests.post(
-                self.url, headers=request_header, data=request_body, timeout=TIMEOUT_SECONDS
+                self.url,
+                headers=request_header,
+                data=request_body,
+                timeout=TIMEOUT_SECONDS,
             )
             result.raise_for_status()
         except requests.RequestException:
@@ -140,9 +143,8 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
     Add an entry to the loki logger with the current state.
     '''
     for handler in tuple(logging.getLogger().handlers):
-        if isinstance(handler, Loki_log_handler):
-            if state in MONITOR_STATE_TO_LOKI.keys():
-                handler.raw(f'{MONITOR_STATE_TO_LOKI[state]} backup')
+        if isinstance(handler, Loki_log_handler) and state in MONITOR_STATE_TO_LOKI:
+            handler.raw(f'{MONITOR_STATE_TO_LOKI[state]} backup')


 def destroy_monitor(hook_config, config, monitoring_log_level, dry_run):

+ 16 - 8
borgmatic/hooks/monitoring/ntfy.py

@@ -11,12 +11,15 @@ TIMEOUT_SECONDS = 10


 def initialize_monitor(
-    ping_url, config, config_filename, monitoring_log_level, dry_run
+    ping_url,
+    config,
+    config_filename,
+    monitoring_log_level,
+    dry_run,
 ):  # pragma: no cover
     '''
     No initialization is necessary for this monitor.
     '''
-    pass


 def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
@@ -54,13 +57,16 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):

         try:
             username = borgmatic.hooks.credential.parse.resolve_credential(
-                hook_config.get('username'), config
+                hook_config.get('username'),
+                config,
             )
             password = borgmatic.hooks.credential.parse.resolve_credential(
-                hook_config.get('password'), config
+                hook_config.get('password'),
+                config,
             )
             access_token = borgmatic.hooks.credential.parse.resolve_credential(
-                hook_config.get('access_token'), config
+                hook_config.get('access_token'),
+                config,
             )
         except ValueError as error:
             logger.warning(f'Ntfy credential error: {error}')
@@ -71,7 +77,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
         if access_token is not None:
             if username or password:
                 logger.warning(
-                    'ntfy access_token is set but so is username/password, only using access_token'
+                    'ntfy access_token is set but so is username/password, only using access_token',
                 )

             auth = requests.auth.HTTPBasicAuth('', access_token)
@@ -87,7 +93,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
             logging.getLogger('urllib3').setLevel(logging.ERROR)
             try:
                 response = requests.post(
-                    f'{base_url}/{topic}', headers=headers, auth=auth, timeout=TIMEOUT_SECONDS
+                    f'{base_url}/{topic}',
+                    headers=headers,
+                    auth=auth,
+                    timeout=TIMEOUT_SECONDS,
                 )
                 if not response.ok:
                     response.raise_for_status()
@@ -99,4 +108,3 @@ def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''
-    pass

+ 11 - 6
borgmatic/hooks/monitoring/pagerduty.py

@@ -34,8 +34,10 @@ def initialize_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):

     borgmatic.hooks.monitoring.logs.add_handler(
         borgmatic.hooks.monitoring.logs.Forgetful_buffering_handler(
-            HANDLER_IDENTIFIER, ping_body_limit, monitoring_log_level
-        )
+            HANDLER_IDENTIFIER,
+            ping_body_limit,
+            monitoring_log_level,
+        ),
     )


@@ -56,14 +58,15 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):

     try:
         integration_key = borgmatic.hooks.credential.parse.resolve_credential(
-            hook_config.get('integration_key'), config
+            hook_config.get('integration_key'),
+            config,
         )
     except ValueError as error:
         logger.warning(f'PagerDuty credential error: {error}')
         return

     logs_payload = borgmatic.hooks.monitoring.logs.format_buffered_logs_for_payload(
-        HANDLER_IDENTIFIER
+        HANDLER_IDENTIFIER,
     )

     hostname = platform.node()
@@ -87,7 +90,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
                     'logs': logs_payload,
                 },
             },
-        }
+        },
     )

     if dry_run:
@@ -96,7 +99,9 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
     logging.getLogger('urllib3').setLevel(logging.ERROR)
     try:
         response = requests.post(
-            EVENTS_API_URL, data=payload.encode('utf-8'), timeout=TIMEOUT_SECONDS
+            EVENTS_API_URL,
+            data=payload.encode('utf-8'),
+            timeout=TIMEOUT_SECONDS,
         )
         if not response.ok:
             response.raise_for_status()

+ 11 - 9
borgmatic/hooks/monitoring/pushover.py

@@ -12,12 +12,15 @@ TIMEOUT_SECONDS = 10


 def initialize_monitor(
-    ping_url, config, config_filename, monitoring_log_level, dry_run
+    ping_url,
+    config,
+    config_filename,
+    monitoring_log_level,
+    dry_run,
 ):  # pragma: no cover
     '''
     No initialization is necessary for this monitor.
     '''
-    pass


 def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
@@ -37,7 +40,8 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):

     try:
         token = borgmatic.hooks.credential.parse.resolve_credential(
-            hook_config.get('token'), config
+            hook_config.get('token'),
+            config,
         )
         user = borgmatic.hooks.credential.parse.resolve_credential(hook_config.get('user'), config)
     except ValueError as error:
@@ -54,11 +58,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
         if 'retry' not in state_config:
             logger.info('Setting retry to default (30 sec)')
             state_config['retry'] = 30
-    else:
-        if 'expire' in state_config or 'retry' in state_config:
-            raise ValueError(
-                'The configuration parameters retry and expire should not be set when priority is not equal to 2. Please remove them from the configuration.'
-            )
+    elif 'expire' in state_config or 'retry' in state_config:
+        raise ValueError(
+            'The configuration parameters retry and expire should not be set when priority is not equal to 2. Please remove them from the configuration.',
+        )

     state_config = {
         key: (int(value) if key == 'html' else value) for key, value in state_config.items()
@@ -93,4 +96,3 @@ def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''
-    pass

+ 7 - 5
borgmatic/hooks/monitoring/sentry.py

@@ -10,16 +10,19 @@ TIMEOUT_SECONDS = 10


 def initialize_monitor(
-    ping_url, config, config_filename, monitoring_log_level, dry_run
+    ping_url,
+    config,
+    config_filename,
+    monitoring_log_level,
+    dry_run,
 ):  # pragma: no cover
     '''
     No initialization is necessary for this monitor.
     '''
-    pass


 DATA_SOURCE_NAME_URL_PATTERN = re.compile(
-    '^(?P<protocol>.+)://(?P<username>.+)@(?P<hostname>.+)/(?P<project_id>.+)$'
+    r'^(?P<protocol>.+)://(?P<username>.+)@(?P<hostname>.+)/(?P<project_id>.+)$',
 )


@@ -31,7 +34,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
     '''
     run_states = hook_config.get('states', ['start', 'finish', 'fail'])

-    if not state.name.lower() in run_states:
+    if state.name.lower() not in run_states:
         return

     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
@@ -75,4 +78,3 @@ def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''
-    pass

+ 5 - 3
borgmatic/hooks/monitoring/uptime_kuma.py

@@ -9,12 +9,15 @@ TIMEOUT_SECONDS = 10


 def initialize_monitor(
-    push_url, config, config_filename, monitoring_log_level, dry_run
+    push_url,
+    config,
+    config_filename,
+    monitoring_log_level,
+    dry_run,
 ):  # pragma: no cover
     '''
     No initialization is necessary for this monitor.
     '''
-    pass


 def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
@@ -55,4 +58,3 @@ def destroy_monitor(push_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''
-    pass

+ 12 - 7
borgmatic/hooks/monitoring/zabbix.py

@@ -11,12 +11,15 @@ TIMEOUT_SECONDS = 10


 def initialize_monitor(
-    ping_url, config, config_filename, monitoring_log_level, dry_run
+    ping_url,
+    config,
+    config_filename,
+    monitoring_log_level,
+    dry_run,
 ):  # pragma: no cover
     '''
     No initialization is necessary for this monitor.
     '''
-    pass


 def send_zabbix_request(server, headers, data):
@@ -55,7 +58,7 @@ def send_zabbix_request(server, headers, data):
         return None


-def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):
+def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):  # noqa: PLR0911, PLR0912, PLR0915
     '''
     Update the configured Zabbix item using either the itemid, or a host and key.
     If this is a dry run, then don't actually update anything.
@@ -77,13 +80,16 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run):

     try:
         username = borgmatic.hooks.credential.parse.resolve_credential(
-            hook_config.get('username'), config
+            hook_config.get('username'),
+            config,
         )
         password = borgmatic.hooks.credential.parse.resolve_credential(
-            hook_config.get('password'), config
+            hook_config.get('password'),
+            config,
         )
         api_key = borgmatic.hooks.credential.parse.resolve_credential(
-            hook_config.get('api_key'), config
+            hook_config.get('api_key'),
+            config,
         )
     except ValueError as error:
         logger.warning(f'Zabbix credential error: {error}')
@@ -184,4 +190,3 @@ def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''
-    pass

+ 35 - 39
borgmatic/logger.py

@@ -15,10 +15,7 @@ def to_bool(arg):
     if isinstance(arg, str):
         arg = arg.lower()

-    if arg in ('yes', 'on', '1', 'true', 1):
-        return True
-
-    return False
+    return arg in {'yes', 'on', '1', 'true', 1}


 def interactive_console():
@@ -59,12 +56,12 @@ class Multi_stream_handler(logging.Handler):
     '''

     def __init__(self, log_level_to_stream_handler):
-        super(Multi_stream_handler, self).__init__()
+        super().__init__()
         self.log_level_to_handler = log_level_to_stream_handler
         self.handlers = set(self.log_level_to_handler.values())

     def flush(self):  # pragma: no cover
-        super(Multi_stream_handler, self).flush()
+        super().flush()

         for handler in self.handlers:
             handler.flush()
@@ -75,29 +72,29 @@ class Multi_stream_handler(logging.Handler):
         '''
         self.log_level_to_handler[record.levelno].emit(record)

-    def setFormatter(self, formatter):  # pragma: no cover
-        super(Multi_stream_handler, self).setFormatter(formatter)
+    def setFormatter(self, formatter):  # pragma: no cover  # noqa: N802
+        super().setFormatter(formatter)

         for handler in self.handlers:
             handler.setFormatter(formatter)

-    def setLevel(self, level):  # pragma: no cover
-        super(Multi_stream_handler, self).setLevel(level)
+    def setLevel(self, level):  # pragma: no cover  # noqa: N802
+        super().setLevel(level)

         for handler in self.handlers:
             handler.setLevel(level)


 class Log_prefix_formatter(logging.Formatter):
-    def __init__(self, fmt='{prefix}{message}', style='{', *args, **kwargs):  # pragma: no cover
+    def __init__(self, fmt='{prefix}{message}', *args, style='{', **kwargs):
         self.prefix = None

-        super(Log_prefix_formatter, self).__init__(fmt=fmt, style=style, *args, **kwargs)
+        super().__init__(*args, fmt=fmt, style=style, **kwargs)

     def format(self, record):  # pragma: no cover
         record.prefix = f'{self.prefix}: ' if self.prefix else ''

-        return super(Log_prefix_formatter, self).format(record)
+        return super().format(record)


 class Color(enum.Enum):
@@ -112,28 +109,27 @@ class Color(enum.Enum):
 class Console_color_formatter(logging.Formatter):
     def __init__(self, *args, **kwargs):
         self.prefix = None
-        super(Console_color_formatter, self).__init__(
-            '{prefix}{message}', style='{', *args, **kwargs
+        super().__init__(
+            '{prefix}{message}',
+            *args,
+            style='{',
+            **kwargs,
         )

     def format(self, record):
         add_custom_log_levels()

-        color = (
-            {
-                logging.CRITICAL: Color.RED,
-                logging.ERROR: Color.RED,
-                logging.WARN: Color.YELLOW,
-                logging.ANSWER: Color.MAGENTA,
-                logging.INFO: Color.GREEN,
-                logging.DEBUG: Color.CYAN,
-            }
-            .get(record.levelno)
-            .value
-        )
+        color = {
+            logging.CRITICAL: Color.RED,
+            logging.ERROR: Color.RED,
+            logging.WARNING: Color.YELLOW,
+            logging.ANSWER: Color.MAGENTA,
+            logging.INFO: Color.GREEN,
+            logging.DEBUG: Color.CYAN,
+        }.get(record.levelno).value
         record.prefix = f'{self.prefix}: ' if self.prefix else ''

-        return color_text(color, super(Console_color_formatter, self).format(record))
+        return color_text(color, super().format(record))


 def ansi_escape_code(color):  # pragma: no cover
@@ -177,18 +173,18 @@ def add_logging_level(level_name, level_number):
     if not hasattr(logging.getLoggerClass(), method_name):

         def log_to_root(message, *args, **kwargs):  # pragma: no cover
-            logging.log(level_number, message, *args, **kwargs)
+            logging.log(level_number, message, *args, **kwargs)  # noqa: LOG015

         setattr(logging, method_name, log_to_root)


-ANSWER = logging.WARN - 5
+ANSWER = logging.WARNING - 5
 DISABLED = logging.CRITICAL + 10


 def add_custom_log_levels():  # pragma: no cover
     '''
-    Add a custom log level between WARN and INFO for user-requested answers.
+    Add a custom log level between WARNING and INFO for user-requested answers.
     '''
     add_logging_level('ANSWER', ANSWER)
     add_logging_level('DISABLED', DISABLED)
@@ -277,11 +273,11 @@ class Delayed_logging_handler(logging.handlers.BufferingHandler):
     '''

     def __init__(self):
-        super(Delayed_logging_handler, self).__init__(capacity=0)
+        super().__init__(capacity=0)

         self.targets = None

-    def shouldFlush(self, record):
+    def shouldFlush(self, record):  # noqa: N802
         return self.targets is not None

     def flush(self):
@@ -361,11 +357,11 @@ def configure_logging(
             logging.DISABLED: console_disabled,
             logging.CRITICAL: console_error_handler,
             logging.ERROR: console_error_handler,
-            logging.WARN: console_error_handler,
+            logging.WARNING: console_error_handler,
             logging.ANSWER: console_standard_handler,
             logging.INFO: console_standard_handler,
             logging.DEBUG: console_standard_handler,
-        }
+        },
     )

     if color_enabled:
@@ -390,8 +386,8 @@ def configure_logging(
             syslog_handler = logging.handlers.SysLogHandler(address=syslog_path)
             syslog_handler.setFormatter(
                 Log_prefix_formatter(
-                    'borgmatic: {levelname} {prefix}{message}',  # noqa: FS003
-                )
+                    'borgmatic: {levelname} {prefix}{message}',
+                ),
             )
             syslog_handler.setLevel(syslog_log_level)
             handlers.append(syslog_handler)
@@ -400,8 +396,8 @@ def configure_logging(
         file_handler = logging.handlers.WatchedFileHandler(log_file)
         file_handler.setFormatter(
             Log_prefix_formatter(
-                log_file_format or '[{asctime}] {levelname}: {prefix}{message}',  # noqa: FS003
-            )
+                log_file_format or '[{asctime}] {levelname}: {prefix}{message}',
+            ),
         )
         file_handler.setLevel(log_file_log_level)
         handlers.append(file_handler)

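The `logging.WARN` to `logging.WARNING` renames in this file are behavior-preserving: `WARN` is an undocumented legacy alias, and both equal 30. That also pins down where borgmatic's custom `ANSWER` level lands relative to the standard ones, as this standard-library-only check shows:

```python
import logging

# WARN and WARNING are the same numeric level in the standard library:
assert logging.WARN == logging.WARNING == 30

# So ANSWER (WARNING - 5 = 25) sits between INFO (20) and WARNING (30):
ANSWER = logging.WARNING - 5
logging.addLevelName(ANSWER, 'ANSWER')

logging.basicConfig(level=logging.INFO)
logging.log(ANSWER, 'archives listed')  # prints "ANSWER:root:archives listed"
```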
+ 23 - 20
docs/how-to/develop-on-borgmatic.md

@@ -50,38 +50,44 @@ code reference](https://torsion.org/borgmatic/docs/reference/source-code/).
 ## Automated tests

 Assuming you've cloned the borgmatic source code as described above and you're
-in the `borgmatic/` working copy, install tox, which is used for setting up
-testing environments. You can either install a system package of tox (likely
-called `tox` or `python-tox`) or you can install tox with pipx:
+in the `borgmatic/` working copy, install [tox](https://tox.wiki/), which is
+used for setting up testing environments. You can either install a system
+package of tox (likely called `tox` or `python-tox`) or you can install tox with
+pipx:

 ```bash
 pipx install tox
 ```

+Also install [Ruff](https://docs.astral.sh/ruff/), which tox calls for code
+linting and formatting:
+
+```bash
+pipx install ruff
+```
+
 Finally, to actually run tests, run tox from inside the borgmatic
-sourcedirectory:
+source directory:

 ```bash
 tox
 ```

-### Code formatting
+### Code style

-If when running tests, you get an error from the
-[Black](https://black.readthedocs.io/en/stable/) code formatter about files
-that would be reformatted, you can ask Black to format them for you via the
-following:
+If, when running tests, you get an error from Ruff's linter about files that
+don't meet linting requirements, you can ask Ruff to attempt to fix them for you
+via the following:

 ```bash
-tox -e black
+tox -e lint
 ```

-And if you get a complaint from the
-[isort](https://github.com/timothycrosley/isort) Python import orderer, you
-can ask isort to order your imports for you:
+And if you get an error from Ruff's code formatter about files that would be
+reformatted, you can ask Ruff to format them for you:

 ```bash
-tox -e isort
+tox -e format
 ```

 Similarly, if you get errors about spelling mistakes in source code, you can
@@ -89,7 +95,7 @@ ask [codespell](https://github.com/codespell-project/codespell) to correct
 them:

 ```bash
-tox -e codespell
+tox -e spell
 ```


@@ -162,11 +168,8 @@ the following deviations from it:
  * Prefer functional code where it makes sense, e.g. when constructing a
    command (to subsequently execute imperatively).

-borgmatic uses the [Black](https://black.readthedocs.io/en/stable/) code
-formatter, the [Flake8](http://flake8.pycqa.org/en/latest/) code checker, and
-the [isort](https://github.com/timothycrosley/isort) import orderer, so
-certain code style requirements are enforced when running automated tests. See
-the Black, Flake8, and isort documentation for more information.
+Since borgmatic uses Ruff for code linting and formatting, many other code style
+requirements are also enforced when running automated tests.


 ## Continuous integration

+ 83 - 9
pyproject.toml

@@ -42,19 +42,93 @@ build-backend = "setuptools.build_meta"
 include = ["borgmatic*"]
 namespaces = false

-[tool.black]
-line-length = 100
-skip-string-normalization = true
-
 [tool.pytest.ini_options]
 testpaths = "tests"
 addopts = "--cov-report term-missing:skip-covered --cov=borgmatic --no-cov-on-fail --cov-fail-under=100 --ignore=tests/end-to-end"

-[tool.isort]
-profile = "black"
-known_first_party = "borgmatic"
-line_length = 100
-skip = ".tox"
+[tool.ruff]
+line-length = 100
+exclude = ["*.*/*"]
+
+[tool.ruff.format]
+quote-style = "preserve"
+
+[tool.ruff.lint]
+preview = true
+extend-select = [
+    "A",  # flake8-builtins: builtin shadowing
+    "B",  # flake8-bugbear: bugs and design problems
+    "BLE",  # flake8-blind-except: "except:" without exception type
+    "C4",  # flake8-comprehensions: generators and comprehensions
+    "COM",  # flake8-commas: trailing commas
+    "DTZ",  # flake8-datetimez: naive datetime
+    "E",  # pycodestyle: errors
+    "F",  # pyflakes: various linting
+    "ERA",  # eradicate: find commented out code
+    "FLY",  # flynt: f-string instead of string join
+    "FIX",  # flake8-fixme: leftover FIXMEs and TODOs
+    "I",  # isort: import ordering
+    "ISC",  # flake8-implicit-str-concat: implicit string concatenation
+    "LOG",  # flake8-logging: standard library logging
+    "N",  # pep8-naming: PEP-8 naming conventions
+    "PERF",  # perflint: performance linting
+    "PIE",  # flake8-pie: various linting
+    "PL",  # pylint: various linting
+    "Q",  # flake8-quotes: string quoting
+    "RET",  # flake8-return: return statement
+    "RUF",  # Ruff-specific rules
+    "S",  # flake8-bandit: security testing
+    "SIM",  # flake8-simplify: code simplifications
+    "T20",  # flake8-print: print statements
+    "TID",  # flake8-tidy-imports: absolute imports
+    "UP",  # pyupgrade: upgrade syntax for newer versions of Python
+    "W",  # pycodestyle: warnings
+    "YTT",  # flake8-2020: sys.version misuse
+]
+ignore = [
+    "C408",  # unnecessary dict() call (conflicts with makeLogRecord())
+    "COM812",  # trailing comma missing (conflicts with formatter)
+    "B904",  # unchained exception raised within "except:" clause
+    "E501",  # line too long
+    "ISC001",  # implicit string concatenation on one line (conflicts with formatter)
+    "N801",  # class name not title case
+    "N818",  # exception class name doesn't end in "Error"
+    "PLR0913",  # too many positional arguments in function definition
+    "PLR0914",  # too many local variables
+    "PLR0917",  # too many positional arguments
+    "S105",  # hard-coded password
+    "S404",  # subprocess import
+    "SIM115",  # open() without context manager
+    "SIM905",  # split() on literal string
+]
+
+[tool.ruff.lint.flake8-quotes]
+docstring-quotes = "single"
+inline-quotes = "single"
+multiline-quotes = "single"
+
+[tool.ruff.lint.isort]
+known-first-party = ["borgmatic"]
+
+[tool.ruff.lint.per-file-ignores]
+"tests/**/*.py" = [
+    "C406",  # unnecessary list literal
+    "N802",  # uppercase in function name
+    "PLC1901",  # comparison to empty string
+    "PLR2004",  # magic value
+    "PLW1514",  # open() without encoding
+    "S101",  # asserts
+    "S106",  # hard-coded password
+    "S108",  # insecure usage of temporary file
+    "S602",  # shell=True
+    "S603",  # subprocess call
+    "S604",  # shell=True
+    "S607",  # executing a relative path
+    "TID252",  # relative import from parent
+]
+"tests/end-to-end/commands/**/*.py" = [
+    "T201",  # print statement
+]

 [tool.codespell]
 skip = ".git,.tox,build"

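The Ruff configuration above disables rules at three scopes: globally via `ignore`, per directory via `per-file-ignores`, and line by line via inline `# noqa: <RULE>` comments like the ones added in the zabbix.py and logger.py hunks. A tiny illustration of the inline form:

```python
# Under the T20 (flake8-print) rules selected above, a print call in
# application code is flagged; an inline noqa opts out just this line:
print('keeping this deliberate print')  # noqa: T201
```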
+ 1 - 1
scripts/run-full-tests

@@ -18,7 +18,7 @@ if [ -z "$TEST_CONTAINER" ]; then
 fi

 apk add --no-cache python3 py3-pip borgbackup postgresql17-client mariadb-client mongodb-tools \
-    py3-mongo py3-regex py3-ruamel.yaml py3-ruamel.yaml.clib py3-tox py3-yaml bash sqlite fish
+    py3-mongo py3-regex py3-ruamel.yaml py3-ruamel.yaml.clib py3-tox py3-yaml ruff sqlite bash fish
 export COVERAGE_FILE=/tmp/.coverage

 tox --workdir /tmp/.tox --sitepackages

+ 13 - 24
test_requirements.txt

@@ -1,37 +1,26 @@
-appdirs==1.4.4
 apprise==1.9.3
 attrs==25.3.0
-bandit==1.8.5
-black==25.1.0
-certifi==2025.6.15
-chardet==5.2.0
+certifi==2025.7.14
+charset-normalizer==3.4.2
 click==8.2.1
 codespell==2.4.1
-coverage==7.9.1
-flake8==7.3.0
-flake8-bandit==4.1.1
-flake8-clean-block==0.1.2
-flake8-eradicate==1.5.0
-flake8-quotes==3.4.0
-flake8-use-fstring==1.4
-flake8-variables-names==0.0.6
-flexmock==0.12.1
+coverage==7.9.2
+flexmock==0.12.2
 idna==3.10
-isort==6.0.1
-jsonschema==4.24.0
+iniconfig==2.1.0
+jsonschema==4.24.1
+jsonschema-specifications==2025.4.1
 Markdown==3.8.2
-mccabe==0.7.0
+oauthlib==3.3.1
 packaging==25.0
-pathspec==0.12.1
-pbr==6.1.1
 pluggy==1.6.0
-py==1.11.0
-pycodestyle==2.14.0
-pyflakes==3.4.0
+Pygments==2.19.2
 pytest==8.4.1
 pytest-cov==6.2.1
 PyYAML>5.0.0
-regex
+referencing==0.36.2
 requests==2.32.4
+requests-oauthlib==2.0.0
+rpds-py==0.26.0
 ruamel.yaml>0.15.0
-toml==0.10.2
+urllib3==2.5.0

Too many files changed in this diff, so some files are not shown.