Bläddra i källkod

Log the repository path or label on every relevant log message, not just some logs (#635).

Reviewed-on: https://projects.torsion.org/borgmatic-collective/borgmatic/pulls/980
Dan Helfman 4 månader sedan
förälder
incheckning
09933c3dc7
89 ändrade filer med 961 tillägg och 942 borttagningar
  1. 1 0
      NEWS
  2. 1 3
      borgmatic/actions/borg.py
  3. 1 3
      borgmatic/actions/break_lock.py
  4. 1 3
      borgmatic/actions/change_passphrase.py
  5. 16 23
      borgmatic/actions/check.py
  6. 3 6
      borgmatic/actions/compact.py
  7. 0 1
      borgmatic/actions/config/bootstrap.py
  8. 2 8
      borgmatic/actions/create.py
  9. 1 1
      borgmatic/actions/delete.py
  10. 1 1
      borgmatic/actions/export_key.py
  11. 1 3
      borgmatic/actions/export_tar.py
  12. 1 3
      borgmatic/actions/extract.py
  13. 1 3
      borgmatic/actions/info.py
  14. 2 2
      borgmatic/actions/list.py
  15. 2 4
      borgmatic/actions/mount.py
  16. 1 1
      borgmatic/actions/prune.py
  17. 1 1
      borgmatic/actions/repo_create.py
  18. 1 2
      borgmatic/actions/repo_delete.py
  19. 1 3
      borgmatic/actions/repo_info.py
  20. 1 1
      borgmatic/actions/repo_list.py
  21. 6 18
      borgmatic/actions/restore.py
  22. 1 3
      borgmatic/actions/transfer.py
  23. 1 1
      borgmatic/borg/change_passphrase.py
  24. 1 1
      borgmatic/borg/compact.py
  25. 6 13
      borgmatic/borg/create.py
  26. 1 1
      borgmatic/borg/export_key.py
  27. 1 1
      borgmatic/borg/export_tar.py
  28. 1 1
      borgmatic/borg/list.py
  29. 2 2
      borgmatic/borg/repo_create.py
  30. 1 1
      borgmatic/borg/repo_list.py
  31. 76 72
      borgmatic/commands/borgmatic.py
  32. 7 8
      borgmatic/config/paths.py
  33. 10 7
      borgmatic/execute.py
  34. 9 11
      borgmatic/hooks/command.py
  35. 13 17
      borgmatic/hooks/data_source/bootstrap.py
  36. 20 24
      borgmatic/hooks/data_source/btrfs.py
  37. 2 2
      borgmatic/hooks/data_source/dump.py
  38. 25 32
      borgmatic/hooks/data_source/lvm.py
  39. 25 37
      borgmatic/hooks/data_source/mariadb.py
  40. 19 24
      borgmatic/hooks/data_source/mongodb.py
  41. 25 37
      borgmatic/hooks/data_source/mysql.py
  42. 23 31
      borgmatic/hooks/data_source/postgresql.py
  43. 22 26
      borgmatic/hooks/data_source/sqlite.py
  44. 22 28
      borgmatic/hooks/data_source/zfs.py
  45. 18 19
      borgmatic/hooks/dispatch.py
  46. 4 4
      borgmatic/hooks/monitoring/apprise.py
  47. 5 9
      borgmatic/hooks/monitoring/cronhub.py
  48. 5 9
      borgmatic/hooks/monitoring/cronitor.py
  49. 6 10
      borgmatic/hooks/monitoring/healthchecks.py
  50. 2 2
      borgmatic/hooks/monitoring/loki.py
  51. 8 14
      borgmatic/hooks/monitoring/ntfy.py
  52. 5 7
      borgmatic/hooks/monitoring/pagerduty.py
  53. 5 7
      borgmatic/hooks/monitoring/pushover.py
  54. 6 10
      borgmatic/hooks/monitoring/sentry.py
  55. 4 8
      borgmatic/hooks/monitoring/uptime_kuma.py
  56. 16 18
      borgmatic/hooks/monitoring/zabbix.py
  57. 84 5
      borgmatic/logger.py
  58. 6 10
      docs/how-to/monitor-your-backups.md
  59. 0 0
      tests/end-to-end/hooks/__init__.py
  60. 0 0
      tests/end-to-end/hooks/data_source/__init__.py
  61. 0 0
      tests/end-to-end/hooks/data_source/test_btrfs.py
  62. 0 0
      tests/end-to-end/hooks/data_source/test_database.py
  63. 0 0
      tests/end-to-end/hooks/data_source/test_lvm.py
  64. 0 0
      tests/end-to-end/hooks/data_source/test_zfs.py
  65. 143 0
      tests/end-to-end/hooks/monitoring/test_monitoring.py
  66. 1 1
      tests/end-to-end/test_override.py
  67. 2 2
      tests/integration/hooks/monitoring/test_apprise.py
  68. 2 2
      tests/integration/hooks/monitoring/test_healthchecks.py
  69. 3 3
      tests/integration/hooks/monitoring/test_loki.py
  70. 0 8
      tests/unit/actions/test_check.py
  71. 4 20
      tests/unit/actions/test_restore.py
  72. 3 9
      tests/unit/borg/test_create.py
  73. 48 18
      tests/unit/commands/test_borgmatic.py
  74. 15 17
      tests/unit/config/test_paths.py
  75. 0 7
      tests/unit/hooks/data_source/test_bootstrap.py
  76. 0 15
      tests/unit/hooks/data_source/test_btrfs.py
  77. 3 3
      tests/unit/hooks/data_source/test_dump.py
  78. 0 19
      tests/unit/hooks/data_source/test_lvm.py
  79. 8 42
      tests/unit/hooks/data_source/test_mariadb.py
  80. 1 20
      tests/unit/hooks/data_source/test_mongodb.py
  81. 8 42
      tests/unit/hooks/data_source/test_mysql.py
  82. 11 47
      tests/unit/hooks/data_source/test_postgresql.py
  83. 3 12
      tests/unit/hooks/data_source/test_sqlite.py
  84. 0 16
      tests/unit/hooks/data_source/test_zfs.py
  85. 0 1
      tests/unit/hooks/monitoring/test_apprise.py
  86. 12 15
      tests/unit/hooks/test_command.py
  87. 17 19
      tests/unit/hooks/test_dispatch.py
  88. 8 0
      tests/unit/test_execute.py
  89. 137 2
      tests/unit/test_logger.py

+ 1 - 0
NEWS

@@ -1,4 +1,5 @@
 1.9.9.dev0
+ * #635: Log the repository path or label on every relevant log message, not just some logs.
  * #981: Fix a "spot" check file count delta error.
  * #982: Fix for borgmatic "exclude_patterns" and "exclude_from" recursing into excluded
    subdirectories.

+ 1 - 3
borgmatic/actions/borg.py

@@ -22,9 +22,7 @@ def run_borg(
     if borg_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, borg_arguments.repository
     ):
-        logger.info(
-            f'{repository.get("label", repository["path"])}: Running arbitrary Borg command'
-        )
+        logger.info('Running arbitrary Borg command')
         archive_name = borgmatic.borg.repo_list.resolve_archive_name(
             repository['path'],
             borg_arguments.archive,

+ 1 - 3
borgmatic/actions/break_lock.py

@@ -21,9 +21,7 @@ def run_break_lock(
     if break_lock_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, break_lock_arguments.repository
     ):
-        logger.info(
-            f'{repository.get("label", repository["path"])}: Breaking repository and cache locks'
-        )
+        logger.info('Breaking repository and cache locks')
         borgmatic.borg.break_lock.break_lock(
             repository['path'],
             config,

+ 1 - 3
borgmatic/actions/change_passphrase.py

@@ -24,9 +24,7 @@ def run_change_passphrase(
             repository, change_passphrase_arguments.repository
         )
     ):
-        logger.info(
-            f'{repository.get("label", repository["path"])}: Changing repository passphrase'
-        )
+        logger.info('Changing repository passphrase')
         borgmatic.borg.change_passphrase.change_passphrase(
             repository['path'],
             config,

+ 16 - 23
borgmatic/actions/check.py

@@ -363,7 +363,6 @@ def collect_spot_check_source_paths(
         borgmatic.hooks.dispatch.call_hooks(
             'use_streaming',
             config,
-            repository['path'],
             borgmatic.hooks.dispatch.Hook_type.DATA_SOURCE,
         ).values()
     )
@@ -468,15 +467,14 @@ def compare_spot_check_hashes(
     global_arguments,
     local_path,
     remote_path,
-    log_prefix,
     source_paths,
 ):
     '''
     Given a repository configuration dict, the name of the latest archive, a configuration dict, the
     local Borg version, global arguments as an argparse.Namespace instance, the local Borg path, the
-    remote Borg path, a log label, and spot check source paths, compare the hashes for a sampling of
-    the source paths with hashes from corresponding paths in the given archive. Return a sequence of
-    the paths that fail that hash comparison.
+    remote Borg path, and spot check source paths, compare the hashes for a sampling of the source
+    paths with hashes from corresponding paths in the given archive. Return a sequence of the paths
+    that fail that hash comparison.
     '''
     # Based on the configured sample percentage, come up with a list of random sample files from the
     # source directories.
@@ -492,7 +490,7 @@ def compare_spot_check_hashes(
         if os.path.exists(os.path.join(working_directory or '', source_path))
     }
     logger.debug(
-        f'{log_prefix}: Sampling {sample_count} source paths (~{spot_check_config["data_sample_percentage"]}%) for spot check'
+        f'Sampling {sample_count} source paths (~{spot_check_config["data_sample_percentage"]}%) for spot check'
     )
 
     source_sample_paths_iterator = iter(source_sample_paths)
@@ -580,8 +578,7 @@ def spot_check(
     disk to those stored in the latest archive. If any differences are beyond configured tolerances,
     then the check fails.
     '''
-    log_prefix = f'{repository.get("label", repository["path"])}'
-    logger.debug(f'{log_prefix}: Running spot check')
+    logger.debug('Running spot check')
 
     try:
         spot_check_config = next(
@@ -604,7 +601,7 @@ def spot_check(
         remote_path,
         borgmatic_runtime_directory,
     )
-    logger.debug(f'{log_prefix}: {len(source_paths)} total source paths for spot check')
+    logger.debug(f'{len(source_paths)} total source paths for spot check')
 
     archive = borgmatic.borg.repo_list.resolve_archive_name(
         repository['path'],
@@ -615,7 +612,7 @@ def spot_check(
         local_path,
         remote_path,
     )
-    logger.debug(f'{log_prefix}: Using archive {archive} for spot check')
+    logger.debug(f'Using archive {archive} for spot check')
 
     archive_paths = collect_spot_check_archive_paths(
         repository,
@@ -627,11 +624,11 @@ def spot_check(
         remote_path,
         borgmatic_runtime_directory,
     )
-    logger.debug(f'{log_prefix}: {len(archive_paths)} total archive paths for spot check')
+    logger.debug(f'{len(archive_paths)} total archive paths for spot check')
 
     if len(source_paths) == 0:
         logger.debug(
-            f'{log_prefix}: Paths in latest archive but not source paths: {", ".join(set(archive_paths)) or "none"}'
+            f'Paths in latest archive but not source paths: {", ".join(set(archive_paths)) or "none"}'
         )
         raise ValueError(
             'Spot check failed: There are no source paths to compare against the archive'
@@ -644,10 +641,10 @@ def spot_check(
     if count_delta_percentage > spot_check_config['count_tolerance_percentage']:
         rootless_source_paths = set(path.lstrip(os.path.sep) for path in source_paths)
         logger.debug(
-            f'{log_prefix}: Paths in source paths but not latest archive: {", ".join(rootless_source_paths - set(archive_paths)) or "none"}'
+            f'Paths in source paths but not latest archive: {", ".join(rootless_source_paths - set(archive_paths)) or "none"}'
         )
         logger.debug(
-            f'{log_prefix}: Paths in latest archive but not source paths: {", ".join(set(archive_paths) - rootless_source_paths) or "none"}'
+            f'Paths in latest archive but not source paths: {", ".join(set(archive_paths) - rootless_source_paths) or "none"}'
         )
         raise ValueError(
             f'Spot check failed: {count_delta_percentage:.2f}% file count delta between source paths and latest archive (tolerance is {spot_check_config["count_tolerance_percentage"]}%)'
@@ -661,25 +658,24 @@ def spot_check(
         global_arguments,
         local_path,
         remote_path,
-        log_prefix,
         source_paths,
     )
 
     # Error if the percentage of failing hashes exceeds the configured tolerance percentage.
-    logger.debug(f'{log_prefix}: {len(failing_paths)} non-matching spot check hashes')
+    logger.debug(f'{len(failing_paths)} non-matching spot check hashes')
     data_tolerance_percentage = spot_check_config['data_tolerance_percentage']
     failing_percentage = (len(failing_paths) / len(source_paths)) * 100
 
     if failing_percentage > data_tolerance_percentage:
         logger.debug(
-            f'{log_prefix}: Source paths with data not matching the latest archive: {", ".join(failing_paths)}'
+            f'Source paths with data not matching the latest archive: {", ".join(failing_paths)}'
         )
         raise ValueError(
             f'Spot check failed: {failing_percentage:.2f}% of source paths with data not matching the latest archive (tolerance is {data_tolerance_percentage}%)'
         )
 
     logger.info(
-        f'{log_prefix}: Spot check passed with a {count_delta_percentage:.2f}% file count delta and a {failing_percentage:.2f}% file data delta'
+        f'Spot check passed with a {count_delta_percentage:.2f}% file count delta and a {failing_percentage:.2f}% file data delta'
     )
 
 
@@ -713,8 +709,7 @@ def run_check(
         **hook_context,
     )
 
-    log_prefix = repository.get('label', repository['path'])
-    logger.info(f'{log_prefix}: Running consistency checks')
+    logger.info('Running consistency checks')
 
     repository_id = borgmatic.borg.check.get_repository_id(
         repository['path'],
@@ -767,9 +762,7 @@ def run_check(
         write_check_time(make_check_time_path(config, repository_id, 'extract'))
 
     if 'spot' in checks:
-        with borgmatic.config.paths.Runtime_directory(
-            config, log_prefix
-        ) as borgmatic_runtime_directory:
+        with borgmatic.config.paths.Runtime_directory(config) as borgmatic_runtime_directory:
             spot_check(
                 repository,
                 config,

+ 3 - 6
borgmatic/actions/compact.py

@@ -37,9 +37,7 @@ def run_compact(
         **hook_context,
     )
     if borgmatic.borg.feature.available(borgmatic.borg.feature.Feature.COMPACT, local_borg_version):
-        logger.info(
-            f'{repository.get("label", repository["path"])}: Compacting segments{dry_run_label}'
-        )
+        logger.info(f'Compacting segments{dry_run_label}')
         borgmatic.borg.compact.compact_segments(
             global_arguments.dry_run,
             repository['path'],
@@ -53,9 +51,8 @@ def run_compact(
             threshold=compact_arguments.threshold,
         )
     else:  # pragma: nocover
-        logger.info(
-            f'{repository.get("label", repository["path"])}: Skipping compact (only available/needed in Borg 1.2+)'
-        )
+        logger.info('Skipping compact (only available/needed in Borg 1.2+)')
+
     borgmatic.hooks.command.execute_hook(
         config.get('after_compact'),
         config.get('umask'),

+ 0 - 1
borgmatic/actions/config/bootstrap.py

@@ -45,7 +45,6 @@ def get_config_paths(archive_name, bootstrap_arguments, global_arguments, local_
     # still want to support reading the manifest from previously created archives as well.
     with borgmatic.config.paths.Runtime_directory(
         {'user_runtime_directory': bootstrap_arguments.user_runtime_directory},
-        bootstrap_arguments.repository,
     ) as borgmatic_runtime_directory:
         for base_directory in (
             'borgmatic',

+ 2 - 8
borgmatic/actions/create.py

@@ -283,17 +283,13 @@ def run_create(
         **hook_context,
     )
 
-    log_prefix = repository.get('label', repository['path'])
-    logger.info(f'{log_prefix}: Creating archive{dry_run_label}')
+    logger.info(f'Creating archive{dry_run_label}')
     working_directory = borgmatic.config.paths.get_working_directory(config)
 
-    with borgmatic.config.paths.Runtime_directory(
-        config, log_prefix
-    ) as borgmatic_runtime_directory:
+    with borgmatic.config.paths.Runtime_directory(config) as borgmatic_runtime_directory:
         borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
             'remove_data_source_dumps',
             config,
-            repository['path'],
             borgmatic.hooks.dispatch.Hook_type.DATA_SOURCE,
             borgmatic_runtime_directory,
             global_arguments.dry_run,
@@ -302,7 +298,6 @@ def run_create(
         active_dumps = borgmatic.hooks.dispatch.call_hooks(
             'dump_data_sources',
             config,
-            repository['path'],
             borgmatic.hooks.dispatch.Hook_type.DATA_SOURCE,
             config_paths,
             borgmatic_runtime_directory,
@@ -339,7 +334,6 @@ def run_create(
         borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
             'remove_data_source_dumps',
             config,
-            config_filename,
             borgmatic.hooks.dispatch.Hook_type.DATA_SOURCE,
             borgmatic_runtime_directory,
             global_arguments.dry_run,

+ 1 - 1
borgmatic/actions/delete.py

@@ -23,7 +23,7 @@ def run_delete(
     if delete_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, delete_arguments.repository
     ):
-        logger.answer(f'{repository.get("label", repository["path"])}: Deleting archives')
+        logger.answer('Deleting archives')
 
         archive_name = (
             borgmatic.borg.repo_list.resolve_archive_name(

+ 1 - 1
borgmatic/actions/export_key.py

@@ -21,7 +21,7 @@ def run_export_key(
     if export_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, export_arguments.repository
     ):
-        logger.info(f'{repository.get("label", repository["path"])}: Exporting repository key')
+        logger.info('Exporting repository key')
         borgmatic.borg.export_key.export_key(
             repository['path'],
             config,

+ 1 - 3
borgmatic/actions/export_tar.py

@@ -22,9 +22,7 @@ def run_export_tar(
     if export_tar_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, export_tar_arguments.repository
     ):
-        logger.info(
-            f'{repository["path"]}: Exporting archive {export_tar_arguments.archive} as tar file'
-        )
+        logger.info(f'Exporting archive {export_tar_arguments.archive} as tar file')
         borgmatic.borg.export_tar.export_tar_archive(
             global_arguments.dry_run,
             repository['path'],

+ 1 - 3
borgmatic/actions/extract.py

@@ -33,9 +33,7 @@ def run_extract(
     if extract_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, extract_arguments.repository
     ):
-        logger.info(
-            f'{repository.get("label", repository["path"])}: Extracting archive {extract_arguments.archive}'
-        )
+        logger.info(f'Extracting archive {extract_arguments.archive}')
         borgmatic.borg.extract.extract_archive(
             global_arguments.dry_run,
             repository['path'],

+ 1 - 3
borgmatic/actions/info.py

@@ -27,9 +27,7 @@ def run_info(
         repository, info_arguments.repository
     ):
         if not info_arguments.json:
-            logger.answer(
-                f'{repository.get("label", repository["path"])}: Displaying archive summary information'
-            )
+            logger.answer('Displaying archive summary information')
         archive_name = borgmatic.borg.repo_list.resolve_archive_name(
             repository['path'],
             info_arguments.archive,

+ 2 - 2
borgmatic/actions/list.py

@@ -27,9 +27,9 @@ def run_list(
     ):
         if not list_arguments.json:
             if list_arguments.find_paths:  # pragma: no cover
-                logger.answer(f'{repository.get("label", repository["path"])}: Searching archives')
+                logger.answer('Searching archives')
             elif not list_arguments.archive:  # pragma: no cover
-                logger.answer(f'{repository.get("label", repository["path"])}: Listing archives')
+                logger.answer('Listing archives')
 
         archive_name = borgmatic.borg.repo_list.resolve_archive_name(
             repository['path'],

+ 2 - 4
borgmatic/actions/mount.py

@@ -23,11 +23,9 @@ def run_mount(
         repository, mount_arguments.repository
     ):
         if mount_arguments.archive:
-            logger.info(
-                f'{repository.get("label", repository["path"])}: Mounting archive {mount_arguments.archive}'
-            )
+            logger.info(f'Mounting archive {mount_arguments.archive}')
         else:  # pragma: nocover
-            logger.info(f'{repository.get("label", repository["path"])}: Mounting repository')
+            logger.info('Mounting repository')
 
         borgmatic.borg.mount.mount_archive(
             repository['path'],

+ 1 - 1
borgmatic/actions/prune.py

@@ -35,7 +35,7 @@ def run_prune(
         global_arguments.dry_run,
         **hook_context,
     )
-    logger.info(f'{repository.get("label", repository["path"])}: Pruning archives{dry_run_label}')
+    logger.info(f'Pruning archives{dry_run_label}')
     borgmatic.borg.prune.prune_archives(
         global_arguments.dry_run,
         repository['path'],

+ 1 - 1
borgmatic/actions/repo_create.py

@@ -23,7 +23,7 @@ def run_repo_create(
     ):
         return
 
-    logger.info(f'{repository.get("label", repository["path"])}: Creating repository')
+    logger.info('Creating repository')
     borgmatic.borg.repo_create.create_repository(
         global_arguments.dry_run,
         repository['path'],

+ 1 - 2
borgmatic/actions/repo_delete.py

@@ -21,8 +21,7 @@ def run_repo_delete(
         repository, repo_delete_arguments.repository
     ):
         logger.answer(
-            f'{repository.get("label", repository["path"])}: Deleting repository'
-            + (' cache' if repo_delete_arguments.cache_only else '')
+            'Deleting repository' + (' cache' if repo_delete_arguments.cache_only else '')
         )
 
         borgmatic.borg.repo_delete.delete_repository(

+ 1 - 3
borgmatic/actions/repo_info.py

@@ -25,9 +25,7 @@ def run_repo_info(
         repository, repo_info_arguments.repository
     ):
         if not repo_info_arguments.json:
-            logger.answer(
-                f'{repository.get("label", repository["path"])}: Displaying repository summary information'
-            )
+            logger.answer('Displaying repository summary information')
 
         json_output = borgmatic.borg.repo_info.display_repository_info(
             repository['path'],

+ 1 - 1
borgmatic/actions/repo_list.py

@@ -25,7 +25,7 @@ def run_repo_list(
         repository, repo_list_arguments.repository
     ):
         if not repo_list_arguments.json:
-            logger.answer(f'{repository.get("label", repository["path"])}: Listing repository')
+            logger.answer('Listing repository')
 
         json_output = borgmatic.borg.repo_list.list_repository(
             repository['path'],

+ 6 - 18
borgmatic/actions/restore.py

@@ -71,10 +71,10 @@ def render_dump_metadata(dump):
     return metadata
 
 
-def get_configured_data_source(config, restore_dump, log_prefix):
+def get_configured_data_source(config, restore_dump):
     '''
     Search in the given configuration dict for dumps corresponding to the given dump to restore. If
-    there are multiple matches, error. Log using the given log prefix.
+    there are multiple matches, error.
 
     Return the found data source as a data source configuration dict or None if not found.
     '''
@@ -91,7 +91,6 @@ def get_configured_data_source(config, restore_dump, log_prefix):
             borgmatic.hooks.dispatch.call_hook(
                 function_name='get_default_port',
                 config=config,
-                log_prefix=log_prefix,
                 hook_name=hook_name,
             ),
         )
@@ -173,14 +172,11 @@ def restore_single_dump(
         Dump(hook_name, data_source['name'], data_source.get('hostname'), data_source.get('port'))
     )
 
-    logger.info(
-        f'{repository.get("label", repository["path"])}: Restoring data source {dump_metadata}'
-    )
+    logger.info(f'Restoring data source {dump_metadata}')
 
     dump_patterns = borgmatic.hooks.dispatch.call_hooks(
         'make_data_source_dump_patterns',
         config,
-        repository['path'],
         borgmatic.hooks.dispatch.Hook_type.DATA_SOURCE,
         borgmatic_runtime_directory,
         data_source['name'],
@@ -227,7 +223,6 @@ def restore_single_dump(
     borgmatic.hooks.dispatch.call_hook(
         function_name='restore_data_source_dump',
         config=config,
-        log_prefix=repository['path'],
         hook_name=hook_name,
         data_source=data_source,
         dry_run=global_arguments.dry_run,
@@ -319,7 +314,7 @@ def collect_dumps_from_archive(
             break
         else:
             logger.warning(
-                f'{repository}: Ignoring invalid data source dump path "{dump_path}" in archive {archive}'
+                f'Ignoring invalid data source dump path "{dump_path}" in archive {archive}'
             )
 
     return dumps_from_archive
@@ -444,16 +439,12 @@ def run_restore(
     ):
         return
 
-    log_prefix = repository.get('label', repository['path'])
-    logger.info(f'{log_prefix}: Restoring data sources from archive {restore_arguments.archive}')
+    logger.info(f'Restoring data sources from archive {restore_arguments.archive}')
 
-    with borgmatic.config.paths.Runtime_directory(
-        config, log_prefix
-    ) as borgmatic_runtime_directory:
+    with borgmatic.config.paths.Runtime_directory(config) as borgmatic_runtime_directory:
         borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
             'remove_data_source_dumps',
             config,
-            repository['path'],
             borgmatic.hooks.dispatch.Hook_type.DATA_SOURCE,
             borgmatic_runtime_directory,
             global_arguments.dry_run,
@@ -494,7 +485,6 @@ def run_restore(
             found_data_source = get_configured_data_source(
                 config,
                 restore_dump,
-                log_prefix=repository['path'],
             )
 
             # For a dump that wasn't found via an exact match in the configuration, try to fallback
@@ -503,7 +493,6 @@ def run_restore(
                 found_data_source = get_configured_data_source(
                     config,
                     Dump(restore_dump.hook_name, 'all', restore_dump.hostname, restore_dump.port),
-                    log_prefix=repository['path'],
                 )
 
                 if not found_data_source:
@@ -531,7 +520,6 @@ def run_restore(
         borgmatic.hooks.dispatch.call_hooks_even_if_unconfigured(
             'remove_data_source_dumps',
             config,
-            repository['path'],
             borgmatic.hooks.dispatch.Hook_type.DATA_SOURCE,
             borgmatic_runtime_directory,
             global_arguments.dry_run,

+ 1 - 3
borgmatic/actions/transfer.py

@@ -17,9 +17,7 @@ def run_transfer(
     '''
     Run the "transfer" action for the given repository.
     '''
-    logger.info(
-        f'{repository.get("label", repository["path"])}: Transferring archives to repository'
-    )
+    logger.info('Transferring archives to repository')
     borgmatic.borg.transfer.transfer_archives(
         global_arguments.dry_run,
         repository['path'],

+ 1 - 1
borgmatic/borg/change_passphrase.py

@@ -41,7 +41,7 @@ def change_passphrase(
     )
 
     if global_arguments.dry_run:
-        logger.info(f'{repository_path}: Skipping change password (dry run)')
+        logger.info('Skipping change password (dry run)')
         return
 
     # If the original passphrase is set programmatically, then Borg won't prompt for a new one! So

+ 1 - 1
borgmatic/borg/compact.py

@@ -43,7 +43,7 @@ def compact_segments(
     )
 
     if dry_run:
-        logging.info(f'{repository_path}: Skipping compact (dry run)')
+        logging.info('Skipping compact (dry run)')
         return
 
     execute_command(

+ 6 - 13
borgmatic/borg/create.py

@@ -20,14 +20,12 @@ from borgmatic.execute import (
 logger = logging.getLogger(__name__)
 
 
-def write_patterns_file(patterns, borgmatic_runtime_directory, log_prefix, patterns_file=None):
+def write_patterns_file(patterns, borgmatic_runtime_directory, patterns_file=None):
     '''
     Given a sequence of patterns as borgmatic.borg.pattern.Pattern instances, write them to a named
     temporary file in the given borgmatic runtime directory and return the file object so it can
     continue to exist on disk as long as the caller needs it.
 
-    Use the given log prefix in any logging.
-
     If an optional open pattern file is given, append to it instead of making a new temporary file.
     Return None if no patterns are provided.
     '''
@@ -45,9 +43,7 @@ def write_patterns_file(patterns, borgmatic_runtime_directory, log_prefix, patte
         f'{pattern.type.value} {pattern.style.value}{":" if pattern.style.value else ""}{pattern.path}'
         for pattern in patterns
     )
-    logger.debug(
-        f'{log_prefix}: {operation_name} patterns to {patterns_file.name}:\n{patterns_output}'
-    )
+    logger.debug(f'{operation_name} patterns to {patterns_file.name}:\n{patterns_output}')
 
     patterns_file.write(patterns_output)
     patterns_file.flush()
@@ -221,9 +217,7 @@ def make_base_create_command(
     if config.get('source_directories_must_exist', False):
         check_all_root_patterns_exist(patterns)
 
-    patterns_file = write_patterns_file(
-        patterns, borgmatic_runtime_directory, log_prefix=repository_path
-    )
+    patterns_file = write_patterns_file(patterns, borgmatic_runtime_directory)
     checkpoint_interval = config.get('checkpoint_interval', None)
     checkpoint_volume = config.get('checkpoint_volume', None)
     chunker_params = config.get('chunker_params', None)
@@ -303,12 +297,12 @@ def make_base_create_command(
     # cause Borg to hang. But skip this if the user has explicitly set the "read_special" to True.
     if stream_processes and not config.get('read_special'):
         logger.warning(
-            f'{repository_path}: Ignoring configured "read_special" value of false, as true is needed for database hooks.'
+            'Ignoring configured "read_special" value of false, as true is needed for database hooks.'
         )
         borg_environment = environment.make_environment(config)
         working_directory = borgmatic.config.paths.get_working_directory(config)
 
-        logger.debug(f'{repository_path}: Collecting special file paths')
+        logger.debug('Collecting special file paths')
         special_file_paths = collect_special_file_paths(
             dry_run,
             create_flags + create_positional_arguments,
@@ -326,7 +320,7 @@ def make_base_create_command(
                 placeholder=' ...',
             )
             logger.warning(
-                f'{repository_path}: Excluding special files to prevent Borg from hanging: {truncated_special_file_paths}'
+                f'Excluding special files to prevent Borg from hanging: {truncated_special_file_paths}'
             )
             patterns_file = write_patterns_file(
                 tuple(
@@ -338,7 +332,6 @@ def make_base_create_command(
                     for special_file_path in special_file_paths
                 ),
                 borgmatic_runtime_directory,
-                log_prefix=repository_path,
                 patterns_file=patterns_file,
             )
 

+ 1 - 1
borgmatic/borg/export_key.py

@@ -60,7 +60,7 @@ def export_key(
     )
 
     if global_arguments.dry_run:
-        logger.info(f'{repository_path}: Skipping key export (dry run)')
+        logger.info('Skipping key export (dry run)')
         return
 
     execute_command(

+ 1 - 1
borgmatic/borg/export_tar.py

@@ -63,7 +63,7 @@ def export_tar_archive(
         output_log_level = logging.INFO
 
     if dry_run:
-        logging.info(f'{repository_path}: Skipping export to tar file (dry run)')
+        logging.info('Skipping export to tar file (dry run)')
         return
 
     execute_command(

+ 1 - 1
borgmatic/borg/list.py

@@ -237,7 +237,7 @@ def list_archive(
 
     # For each archive listed by Borg, run list on the contents of that archive.
     for archive in archive_lines:
-        logger.answer(f'{repository_path}: Listing archive {archive}')
+        logger.answer(f'Listing archive {archive}')
 
         archive_arguments = copy.copy(list_arguments)
         archive_arguments.archive = archive

+ 2 - 2
borgmatic/borg/repo_create.py

@@ -57,7 +57,7 @@ def create_repository(
                 f'Requested encryption mode "{encryption_mode}" does not match existing repository encryption mode "{repository_encryption_mode}"'
             )
 
-        logger.info(f'{repository_path}: Repository already exists. Skipping creation.')
+        logger.info('Repository already exists. Skipping creation.')
         return
     except subprocess.CalledProcessError as error:
         if error.returncode not in REPO_INFO_REPOSITORY_NOT_FOUND_EXIT_CODES:
@@ -91,7 +91,7 @@ def create_repository(
     )
 
     if dry_run:
-        logging.info(f'{repository_path}: Skipping repository creation (dry run)')
+        logging.info('Skipping repository creation (dry run)')
         return
 
     # Do not capture output here, so as to support interactive prompts.

+ 1 - 1
borgmatic/borg/repo_list.py

@@ -59,7 +59,7 @@ def resolve_archive_name(
     except IndexError:
         raise ValueError('No archives found in the repository')
 
-    logger.debug(f'{repository_path}: Latest archive is {latest_archive}')
+    logger.debug(f'Latest archive is {latest_archive}')
 
     return latest_archive
 

+ 76 - 72
borgmatic/commands/borgmatic.py

@@ -39,7 +39,13 @@ from borgmatic.commands.arguments import parse_arguments
 from borgmatic.config import checks, collect, validate
 from borgmatic.hooks import command, dispatch
 from borgmatic.hooks.monitoring import monitor
-from borgmatic.logger import DISABLED, add_custom_log_levels, configure_logging, should_do_markup
+from borgmatic.logger import (
+    DISABLED,
+    Log_prefix,
+    add_custom_log_levels,
+    configure_logging,
+    should_do_markup,
+)
 from borgmatic.signals import configure_signals
 from borgmatic.verbosity import verbosity_to_log_level
 
@@ -86,12 +92,12 @@ def run_configuration(config_filename, config, config_paths, arguments):
 
     if skip_actions:
         logger.debug(
-            f"{config_filename}: Skipping {'/'.join(skip_actions)} action{'s' if len(skip_actions) > 1 else ''} due to configured skip_actions"
+            f"Skipping {'/'.join(skip_actions)} action{'s' if len(skip_actions) > 1 else ''} due to configured skip_actions"
         )
 
     try:
         local_borg_version = borg_version.local_borg_version(config, local_path)
-        logger.debug(f'{config_filename}: Borg {local_borg_version}')
+        logger.debug(f'Borg {local_borg_version}')
     except (OSError, CalledProcessError, ValueError) as error:
         yield from log_error_records(f'{config_filename}: Error getting local Borg version', error)
         return
@@ -101,8 +107,8 @@ def run_configuration(config_filename, config, config_paths, arguments):
             dispatch.call_hooks(
                 'initialize_monitor',
                 config,
-                config_filename,
                 dispatch.Hook_type.MONITORING,
+                config_filename,
                 monitoring_log_level,
                 global_arguments.dry_run,
             )
@@ -110,14 +116,14 @@ def run_configuration(config_filename, config, config_paths, arguments):
             dispatch.call_hooks(
                 'ping_monitor',
                 config,
-                config_filename,
                 dispatch.Hook_type.MONITORING,
+                config_filename,
                 monitor.State.START,
                 monitoring_log_level,
                 global_arguments.dry_run,
             )
     except (OSError, CalledProcessError) as error:
-        if command.considered_soft_failure(config_filename, error):
+        if command.considered_soft_failure(error):
             return
 
         encountered_error = error
@@ -132,53 +138,49 @@ def run_configuration(config_filename, config, config_paths, arguments):
 
         while not repo_queue.empty():
             repository, retry_num = repo_queue.get()
-            logger.debug(
-                f'{repository.get("label", repository["path"])}: Running actions for repository'
-            )
-            timeout = retry_num * retry_wait
-            if timeout:
-                logger.warning(
-                    f'{repository.get("label", repository["path"])}: Sleeping {timeout}s before next retry'
-                )
-                time.sleep(timeout)
-            try:
-                yield from run_actions(
-                    arguments=arguments,
-                    config_filename=config_filename,
-                    config=config,
-                    config_paths=config_paths,
-                    local_path=local_path,
-                    remote_path=remote_path,
-                    local_borg_version=local_borg_version,
-                    repository=repository,
-                )
-            except (OSError, CalledProcessError, ValueError) as error:
-                if retry_num < retries:
-                    repo_queue.put(
-                        (repository, retry_num + 1),
+
+            with Log_prefix(repository.get('label', repository['path'])):
+                logger.debug('Running actions for repository')
+                timeout = retry_num * retry_wait
+                if timeout:
+                    logger.warning(f'Sleeping {timeout}s before next retry')
+                    time.sleep(timeout)
+                try:
+                    yield from run_actions(
+                        arguments=arguments,
+                        config_filename=config_filename,
+                        config=config,
+                        config_paths=config_paths,
+                        local_path=local_path,
+                        remote_path=remote_path,
+                        local_borg_version=local_borg_version,
+                        repository=repository,
                     )
-                    tuple(  # Consume the generator so as to trigger logging.
-                        log_error_records(
-                            f'{repository.get("label", repository["path"])}: Error running actions for repository',
-                            error,
-                            levelno=logging.WARNING,
-                            log_command_error_output=True,
+                except (OSError, CalledProcessError, ValueError) as error:
+                    if retry_num < retries:
+                        repo_queue.put(
+                            (repository, retry_num + 1),
                         )
-                    )
-                    logger.warning(
-                        f'{repository.get("label", repository["path"])}: Retrying... attempt {retry_num + 1}/{retries}'
-                    )
-                    continue
+                        tuple(  # Consume the generator so as to trigger logging.
+                            log_error_records(
+                                'Error running actions for repository',
+                                error,
+                                levelno=logging.WARNING,
+                                log_command_error_output=True,
+                            )
+                        )
+                        logger.warning(f'Retrying... attempt {retry_num + 1}/{retries}')
+                        continue
 
-                if command.considered_soft_failure(config_filename, error):
-                    continue
+                    if command.considered_soft_failure(error):
+                        continue
 
-                yield from log_error_records(
-                    f'{repository.get("label", repository["path"])}: Error running actions for repository',
-                    error,
-                )
-                encountered_error = error
-                error_repository = repository['path']
+                    yield from log_error_records(
+                        'Error running actions for repository',
+                        error,
+                    )
+                    encountered_error = error
+                    error_repository = repository['path']
 
     try:
         if monitoring_hooks_are_activated:
@@ -186,16 +188,16 @@ def run_configuration(config_filename, config, config_paths, arguments):
             dispatch.call_hooks(
                 'ping_monitor',
                 config,
-                config_filename,
                 dispatch.Hook_type.MONITORING,
+                config_filename,
                 monitor.State.LOG,
                 monitoring_log_level,
                 global_arguments.dry_run,
             )
     except (OSError, CalledProcessError) as error:
-        if not command.considered_soft_failure(config_filename, error):
+        if not command.considered_soft_failure(error):
             encountered_error = error
-            yield from log_error_records(f'{repository["path"]}: Error pinging monitor', error)
+            yield from log_error_records('Error pinging monitor', error)
 
     if not encountered_error:
         try:
@@ -203,8 +205,8 @@ def run_configuration(config_filename, config, config_paths, arguments):
                 dispatch.call_hooks(
                     'ping_monitor',
                     config,
-                    config_filename,
                     dispatch.Hook_type.MONITORING,
+                    config_filename,
                     monitor.State.FINISH,
                     monitoring_log_level,
                     global_arguments.dry_run,
@@ -212,13 +214,12 @@ def run_configuration(config_filename, config, config_paths, arguments):
                 dispatch.call_hooks(
                     'destroy_monitor',
                     config,
-                    config_filename,
                     dispatch.Hook_type.MONITORING,
                     monitoring_log_level,
                     global_arguments.dry_run,
                 )
         except (OSError, CalledProcessError) as error:
-            if command.considered_soft_failure(config_filename, error):
+            if command.considered_soft_failure(error):
                 return
 
             encountered_error = error
@@ -239,8 +240,8 @@ def run_configuration(config_filename, config, config_paths, arguments):
             dispatch.call_hooks(
                 'ping_monitor',
                 config,
-                config_filename,
                 dispatch.Hook_type.MONITORING,
+                config_filename,
                 monitor.State.FAIL,
                 monitoring_log_level,
                 global_arguments.dry_run,
@@ -248,13 +249,12 @@ def run_configuration(config_filename, config, config_paths, arguments):
             dispatch.call_hooks(
                 'destroy_monitor',
                 config,
-                config_filename,
                 dispatch.Hook_type.MONITORING,
                 monitoring_log_level,
                 global_arguments.dry_run,
             )
         except (OSError, CalledProcessError) as error:
-            if command.considered_soft_failure(config_filename, error):
+            if command.considered_soft_failure(error):
                 return
 
             yield from log_error_records(f'{config_filename}: Error running on-error hook', error)
@@ -819,23 +819,27 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments):
 
     # Execute the actions corresponding to each configuration file.
     json_results = []
+
     for config_filename, config in configs.items():
-        results = list(run_configuration(config_filename, config, config_paths, arguments))
-        error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))
+        with Log_prefix(config_filename):
+            results = list(run_configuration(config_filename, config, config_paths, arguments))
+            error_logs = tuple(
+                result for result in results if isinstance(result, logging.LogRecord)
+            )
 
-        if error_logs:
-            yield from log_error_records(f'{config_filename}: An error occurred')
-            yield from error_logs
-        else:
-            yield logging.makeLogRecord(
-                dict(
-                    levelno=logging.INFO,
-                    levelname='INFO',
-                    msg=f'{config_filename}: Successfully ran configuration file',
+            if error_logs:
+                yield from log_error_records('An error occurred')
+                yield from error_logs
+            else:
+                yield logging.makeLogRecord(
+                    dict(
+                        levelno=logging.INFO,
+                        levelname='INFO',
+                        msg='Successfully ran configuration file',
+                    )
                 )
-            )
-            if results:
-                json_results.extend(results)
+                if results:
+                    json_results.extend(results)
 
     if 'umount' in arguments:
         logger.info(f"Unmounting mount point {arguments['umount'].mount_point}")

+ 7 - 8
borgmatic/config/paths.py

@@ -76,14 +76,13 @@ class Runtime_directory:
     automatically gets cleaned up as necessary.
     '''
 
-    def __init__(self, config, log_prefix):
+    def __init__(self, config):
         '''
-        Given a configuration dict and a log prefix, determine the borgmatic runtime directory,
-        creating a secure, temporary directory within it if necessary. Defaults to
-        $XDG_RUNTIME_DIR/./borgmatic or $RUNTIME_DIRECTORY/./borgmatic or
-        $TMPDIR/borgmatic-[random]/./borgmatic or $TEMP/borgmatic-[random]/./borgmatic or
-        /tmp/borgmatic-[random]/./borgmatic where "[random]" is a randomly generated string intended
-        to avoid path collisions.
+        Given a configuration dict determine the borgmatic runtime directory, creating a secure,
+        temporary directory within it if necessary. Defaults to $XDG_RUNTIME_DIR/./borgmatic or
+        $RUNTIME_DIRECTORY/./borgmatic or $TMPDIR/borgmatic-[random]/./borgmatic or
+        $TEMP/borgmatic-[random]/./borgmatic or /tmp/borgmatic-[random]/./borgmatic where "[random]"
+        is a randomly generated string intended to avoid path collisions.
 
         If XDG_RUNTIME_DIR or RUNTIME_DIRECTORY is set and already ends in "/borgmatic", then don't
         tack on a second "/borgmatic" path component.
@@ -127,7 +126,7 @@ class Runtime_directory:
         )
         os.makedirs(self.runtime_path, mode=0o700, exist_ok=True)
 
-        logger.debug(f'{log_prefix}: Using runtime directory {os.path.normpath(self.runtime_path)}')
+        logger.debug(f'Using runtime directory {os.path.normpath(self.runtime_path)}')
 
     def __enter__(self):
         '''

+ 10 - 7
borgmatic/execute.py

@@ -6,6 +6,8 @@ import select
 import subprocess
 import textwrap
 
+import borgmatic.logger
+
 logger = logging.getLogger(__name__)
 
 
@@ -309,13 +311,14 @@ def execute_command(
     if not run_to_completion:
         return process
 
-    log_outputs(
-        (process,),
-        (input_file, output_file),
-        output_log_level,
-        borg_local_path,
-        borg_exit_codes,
-    )
+    with borgmatic.logger.Log_prefix(None):  # Log command output without any prefix.
+        log_outputs(
+            (process,),
+            (input_file, output_file),
+            output_log_level,
+            borg_local_path,
+            borg_exit_codes,
+        )
 
 
 def execute_command_and_capture_output(

+ 9 - 11
borgmatic/hooks/command.py

@@ -12,7 +12,7 @@ logger = logging.getLogger(__name__)
 SOFT_FAIL_EXIT_CODE = 75
 
 
-def interpolate_context(config_filename, hook_description, command, context):
+def interpolate_context(hook_description, command, context):
     '''
     Given a config filename, a hook description, a single hook command, and a dict of context
     names/values, interpolate the values by "{name}" into the command and return the result.
@@ -22,7 +22,7 @@ def interpolate_context(config_filename, hook_description, command, context):
 
     for unsupported_variable in re.findall(r'{\w+}', command):
         logger.warning(
-            f"{config_filename}: Variable '{unsupported_variable}' is not supported in {hook_description} hook"
+            f"Variable '{unsupported_variable}' is not supported in {hook_description} hook"
         )
 
     return command
@@ -54,26 +54,24 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **conte
     Raise subprocesses.CalledProcessError if an error occurs in a hook.
     '''
     if not commands:
-        logger.debug(f'{config_filename}: No commands to run for {description} hook')
+        logger.debug(f'No commands to run for {description} hook')
         return
 
     dry_run_label = ' (dry run; not actually running hooks)' if dry_run else ''
 
     context['configuration_filename'] = config_filename
-    commands = [
-        interpolate_context(config_filename, description, command, context) for command in commands
-    ]
+    commands = [interpolate_context(description, command, context) for command in commands]
 
     if len(commands) == 1:
-        logger.info(f'{config_filename}: Running command for {description} hook{dry_run_label}')
+        logger.info(f'Running command for {description} hook{dry_run_label}')
     else:
         logger.info(
-            f'{config_filename}: Running {len(commands)} commands for {description} hook{dry_run_label}',
+            f'Running {len(commands)} commands for {description} hook{dry_run_label}',
         )
 
     if umask:
         parsed_umask = int(str(umask), 8)
-        logger.debug(f'{config_filename}: Set hook umask to {oct(parsed_umask)}')
+        logger.debug(f'Set hook umask to {oct(parsed_umask)}')
         original_umask = os.umask(parsed_umask)
     else:
         original_umask = None
@@ -94,7 +92,7 @@ def execute_hook(commands, umask, config_filename, description, dry_run, **conte
             os.umask(original_umask)
 
 
-def considered_soft_failure(config_filename, error):
+def considered_soft_failure(error):
     '''
     Given a configuration filename and an exception object, return whether the exception object
     represents a subprocess.CalledProcessError with a return code of SOFT_FAIL_EXIT_CODE. If so,
@@ -106,7 +104,7 @@ def considered_soft_failure(config_filename, error):
 
     if exit_code == SOFT_FAIL_EXIT_CODE:
         logger.info(
-            f'{config_filename}: Command hook exited with soft failure exit code ({SOFT_FAIL_EXIT_CODE}); skipping remaining repository actions',
+            f'Command hook exited with soft failure exit code ({SOFT_FAIL_EXIT_CODE}); skipping remaining repository actions',
         )
         return True
 

+ 13 - 17
borgmatic/hooks/data_source/bootstrap.py

@@ -10,7 +10,7 @@ import borgmatic.config.paths
 logger = logging.getLogger(__name__)
 
 
-def use_streaming(hook_config, config, log_prefix):  # pragma: no cover
+def use_streaming(hook_config, config):  # pragma: no cover
     '''
     Return whether dump streaming is used for this hook. (Spoiler: It isn't.)
     '''
@@ -20,18 +20,17 @@ def use_streaming(hook_config, config, log_prefix):  # pragma: no cover
 def dump_data_sources(
     hook_config,
     config,
-    log_prefix,
     config_paths,
     borgmatic_runtime_directory,
     patterns,
     dry_run,
 ):
     '''
-    Given a bootstrap configuration dict, a configuration dict, a log prefix, the borgmatic
-    configuration file paths, the borgmatic runtime directory, the configured patterns, and whether
-    this is a dry run, create a borgmatic manifest file to store the paths of the configuration
-    files used to create the archive. But skip this if the bootstrap store_config_files option is
-    False or if this is a dry run.
+    Given a bootstrap configuration dict, a configuration dict, the borgmatic configuration file
+    paths, the borgmatic runtime directory, the configured patterns, and whether this is a dry run,
+    create a borgmatic manifest file to store the paths of the configuration files used to create
+    the archive. But skip this if the bootstrap store_config_files option is False or if this is a
+    dry run.
 
     Return an empty sequence, since there are no ongoing dump processes from this hook.
     '''
@@ -64,11 +63,11 @@ def dump_data_sources(
     return []
 
 
-def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_directory, dry_run):
+def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, dry_run):
     '''
-    Given a bootstrap configuration dict, a configuration dict, a log prefix, the borgmatic runtime
-    directory, and whether this is a dry run, then remove the manifest file created above. If this
-    is a dry run, then don't actually remove anything.
+    Given a bootstrap configuration dict, a configuration dict, the borgmatic runtime directory, and
+    whether this is a dry run, then remove the manifest file created above. If this is a dry run,
+    then don't actually remove anything.
     '''
     dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
 
@@ -79,14 +78,12 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
         'bootstrap',
     )
     logger.debug(
-        f'{log_prefix}: Looking for bootstrap manifest files to remove in {manifest_glob}{dry_run_label}'
+        f'Looking for bootstrap manifest files to remove in {manifest_glob}{dry_run_label}'
     )
 
     for manifest_directory in glob.glob(manifest_glob):
         manifest_file_path = os.path.join(manifest_directory, 'manifest.json')
-        logger.debug(
-            f'{log_prefix}: Removing bootstrap manifest at {manifest_file_path}{dry_run_label}'
-        )
+        logger.debug(f'Removing bootstrap manifest at {manifest_file_path}{dry_run_label}')
 
         if dry_run:
             continue
@@ -103,7 +100,7 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
 
 
 def make_data_source_dump_patterns(
-    hook_config, config, log_prefix, borgmatic_runtime_directory, name=None
+    hook_config, config, borgmatic_runtime_directory, name=None
 ):  # pragma: no cover
     '''
     Restores are implemented via the separate, purpose-specific "bootstrap" action rather than the
@@ -115,7 +112,6 @@ def make_data_source_dump_patterns(
 def restore_data_source_dump(
     hook_config,
     config,
-    log_prefix,
     data_source,
     dry_run,
     extract_process,

+ 20 - 24
borgmatic/hooks/data_source/btrfs.py

@@ -14,7 +14,7 @@ import borgmatic.hooks.data_source.snapshot
 logger = logging.getLogger(__name__)
 
 
-def use_streaming(hook_config, config, log_prefix):  # pragma: no cover
+def use_streaming(hook_config, config):  # pragma: no cover
     '''
     Return whether dump streaming is used for this hook. (Spoiler: It isn't.)
     '''
@@ -179,26 +179,24 @@ def snapshot_subvolume(btrfs_command, subvolume_path, snapshot_path):  # pragma:
 def dump_data_sources(
     hook_config,
     config,
-    log_prefix,
     config_paths,
     borgmatic_runtime_directory,
     patterns,
     dry_run,
 ):
     '''
-    Given a Btrfs configuration dict, a configuration dict, a log prefix, the borgmatic
-    configuration file paths, the borgmatic runtime directory, the configured patterns, and whether
-    this is a dry run, auto-detect and snapshot any Btrfs subvolume mount points listed in the given
-    patterns. Also update those patterns, replacing subvolume mount points with corresponding
-    snapshot directories so they get stored in the Borg archive instead. Use the log prefix in any
-    log entries.
+    Given a Btrfs configuration dict, a configuration dict, the borgmatic configuration file paths,
+    the borgmatic runtime directory, the configured patterns, and whether this is a dry run,
+    auto-detect and snapshot any Btrfs subvolume mount points listed in the given patterns. Also
+    update those patterns, replacing subvolume mount points with corresponding snapshot directories
+    so they get stored in the Borg archive instead.
 
     Return an empty sequence, since there are no ongoing dump processes from this hook.
 
     If this is a dry run, then don't actually snapshot anything.
     '''
     dry_run_label = ' (dry run; not actually snapshotting anything)' if dry_run else ''
-    logger.info(f'{log_prefix}: Snapshotting Btrfs subvolumes{dry_run_label}')
+    logger.info(f'Snapshotting Btrfs subvolumes{dry_run_label}')
 
     # Based on the configured patterns, determine Btrfs subvolumes to backup.
     btrfs_command = hook_config.get('btrfs_command', 'btrfs')
@@ -206,11 +204,11 @@ def dump_data_sources(
     subvolumes = get_subvolumes(btrfs_command, findmnt_command, patterns)
 
     if not subvolumes:
-        logger.warning(f'{log_prefix}: No Btrfs subvolumes found to snapshot{dry_run_label}')
+        logger.warning(f'No Btrfs subvolumes found to snapshot{dry_run_label}')
 
     # Snapshot each subvolume, rewriting patterns to use their snapshot paths.
     for subvolume in subvolumes:
-        logger.debug(f'{log_prefix}: Creating Btrfs snapshot for {subvolume.path} subvolume')
+        logger.debug(f'Creating Btrfs snapshot for {subvolume.path} subvolume')
 
         snapshot_path = make_snapshot_path(subvolume.path)
 
@@ -248,12 +246,11 @@ def delete_snapshot(btrfs_command, snapshot_path):  # pragma: no cover
     )
 
 
-def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_directory, dry_run):
+def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, dry_run):
     '''
-    Given a Btrfs configuration dict, a configuration dict, a log prefix, the borgmatic runtime
-    directory, and whether this is a dry run, delete any Btrfs snapshots created by borgmatic. Use
-    the log prefix in any log entries. If this is a dry run or Btrfs isn't configured in borgmatic's
-    configuration, then don't actually remove anything.
+    Given a Btrfs configuration dict, a configuration dict, the borgmatic runtime directory, and
+    whether this is a dry run, delete any Btrfs snapshots created by borgmatic. If this is a dry run
+    or Btrfs isn't configured in borgmatic's configuration, then don't actually remove anything.
     '''
     if hook_config is None:
         return
@@ -266,10 +263,10 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
     try:
         all_subvolumes = get_subvolumes(btrfs_command, findmnt_command)
     except FileNotFoundError as error:
-        logger.debug(f'{log_prefix}: Could not find "{error.filename}" command')
+        logger.debug(f'Could not find "{error.filename}" command')
         return
     except subprocess.CalledProcessError as error:
-        logger.debug(f'{log_prefix}: {error}')
+        logger.debug(error)
         return
 
     # Reversing the sorted subvolumes ensures that we remove longer mount point paths of child
@@ -281,14 +278,14 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
         )
 
         logger.debug(
-            f'{log_prefix}: Looking for snapshots to remove in {subvolume_snapshots_glob}{dry_run_label}'
+            f'Looking for snapshots to remove in {subvolume_snapshots_glob}{dry_run_label}'
         )
 
         for snapshot_path in glob.glob(subvolume_snapshots_glob):
             if not os.path.isdir(snapshot_path):
                 continue
 
-            logger.debug(f'{log_prefix}: Deleting Btrfs snapshot {snapshot_path}{dry_run_label}')
+            logger.debug(f'Deleting Btrfs snapshot {snapshot_path}{dry_run_label}')
 
             if dry_run:
                 continue
@@ -296,10 +293,10 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
             try:
                 delete_snapshot(btrfs_command, snapshot_path)
             except FileNotFoundError:
-                logger.debug(f'{log_prefix}: Could not find "{btrfs_command}" command')
+                logger.debug(f'Could not find "{btrfs_command}" command')
                 return
             except subprocess.CalledProcessError as error:
-                logger.debug(f'{log_prefix}: {error}')
+                logger.debug(error)
                 return
 
             # Strip off the subvolume path from the end of the snapshot path and then delete the
@@ -308,7 +305,7 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
 
 
 def make_data_source_dump_patterns(
-    hook_config, config, log_prefix, borgmatic_runtime_directory, name=None
+    hook_config, config, borgmatic_runtime_directory, name=None
 ):  # pragma: no cover
     '''
     Restores aren't implemented, because stored files can be extracted directly with "extract".
@@ -319,7 +316,6 @@ def make_data_source_dump_patterns(
 def restore_data_source_dump(
     hook_config,
     config,
-    log_prefix,
     data_source,
     dry_run,
     extract_process,

+ 2 - 2
borgmatic/hooks/data_source/dump.py

@@ -46,14 +46,14 @@ def create_named_pipe_for_dump(dump_path):
     os.mkfifo(dump_path, mode=0o600)
 
 
-def remove_data_source_dumps(dump_path, data_source_type_name, log_prefix, dry_run):
+def remove_data_source_dumps(dump_path, data_source_type_name, dry_run):
     '''
     Remove all data source dumps in the given dump directory path (including the directory itself).
     If this is a dry run, then don't actually remove anything.
     '''
     dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
 
-    logger.debug(f'{log_prefix}: Removing {data_source_type_name} data source dumps{dry_run_label}')
+    logger.debug(f'Removing {data_source_type_name} data source dumps{dry_run_label}')
 
     if dry_run:
         return

+ 25 - 32
borgmatic/hooks/data_source/lvm.py

@@ -14,7 +14,7 @@ import borgmatic.hooks.data_source.snapshot
 logger = logging.getLogger(__name__)
 
 
-def use_streaming(hook_config, config, log_prefix):  # pragma: no cover
+def use_streaming(hook_config, config):  # pragma: no cover
     '''
     Return whether dump streaming is used for this hook. (Spoiler: It isn't.)
     '''
@@ -161,26 +161,24 @@ DEFAULT_SNAPSHOT_SIZE = '10%ORIGIN'
 def dump_data_sources(
     hook_config,
     config,
-    log_prefix,
     config_paths,
     borgmatic_runtime_directory,
     patterns,
     dry_run,
 ):
     '''
-    Given an LVM configuration dict, a configuration dict, a log prefix, the borgmatic configuration
-    file paths, the borgmatic runtime directory, the configured patterns, and whether this is a dry
-    run, auto-detect and snapshot any LVM logical volume mount points listed in the given patterns.
-    Also update those patterns, replacing logical volume mount points with corresponding snapshot
-    directories so they get stored in the Borg archive instead. Use the log prefix in any log
-    entries.
+    Given an LVM configuration dict, a configuration dict, the borgmatic configuration file paths,
+    the borgmatic runtime directory, the configured patterns, and whether this is a dry run,
+    auto-detect and snapshot any LVM logical volume mount points listed in the given patterns. Also
+    update those patterns, replacing logical volume mount points with corresponding snapshot
+    directories so they get stored in the Borg archive instead.
 
     Return an empty sequence, since there are no ongoing dump processes from this hook.
 
     If this is a dry run, then don't actually snapshot anything.
     '''
     dry_run_label = ' (dry run; not actually snapshotting anything)' if dry_run else ''
-    logger.info(f'{log_prefix}: Snapshotting LVM logical volumes{dry_run_label}')
+    logger.info(f'Snapshotting LVM logical volumes{dry_run_label}')
 
     # List logical volumes to get their mount points.
     lsblk_command = hook_config.get('lsblk_command', 'lsblk')
@@ -191,12 +189,12 @@ def dump_data_sources(
     normalized_runtime_directory = os.path.normpath(borgmatic_runtime_directory)
 
     if not requested_logical_volumes:
-        logger.warning(f'{log_prefix}: No LVM logical volumes found to snapshot{dry_run_label}')
+        logger.warning(f'No LVM logical volumes found to snapshot{dry_run_label}')
 
     for logical_volume in requested_logical_volumes:
         snapshot_name = f'{logical_volume.name}_{snapshot_suffix}'
         logger.debug(
-            f'{log_prefix}: Creating LVM snapshot {snapshot_name} of {logical_volume.mount_point}{dry_run_label}'
+            f'Creating LVM snapshot {snapshot_name} of {logical_volume.mount_point}{dry_run_label}'
         )
 
         if not dry_run:
@@ -224,7 +222,7 @@ def dump_data_sources(
         )
 
         logger.debug(
-            f'{log_prefix}: Mounting LVM snapshot {snapshot_name} at {snapshot_mount_path}{dry_run_label}'
+            f'Mounting LVM snapshot {snapshot_name} at {snapshot_mount_path}{dry_run_label}'
         )
 
         if dry_run:
@@ -312,12 +310,12 @@ def get_snapshots(lvs_command, snapshot_name=None):
         raise ValueError(f'Invalid {lvs_command} output: Missing key "{error}"')
 
 
-def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_directory, dry_run):
+def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, dry_run):
     '''
-    Given an LVM configuration dict, a configuration dict, a log prefix, the borgmatic runtime
-    directory, and whether this is a dry run, unmount and delete any LVM snapshots created by
-    borgmatic. Use the log prefix in any log entries. If this is a dry run or LVM isn't configured
-    in borgmatic's configuration, then don't actually remove anything.
+    Given an LVM configuration dict, a configuration dict, the borgmatic runtime directory, and
+    whether this is a dry run, unmount and delete any LVM snapshots created by borgmatic. If this is
+    a dry run or LVM isn't configured in borgmatic's configuration, then don't actually remove
+    anything.
     '''
     if hook_config is None:
         return
@@ -328,10 +326,10 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
     try:
         logical_volumes = get_logical_volumes(hook_config.get('lsblk_command', 'lsblk'))
     except FileNotFoundError as error:
-        logger.debug(f'{log_prefix}: Could not find "{error.filename}" command')
+        logger.debug(f'Could not find "{error.filename}" command')
         return
     except subprocess.CalledProcessError as error:
-        logger.debug(f'{log_prefix}: {error}')
+        logger.debug(error)
         return
 
     snapshots_glob = os.path.join(
@@ -340,9 +338,7 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
         ),
         'lvm_snapshots',
     )
-    logger.debug(
-        f'{log_prefix}: Looking for snapshots to remove in {snapshots_glob}{dry_run_label}'
-    )
+    logger.debug(f'Looking for snapshots to remove in {snapshots_glob}{dry_run_label}')
     umount_command = hook_config.get('umount_command', 'umount')
 
     for snapshots_directory in glob.glob(snapshots_glob):
@@ -366,9 +362,7 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
                 if not os.path.isdir(snapshot_mount_path):
                     continue
 
-            logger.debug(
-                f'{log_prefix}: Unmounting LVM snapshot at {snapshot_mount_path}{dry_run_label}'
-            )
+            logger.debug(f'Unmounting LVM snapshot at {snapshot_mount_path}{dry_run_label}')
 
             if dry_run:
                 continue
@@ -376,10 +370,10 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
             try:
                 unmount_snapshot(umount_command, snapshot_mount_path)
             except FileNotFoundError:
-                logger.debug(f'{log_prefix}: Could not find "{umount_command}" command')
+                logger.debug(f'Could not find "{umount_command}" command')
                 return
             except subprocess.CalledProcessError as error:
-                logger.debug(f'{log_prefix}: {error}')
+                logger.debug(error)
                 return
 
         if not dry_run:
@@ -391,10 +385,10 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
     try:
         snapshots = get_snapshots(hook_config.get('lvs_command', 'lvs'))
     except FileNotFoundError as error:
-        logger.debug(f'{log_prefix}: Could not find "{error.filename}" command')
+        logger.debug(f'Could not find "{error.filename}" command')
         return
     except subprocess.CalledProcessError as error:
-        logger.debug(f'{log_prefix}: {error}')
+        logger.debug(error)
         return
 
     for snapshot in snapshots:
@@ -402,14 +396,14 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
         if not snapshot.name.split('_')[-1].startswith(BORGMATIC_SNAPSHOT_PREFIX):
             continue
 
-        logger.debug(f'{log_prefix}: Deleting LVM snapshot {snapshot.name}{dry_run_label}')
+        logger.debug(f'Deleting LVM snapshot {snapshot.name}{dry_run_label}')
 
         if not dry_run:
             remove_snapshot(lvremove_command, snapshot.device_path)
 
 
 def make_data_source_dump_patterns(
-    hook_config, config, log_prefix, borgmatic_runtime_directory, name=None
+    hook_config, config, borgmatic_runtime_directory, name=None
 ):  # pragma: no cover
     '''
     Restores aren't implemented, because stored files can be extracted directly with "extract".
@@ -420,7 +414,6 @@ def make_data_source_dump_patterns(
 def restore_data_source_dump(
     hook_config,
     config,
-    log_prefix,
     data_source,
     dry_run,
     extract_process,

+ 25 - 37
borgmatic/hooks/data_source/mariadb.py

@@ -25,7 +25,7 @@ def make_dump_path(base_directory):  # pragma: no cover
 SYSTEM_DATABASE_NAMES = ('information_schema', 'mysql', 'performance_schema', 'sys')
 
 
-def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
+def database_names_to_dump(database, extra_environment, dry_run):
     '''
     Given a requested database config, return the corresponding sequence of database names to dump.
     In the case of "all", query for the names of databases on the configured host and return them,
@@ -49,7 +49,7 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
         + ('--skip-column-names', '--batch')
         + ('--execute', 'show schemas')
     )
-    logger.debug(f'{log_prefix}: Querying for "all" MariaDB databases to dump')
+    logger.debug('Querying for "all" MariaDB databases to dump')
     show_output = execute_command_and_capture_output(
         show_command, extra_environment=extra_environment
     )
@@ -62,12 +62,11 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
 
 
 def execute_dump_command(
-    database, log_prefix, dump_path, database_names, extra_environment, dry_run, dry_run_label
+    database, dump_path, database_names, extra_environment, dry_run, dry_run_label
 ):
     '''
     Kick off a dump for the given MariaDB database (provided as a configuration dict) to a named
-    pipe constructed from the given dump path and database name. Use the given log prefix in any
-    log entries.
+    pipe constructed from the given dump path and database name.
 
     Return a subprocess.Popen instance for the dump process ready to spew to a named pipe. But if
     this is a dry run, then don't actually dump anything and return None.
@@ -82,7 +81,7 @@ def execute_dump_command(
 
     if os.path.exists(dump_filename):
         logger.warning(
-            f'{log_prefix}: Skipping duplicate dump of MariaDB database "{database_name}" to {dump_filename}'
+            f'Skipping duplicate dump of MariaDB database "{database_name}" to {dump_filename}'
         )
         return None
 
@@ -103,9 +102,7 @@ def execute_dump_command(
         + ('--result-file', dump_filename)
     )
 
-    logger.debug(
-        f'{log_prefix}: Dumping MariaDB database "{database_name}" to {dump_filename}{dry_run_label}'
-    )
+    logger.debug(f'Dumping MariaDB database "{database_name}" to {dump_filename}{dry_run_label}')
     if dry_run:
         return None
 
@@ -118,14 +115,14 @@ def execute_dump_command(
     )
 
 
-def get_default_port(databases, config, log_prefix):  # pragma: no cover
+def get_default_port(databases, config):  # pragma: no cover
     return 3306
 
 
-def use_streaming(databases, config, log_prefix):
+def use_streaming(databases, config):
     '''
-    Given a sequence of MariaDB database configuration dicts, a configuration dict (ignored), and a
-    log prefix (ignored), return whether streaming will be using during dumps.
+    Given a sequence of MariaDB database configuration dicts and a configuration dict (ignored),
+    return whether streaming will be used during dumps.
     '''
     return any(databases)
 
@@ -133,7 +130,6 @@ def use_streaming(databases, config, log_prefix):
 def dump_data_sources(
     databases,
     config,
-    log_prefix,
     config_paths,
     borgmatic_runtime_directory,
     patterns,
@@ -142,8 +138,7 @@ def dump_data_sources(
     '''
     Dump the given MariaDB databases to a named pipe. The databases are supplied as a sequence of
     dicts, one dict describing each database as per the configuration schema. Use the given
-    borgmatic runtime directory to construct the destination path and the given log prefix in any
-    log entries.
+    borgmatic runtime directory to construct the destination path.
 
     Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
     pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
@@ -153,14 +148,12 @@ def dump_data_sources(
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
     processes = []
 
-    logger.info(f'{log_prefix}: Dumping MariaDB databases{dry_run_label}')
+    logger.info(f'Dumping MariaDB databases{dry_run_label}')
 
     for database in databases:
         dump_path = make_dump_path(borgmatic_runtime_directory)
         extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
-        dump_database_names = database_names_to_dump(
-            database, extra_environment, log_prefix, dry_run
-        )
+        dump_database_names = database_names_to_dump(database, extra_environment, dry_run)
 
         if not dump_database_names:
             if dry_run:
@@ -175,7 +168,6 @@ def dump_data_sources(
                 processes.append(
                     execute_dump_command(
                         renamed_database,
-                        log_prefix,
                         dump_path,
                         (dump_name,),
                         extra_environment,
@@ -187,7 +179,6 @@ def dump_data_sources(
             processes.append(
                 execute_dump_command(
                     database,
-                    log_prefix,
                     dump_path,
                     dump_database_names,
                     extra_environment,
@@ -207,25 +198,23 @@ def dump_data_sources(
 
 
 def remove_data_source_dumps(
-    databases, config, log_prefix, borgmatic_runtime_directory, dry_run
+    databases, config, borgmatic_runtime_directory, dry_run
 ):  # pragma: no cover
     '''
     Remove all database dump files for this hook regardless of the given databases. Use the
-    borgmatic_runtime_directory to construct the destination path and the log prefix in any log
-    entries. If this is a dry run, then don't actually remove anything.
+    borgmatic_runtime_directory to construct the destination path. If this is a dry run, then don't
+    actually remove anything.
     '''
-    dump.remove_data_source_dumps(
-        make_dump_path(borgmatic_runtime_directory), 'MariaDB', log_prefix, dry_run
-    )
+    dump.remove_data_source_dumps(make_dump_path(borgmatic_runtime_directory), 'MariaDB', dry_run)
 
 
 def make_data_source_dump_patterns(
-    databases, config, log_prefix, borgmatic_runtime_directory, name=None
+    databases, config, borgmatic_runtime_directory, name=None
 ):  # pragma: no cover
     '''
-    Given a sequence of configurations dicts, a configuration dict, a prefix to log with, the
-    borgmatic runtime directory, and a database name to match, return the corresponding glob
-    patterns to match the database dump in an archive.
+    Given a sequence of configuration dicts, a configuration dict, the borgmatic runtime directory,
+    and a database name to match, return the corresponding glob patterns to match the database dump
+    in an archive.
     '''
     borgmatic_source_directory = borgmatic.config.paths.get_borgmatic_source_directory(config)
 
@@ -243,7 +232,6 @@ def make_data_source_dump_patterns(
 def restore_data_source_dump(
     hook_config,
     config,
-    log_prefix,
     data_source,
     dry_run,
     extract_process,
@@ -252,9 +240,9 @@ def restore_data_source_dump(
 ):
     '''
     Restore a database from the given extract stream. The database is supplied as a data source
-    configuration dict, but the given hook configuration is ignored. The given log prefix is used
-    for any log entries. If this is a dry run, then don't actually restore anything. Trigger the
-    given active extract process (an instance of subprocess.Popen) to produce output to consume.
+    configuration dict, but the given hook configuration is ignored. If this is a dry run, then
+    don't actually restore anything. Trigger the given active extract process (an instance of
+    subprocess.Popen) to produce output to consume.
     '''
     dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
     hostname = connection_params['hostname'] or data_source.get(
@@ -288,7 +276,7 @@ def restore_data_source_dump(
     )
     extra_environment = {'MYSQL_PWD': password} if password else None
 
-    logger.debug(f"{log_prefix}: Restoring MariaDB database {data_source['name']}{dry_run_label}")
+    logger.debug(f"Restoring MariaDB database {data_source['name']}{dry_run_label}")
     if dry_run:
         return
 

+ 19 - 24
borgmatic/hooks/data_source/mongodb.py

@@ -17,14 +17,14 @@ def make_dump_path(base_directory):  # pragma: no cover
     return dump.make_data_source_dump_path(base_directory, 'mongodb_databases')
 
 
-def get_default_port(databases, config, log_prefix):  # pragma: no cover
+def get_default_port(databases, config):  # pragma: no cover
     return 27017
 
 
-def use_streaming(databases, config, log_prefix):
+def use_streaming(databases, config):
     '''
-    Given a sequence of MongoDB database configuration dicts, a configuration dict (ignored), and a
-    log prefix (ignored), return whether streaming will be using during dumps.
+    Given a sequence of MongoDB database configuration dicts and a configuration dict (ignored),
+    return whether streaming will be used during dumps.
     '''
     return any(database.get('format') != 'directory' for database in databases)
 
@@ -32,7 +32,6 @@ def use_streaming(databases, config, log_prefix):
 def dump_data_sources(
     databases,
     config,
-    log_prefix,
     config_paths,
     borgmatic_runtime_directory,
     patterns,
@@ -41,8 +40,7 @@ def dump_data_sources(
     '''
     Dump the given MongoDB databases to a named pipe. The databases are supplied as a sequence of
     dicts, one dict describing each database as per the configuration schema. Use the borgmatic
-    runtime directory to construct the destination path (used for the directory format and the given
-    log prefix in any log entries.
+    runtime directory to construct the destination path (used for the directory format).
 
     Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
     pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
@@ -51,7 +49,7 @@ def dump_data_sources(
     '''
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
 
-    logger.info(f'{log_prefix}: Dumping MongoDB databases{dry_run_label}')
+    logger.info(f'Dumping MongoDB databases{dry_run_label}')
 
     processes = []
     for database in databases:
@@ -65,7 +63,7 @@ def dump_data_sources(
         dump_format = database.get('format', 'archive')
 
         logger.debug(
-            f'{log_prefix}: Dumping MongoDB database {name} to {dump_filename}{dry_run_label}',
+            f'Dumping MongoDB database {name} to {dump_filename}{dry_run_label}',
         )
         if dry_run:
             continue
@@ -118,25 +116,23 @@ def build_dump_command(database, dump_filename, dump_format):
 
 
 def remove_data_source_dumps(
-    databases, config, log_prefix, borgmatic_runtime_directory, dry_run
+    databases, config, borgmatic_runtime_directory, dry_run
 ):  # pragma: no cover
     '''
     Remove all database dump files for this hook regardless of the given databases. Use the
-    borgmatic_runtime_directory to construct the destination path and the log prefix in any log
-    entries. If this is a dry run, then don't actually remove anything.
+    borgmatic_runtime_directory to construct the destination path. If this is a dry run, then don't
+    actually remove anything.
     '''
-    dump.remove_data_source_dumps(
-        make_dump_path(borgmatic_runtime_directory), 'MongoDB', log_prefix, dry_run
-    )
+    dump.remove_data_source_dumps(make_dump_path(borgmatic_runtime_directory), 'MongoDB', dry_run)
 
 
 def make_data_source_dump_patterns(
-    databases, config, log_prefix, borgmatic_runtime_directory, name=None
+    databases, config, borgmatic_runtime_directory, name=None
 ):  # pragma: no cover
     '''
-    Given a sequence of configurations dicts, a configuration dict, a prefix to log with, the
-    borgmatic runtime directory, and a database name to match, return the corresponding glob
-    patterns to match the database dump in an archive.
+    Given a sequence of configuration dicts, a configuration dict, the borgmatic runtime directory,
+    and a database name to match, return the corresponding glob patterns to match the database dump
+    in an archive.
     '''
     borgmatic_source_directory = borgmatic.config.paths.get_borgmatic_source_directory(config)
 
@@ -154,7 +150,6 @@ def make_data_source_dump_patterns(
 def restore_data_source_dump(
     hook_config,
     config,
-    log_prefix,
     data_source,
     dry_run,
     extract_process,
@@ -164,9 +159,9 @@ def restore_data_source_dump(
     '''
     Restore a database from the given extract stream. The database is supplied as a data source
     configuration dict, but the given hook configuration is ignored. The given configuration dict is
-    used to construct the destination path, and the given log prefix is used for any log entries. If
-    this is a dry run, then don't actually restore anything. Trigger the given active extract
-    process (an instance of subprocess.Popen) to produce output to consume.
+    used to construct the destination path. If this is a dry run, then don't actually restore
+    anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
+    output to consume.
 
     If the extract process is None, then restore the dump from the filesystem rather than from an
     extract stream.
@@ -181,7 +176,7 @@ def restore_data_source_dump(
         extract_process, data_source, dump_filename, connection_params
     )
 
-    logger.debug(f"{log_prefix}: Restoring MongoDB database {data_source['name']}{dry_run_label}")
+    logger.debug(f"Restoring MongoDB database {data_source['name']}{dry_run_label}")
     if dry_run:
         return
 

+ 25 - 37
borgmatic/hooks/data_source/mysql.py

@@ -25,7 +25,7 @@ def make_dump_path(base_directory):  # pragma: no cover
 SYSTEM_DATABASE_NAMES = ('information_schema', 'mysql', 'performance_schema', 'sys')
 
 
-def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
+def database_names_to_dump(database, extra_environment, dry_run):
     '''
     Given a requested database config, return the corresponding sequence of database names to dump.
     In the case of "all", query for the names of databases on the configured host and return them,
@@ -49,7 +49,7 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
         + ('--skip-column-names', '--batch')
         + ('--execute', 'show schemas')
     )
-    logger.debug(f'{log_prefix}: Querying for "all" MySQL databases to dump')
+    logger.debug('Querying for "all" MySQL databases to dump')
     show_output = execute_command_and_capture_output(
         show_command, extra_environment=extra_environment
     )
@@ -62,12 +62,11 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
 
 
 def execute_dump_command(
-    database, log_prefix, dump_path, database_names, extra_environment, dry_run, dry_run_label
+    database, dump_path, database_names, extra_environment, dry_run, dry_run_label
 ):
     '''
     Kick off a dump for the given MySQL/MariaDB database (provided as a configuration dict) to a
-    named pipe constructed from the given dump path and database name. Use the given log prefix in
-    any log entries.
+    named pipe constructed from the given dump path and database name.
 
     Return a subprocess.Popen instance for the dump process ready to spew to a named pipe. But if
     this is a dry run, then don't actually dump anything and return None.
@@ -82,7 +81,7 @@ def execute_dump_command(
 
     if os.path.exists(dump_filename):
         logger.warning(
-            f'{log_prefix}: Skipping duplicate dump of MySQL database "{database_name}" to {dump_filename}'
+            f'Skipping duplicate dump of MySQL database "{database_name}" to {dump_filename}'
         )
         return None
 
@@ -102,9 +101,7 @@ def execute_dump_command(
         + ('--result-file', dump_filename)
     )
 
-    logger.debug(
-        f'{log_prefix}: Dumping MySQL database "{database_name}" to {dump_filename}{dry_run_label}'
-    )
+    logger.debug(f'Dumping MySQL database "{database_name}" to {dump_filename}{dry_run_label}')
     if dry_run:
         return None
 
@@ -117,14 +114,14 @@ def execute_dump_command(
     )
 
 
-def get_default_port(databases, config, log_prefix):  # pragma: no cover
+def get_default_port(databases, config):  # pragma: no cover
     return 3306
 
 
-def use_streaming(databases, config, log_prefix):
+def use_streaming(databases, config):
     '''
-    Given a sequence of MySQL database configuration dicts, a configuration dict (ignored), and a
-    log prefix (ignored), return whether streaming will be using during dumps.
+    Given a sequence of MySQL database configuration dicts and a configuration dict (ignored),
+    return whether streaming will be used during dumps.
     '''
     return any(databases)
 
@@ -132,7 +129,6 @@ def use_streaming(databases, config, log_prefix):
 def dump_data_sources(
     databases,
     config,
-    log_prefix,
     config_paths,
     borgmatic_runtime_directory,
     patterns,
@@ -141,8 +137,7 @@ def dump_data_sources(
     '''
     Dump the given MySQL/MariaDB databases to a named pipe. The databases are supplied as a sequence
     of dicts, one dict describing each database as per the configuration schema. Use the given
-    borgmatic runtime directory to construct the destination path and the given log prefix in any
-    log entries.
+    borgmatic runtime directory to construct the destination path.
 
     Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
     pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
@@ -152,14 +147,12 @@ def dump_data_sources(
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
     processes = []
 
-    logger.info(f'{log_prefix}: Dumping MySQL databases{dry_run_label}')
+    logger.info(f'Dumping MySQL databases{dry_run_label}')
 
     for database in databases:
         dump_path = make_dump_path(borgmatic_runtime_directory)
         extra_environment = {'MYSQL_PWD': database['password']} if 'password' in database else None
-        dump_database_names = database_names_to_dump(
-            database, extra_environment, log_prefix, dry_run
-        )
+        dump_database_names = database_names_to_dump(database, extra_environment, dry_run)
 
         if not dump_database_names:
             if dry_run:
@@ -174,7 +167,6 @@ def dump_data_sources(
                 processes.append(
                     execute_dump_command(
                         renamed_database,
-                        log_prefix,
                         dump_path,
                         (dump_name,),
                         extra_environment,
@@ -186,7 +178,6 @@ def dump_data_sources(
             processes.append(
                 execute_dump_command(
                     database,
-                    log_prefix,
                     dump_path,
                     dump_database_names,
                     extra_environment,
@@ -206,25 +197,23 @@ def dump_data_sources(
 
 
 def remove_data_source_dumps(
-    databases, config, log_prefix, borgmatic_runtime_directory, dry_run
+    databases, config, borgmatic_runtime_directory, dry_run
 ):  # pragma: no cover
     '''
     Remove all database dump files for this hook regardless of the given databases. Use the
-    borgmatic runtime directory to construct the destination path and the log prefix in any log
-    entries. If this is a dry run, then don't actually remove anything.
+    borgmatic runtime directory to construct the destination path. If this is a dry run, then don't
+    actually remove anything.
     '''
-    dump.remove_data_source_dumps(
-        make_dump_path(borgmatic_runtime_directory), 'MySQL', log_prefix, dry_run
-    )
+    dump.remove_data_source_dumps(make_dump_path(borgmatic_runtime_directory), 'MySQL', dry_run)
 
 
 def make_data_source_dump_patterns(
-    databases, config, log_prefix, borgmatic_runtime_directory, name=None
+    databases, config, borgmatic_runtime_directory, name=None
 ):  # pragma: no cover
     '''
-    Given a sequence of configurations dicts, a configuration dict, a prefix to log with, the
-    borgmatic runtime directory, and a database name to match, return the corresponding glob
-    patterns to match the database dump in an archive.
+    Given a sequence of configuration dicts, a configuration dict, the borgmatic runtime directory,
+    and a database name to match, return the corresponding glob patterns to match the database dump
+    in an archive.
     '''
     borgmatic_source_directory = borgmatic.config.paths.get_borgmatic_source_directory(config)
 
@@ -242,7 +231,6 @@ def make_data_source_dump_patterns(
 def restore_data_source_dump(
     hook_config,
     config,
-    log_prefix,
     data_source,
     dry_run,
     extract_process,
@@ -251,9 +239,9 @@ def restore_data_source_dump(
 ):
     '''
     Restore a database from the given extract stream. The database is supplied as a data source
-    configuration dict, but the given hook configuration is ignored. The given log prefix is used
-    for any log entries. If this is a dry run, then don't actually restore anything. Trigger the
-    given active extract process (an instance of subprocess.Popen) to produce output to consume.
+    configuration dict, but the given hook configuration is ignored. If this is a dry run, then
+    don't actually restore anything. Trigger the given active extract process (an instance of
+    subprocess.Popen) to produce output to consume.
     '''
     dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
     hostname = connection_params['hostname'] or data_source.get(
@@ -287,7 +275,7 @@ def restore_data_source_dump(
     )
     extra_environment = {'MYSQL_PWD': password} if password else None
 
-    logger.debug(f"{log_prefix}: Restoring MySQL database {data_source['name']}{dry_run_label}")
+    logger.debug(f"Restoring MySQL database {data_source['name']}{dry_run_label}")
     if dry_run:
         return
 

+ 23 - 31
borgmatic/hooks/data_source/postgresql.py

@@ -58,7 +58,7 @@ def make_extra_environment(database, restore_connection_params=None):
 EXCLUDED_DATABASE_NAMES = ('template0', 'template1')
 
 
-def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
+def database_names_to_dump(database, extra_environment, dry_run):
     '''
     Given a requested database config, return the corresponding sequence of database names to dump.
     In the case of "all" when a database format is given, query for the names of databases on the
@@ -85,7 +85,7 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
         + (('--username', database['username']) if 'username' in database else ())
         + (tuple(database['list_options'].split(' ')) if 'list_options' in database else ())
     )
-    logger.debug(f'{log_prefix}: Querying for "all" PostgreSQL databases to dump')
+    logger.debug('Querying for "all" PostgreSQL databases to dump')
     list_output = execute_command_and_capture_output(
         list_command, extra_environment=extra_environment
     )
@@ -97,14 +97,14 @@ def database_names_to_dump(database, extra_environment, log_prefix, dry_run):
     )
 
 
-def get_default_port(databases, config, log_prefix):  # pragma: no cover
+def get_default_port(databases, config):  # pragma: no cover
     return 5432
 
 
-def use_streaming(databases, config, log_prefix):
+def use_streaming(databases, config):
     '''
-    Given a sequence of PostgreSQL database configuration dicts, a configuration dict (ignored), and
-    a log prefix (ignored), return whether streaming will be using during dumps.
+    Given a sequence of PostgreSQL database configuration dicts and a configuration dict (ignored),
+    return whether streaming will be used during dumps.
     '''
     return any(database.get('format') != 'directory' for database in databases)
 
@@ -112,7 +112,6 @@ def use_streaming(databases, config, log_prefix):
 def dump_data_sources(
     databases,
     config,
-    log_prefix,
     config_paths,
     borgmatic_runtime_directory,
     patterns,
@@ -121,8 +120,7 @@ def dump_data_sources(
     '''
     Dump the given PostgreSQL databases to a named pipe. The databases are supplied as a sequence of
     dicts, one dict describing each database as per the configuration schema. Use the given
-    borgmatic runtime directory to construct the destination path and the given log prefix in any
-    log entries.
+    borgmatic runtime directory to construct the destination path.
 
     Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
     pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
@@ -134,14 +132,12 @@ def dump_data_sources(
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
     processes = []
 
-    logger.info(f'{log_prefix}: Dumping PostgreSQL databases{dry_run_label}')
+    logger.info(f'Dumping PostgreSQL databases{dry_run_label}')
 
     for database in databases:
         extra_environment = make_extra_environment(database)
         dump_path = make_dump_path(borgmatic_runtime_directory)
-        dump_database_names = database_names_to_dump(
-            database, extra_environment, log_prefix, dry_run
-        )
+        dump_database_names = database_names_to_dump(database, extra_environment, dry_run)
 
         if not dump_database_names:
             if dry_run:
@@ -164,7 +160,7 @@ def dump_data_sources(
             )
             if os.path.exists(dump_filename):
                 logger.warning(
-                    f'{log_prefix}: Skipping duplicate dump of PostgreSQL database "{database_name}" to {dump_filename}'
+                    f'Skipping duplicate dump of PostgreSQL database "{database_name}" to {dump_filename}'
                 )
                 continue
 
@@ -198,7 +194,7 @@ def dump_data_sources(
             )
 
             logger.debug(
-                f'{log_prefix}: Dumping PostgreSQL database "{database_name}" to {dump_filename}{dry_run_label}'
+                f'Dumping PostgreSQL database "{database_name}" to {dump_filename}{dry_run_label}'
             )
             if dry_run:
                 continue
@@ -232,25 +228,25 @@ def dump_data_sources(
 
 
 def remove_data_source_dumps(
-    databases, config, log_prefix, borgmatic_runtime_directory, dry_run
+    databases, config, borgmatic_runtime_directory, dry_run
 ):  # pragma: no cover
     '''
     Remove all database dump files for this hook regardless of the given databases. Use the
-    borgmatic runtime directory to construct the destination path and the log prefix in any log
-    entries. If this is a dry run, then don't actually remove anything.
+    borgmatic runtime directory to construct the destination path. If this is a dry run, then don't
+    actually remove anything.
     '''
     dump.remove_data_source_dumps(
-        make_dump_path(borgmatic_runtime_directory), 'PostgreSQL', log_prefix, dry_run
+        make_dump_path(borgmatic_runtime_directory), 'PostgreSQL', dry_run
     )
 
 
 def make_data_source_dump_patterns(
-    databases, config, log_prefix, borgmatic_runtime_directory, name=None
+    databases, config, borgmatic_runtime_directory, name=None
 ):  # pragma: no cover
     '''
-    Given a sequence of configurations dicts, a configuration dict, a prefix to log with, the
-    borgmatic runtime directory, and a database name to match, return the corresponding glob
-    patterns to match the database dump in an archive.
+    Given a sequence of configuration dicts, a configuration dict, the borgmatic runtime directory,
+    and a database name to match, return the corresponding glob patterns to match the database dump
+    in an archive.
     '''
     borgmatic_source_directory = borgmatic.config.paths.get_borgmatic_source_directory(config)
 
@@ -268,7 +264,6 @@ def make_data_source_dump_patterns(
 def restore_data_source_dump(
     hook_config,
     config,
-    log_prefix,
     data_source,
     dry_run,
     extract_process,
@@ -278,10 +273,9 @@ def restore_data_source_dump(
     '''
     Restore a database from the given extract stream. The database is supplied as a data source
     configuration dict, but the given hook configuration is ignored. The given borgmatic runtime
-    directory is used to construct the destination path (used for the directory format), and the
-    given log prefix is used for any log entries. If this is a dry run, then don't actually restore
-    anything. Trigger the given active extract process (an instance of subprocess.Popen) to produce
-    output to consume.
+    directory is used to construct the destination path (used for the directory format). If this is
+    a dry run, then don't actually restore anything. Trigger the given active extract process (an
+    instance of subprocess.Popen) to produce output to consume.
 
     If the extract process is None, then restore the dump from the filesystem rather than from an
     extract stream.
@@ -354,9 +348,7 @@ def restore_data_source_dump(
         data_source, restore_connection_params=connection_params
     )
 
-    logger.debug(
-        f"{log_prefix}: Restoring PostgreSQL database {data_source['name']}{dry_run_label}"
-    )
+    logger.debug(f"Restoring PostgreSQL database {data_source['name']}{dry_run_label}")
     if dry_run:
         return
 

+ 22 - 26
borgmatic/hooks/data_source/sqlite.py

@@ -17,14 +17,14 @@ def make_dump_path(base_directory):  # pragma: no cover
     return dump.make_data_source_dump_path(base_directory, 'sqlite_databases')
 
 
-def get_default_port(databases, config, log_prefix):  # pragma: no cover
+def get_default_port(databases, config):  # pragma: no cover
     return None  # SQLite doesn't use a port.
 
 
-def use_streaming(databases, config, log_prefix):
+def use_streaming(databases, config):
     '''
-    Given a sequence of SQLite database configuration dicts, a configuration dict (ignored), and a
-    log prefix (ignored), return whether streaming will be using during dumps.
+    Given a sequence of SQLite database configuration dicts and a configuration dict (ignored),
+    return whether streaming will be used during dumps.
     '''
     return any(databases)
 
@@ -32,7 +32,6 @@ def use_streaming(databases, config, log_prefix):
 def dump_data_sources(
     databases,
     config,
-    log_prefix,
     config_paths,
     borgmatic_runtime_directory,
     patterns,
@@ -41,7 +40,7 @@ def dump_data_sources(
     '''
     Dump the given SQLite databases to a named pipe. The databases are supplied as a sequence of
     configuration dicts, as per the configuration schema. Use the given borgmatic runtime directory
-    to construct the destination path and the given log prefix in any log entries.
+    to construct the destination path.
 
     Return a sequence of subprocess.Popen instances for the dump processes ready to spew to a named
     pipe. But if this is a dry run, then don't actually dump anything and return an empty sequence.
@@ -51,7 +50,7 @@ def dump_data_sources(
     dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
     processes = []
 
-    logger.info(f'{log_prefix}: Dumping SQLite databases{dry_run_label}')
+    logger.info(f'Dumping SQLite databases{dry_run_label}')
 
     for database in databases:
         database_path = database['path']
@@ -60,7 +59,7 @@ def dump_data_sources(
             logger.warning('The "all" database name has no meaning for SQLite databases')
         if not os.path.exists(database_path):
             logger.warning(
-                f'{log_prefix}: No SQLite database at {database_path}; an empty database will be created and dumped'
+                f'No SQLite database at {database_path}; an empty database will be created and dumped'
             )
 
         dump_path = make_dump_path(borgmatic_runtime_directory)
@@ -68,7 +67,7 @@ def dump_data_sources(
 
         if os.path.exists(dump_filename):
             logger.warning(
-                f'{log_prefix}: Skipping duplicate dump of SQLite database at {database_path} to {dump_filename}'
+                f'Skipping duplicate dump of SQLite database at {database_path} to {dump_filename}'
             )
             continue
 
@@ -80,7 +79,7 @@ def dump_data_sources(
             shlex.quote(dump_filename),
         )
         logger.debug(
-            f'{log_prefix}: Dumping SQLite database at {database_path} to {dump_filename}{dry_run_label}'
+            f'Dumping SQLite database at {database_path} to {dump_filename}{dry_run_label}'
         )
         if dry_run:
             continue
@@ -99,25 +98,23 @@ def dump_data_sources(
 
 
 def remove_data_source_dumps(
-    databases, config, log_prefix, borgmatic_runtime_directory, dry_run
+    databases, config, borgmatic_runtime_directory, dry_run
 ):  # pragma: no cover
     '''
     Remove all database dump files for this hook regardless of the given databases. Use the
-    borgmatic runtime directory to construct the destination path and the log prefix in any log
-    entries. If this is a dry run, then don't actually remove anything.
+    borgmatic runtime directory to construct the destination path. If this is a dry run, then don't
+    actually remove anything.
     '''
-    dump.remove_data_source_dumps(
-        make_dump_path(borgmatic_runtime_directory), 'SQLite', log_prefix, dry_run
-    )
+    dump.remove_data_source_dumps(make_dump_path(borgmatic_runtime_directory), 'SQLite', dry_run)
 
 
 def make_data_source_dump_patterns(
-    databases, config, log_prefix, borgmatic_runtime_directory, name=None
+    databases, config, borgmatic_runtime_directory, name=None
 ):  # pragma: no cover
     '''
-    Given a sequence of configurations dicts, a configuration dict, a prefix to log with, the
-    borgmatic runtime directory, and a database name to match, return the corresponding glob
-    patterns to match the database dump in an archive.
+    Given a sequence of configuration dicts, a configuration dict, the borgmatic runtime directory,
+    and a database name to match, return the corresponding glob patterns to match the database dump
+    in an archive.
     '''
     borgmatic_source_directory = borgmatic.config.paths.get_borgmatic_source_directory(config)
 
@@ -135,7 +132,6 @@ def make_data_source_dump_patterns(
 def restore_data_source_dump(
     hook_config,
     config,
-    log_prefix,
     data_source,
     dry_run,
     extract_process,
@@ -144,22 +140,22 @@ def restore_data_source_dump(
 ):
     '''
     Restore a database from the given extract stream. The database is supplied as a data source
-    configuration dict, but the given hook configuration is ignored. The given log prefix is used
-    for any log entries. If this is a dry run, then don't actually restore anything. Trigger the
-    given active extract process (an instance of subprocess.Popen) to produce output to consume.
+    configuration dict, but the given hook configuration is ignored. If this is a dry run, then
+    don't actually restore anything. Trigger the given active extract process (an instance of
+    subprocess.Popen) to produce output to consume.
     '''
     dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
     database_path = connection_params['restore_path'] or data_source.get(
         'restore_path', data_source.get('path')
     )
 
-    logger.debug(f'{log_prefix}: Restoring SQLite database at {database_path}{dry_run_label}')
+    logger.debug(f'Restoring SQLite database at {database_path}{dry_run_label}')
     if dry_run:
         return
 
     try:
         os.remove(database_path)
-        logger.warning(f'{log_prefix}: Removed existing SQLite database at {database_path}')
+        logger.warning(f'Removed existing SQLite database at {database_path}')
     except FileNotFoundError:  # pragma: no cover
         pass
 

+ 22 - 28
borgmatic/hooks/data_source/zfs.py

@@ -13,7 +13,7 @@ import borgmatic.hooks.data_source.snapshot
 logger = logging.getLogger(__name__)
 
 
-def use_streaming(hook_config, config, log_prefix):  # pragma: no cover
+def use_streaming(hook_config, config):  # pragma: no cover
     '''
     Return whether dump streaming is used for this hook. (Spoiler: It isn't.)
     '''
@@ -189,26 +189,25 @@ def make_borg_snapshot_pattern(pattern, normalized_runtime_directory):
 def dump_data_sources(
     hook_config,
     config,
-    log_prefix,
     config_paths,
     borgmatic_runtime_directory,
     patterns,
     dry_run,
 ):
     '''
-    Given a ZFS configuration dict, a configuration dict, a log prefix, the borgmatic configuration
-    file paths, the borgmatic runtime directory, the configured patterns, and whether this is a dry
-    run, auto-detect and snapshot any ZFS dataset mount points listed in the given patterns and any
+    Given a ZFS configuration dict, a configuration dict, the borgmatic configuration file paths,
+    the borgmatic runtime directory, the configured patterns, and whether this is a dry run,
+    auto-detect and snapshot any ZFS dataset mount points listed in the given patterns and any
     dataset with a borgmatic-specific user property. Also update those patterns, replacing dataset
     mount points with corresponding snapshot directories so they get stored in the Borg archive
-    instead. Use the log prefix in any log entries.
+    instead.
 
     Return an empty sequence, since there are no ongoing dump processes from this hook.
 
     If this is a dry run, then don't actually snapshot anything.
     '''
     dry_run_label = ' (dry run; not actually snapshotting anything)' if dry_run else ''
-    logger.info(f'{log_prefix}: Snapshotting ZFS datasets{dry_run_label}')
+    logger.info(f'Snapshotting ZFS datasets{dry_run_label}')
 
     # List ZFS datasets to get their mount points.
     zfs_command = hook_config.get('zfs_command', 'zfs')
@@ -219,12 +218,12 @@ def dump_data_sources(
     normalized_runtime_directory = os.path.normpath(borgmatic_runtime_directory)
 
     if not requested_datasets:
-        logger.warning(f'{log_prefix}: No ZFS datasets found to snapshot{dry_run_label}')
+        logger.warning(f'No ZFS datasets found to snapshot{dry_run_label}')
 
     for dataset in requested_datasets:
         full_snapshot_name = f'{dataset.name}@{snapshot_name}'
         logger.debug(
-            f'{log_prefix}: Creating ZFS snapshot {full_snapshot_name} of {dataset.mount_point}{dry_run_label}'
+            f'Creating ZFS snapshot {full_snapshot_name} of {dataset.mount_point}{dry_run_label}'
         )
 
         if not dry_run:
@@ -239,7 +238,7 @@ def dump_data_sources(
         )
 
         logger.debug(
-            f'{log_prefix}: Mounting ZFS snapshot {full_snapshot_name} at {snapshot_mount_path}{dry_run_label}'
+            f'Mounting ZFS snapshot {full_snapshot_name} at {snapshot_mount_path}{dry_run_label}'
         )
 
         if dry_run:
@@ -306,12 +305,12 @@ def get_all_snapshots(zfs_command):
     return tuple(line.rstrip() for line in list_output.splitlines())
 
 
-def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_directory, dry_run):
+def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, dry_run):
     '''
-    Given a ZFS configuration dict, a configuration dict, a log prefix, the borgmatic runtime
-    directory, and whether this is a dry run, unmount and destroy any ZFS snapshots created by
-    borgmatic. Use the log prefix in any log entries. If this is a dry run or ZFS isn't configured
-    in borgmatic's configuration, then don't actually remove anything.
+    Given a ZFS configuration dict, a configuration dict, the borgmatic runtime directory, and
+    whether this is a dry run, unmount and destroy any ZFS snapshots created by borgmatic. If this
+    is a dry run or ZFS isn't configured in borgmatic's configuration, then don't actually remove
+    anything.
     '''
     if hook_config is None:
         return
@@ -324,10 +323,10 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
     try:
         dataset_mount_points = get_all_dataset_mount_points(zfs_command)
     except FileNotFoundError:
-        logger.debug(f'{log_prefix}: Could not find "{zfs_command}" command')
+        logger.debug(f'Could not find "{zfs_command}" command')
         return
     except subprocess.CalledProcessError as error:
-        logger.debug(f'{log_prefix}: {error}')
+        logger.debug(error)
         return
 
     snapshots_glob = os.path.join(
@@ -336,9 +335,7 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
         ),
         'zfs_snapshots',
     )
-    logger.debug(
-        f'{log_prefix}: Looking for snapshots to remove in {snapshots_glob}{dry_run_label}'
-    )
+    logger.debug(f'Looking for snapshots to remove in {snapshots_glob}{dry_run_label}')
     umount_command = hook_config.get('umount_command', 'umount')
 
     for snapshots_directory in glob.glob(snapshots_glob):
@@ -363,18 +360,16 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
                 if not os.path.isdir(snapshot_mount_path):
                     continue
 
-            logger.debug(
-                f'{log_prefix}: Unmounting ZFS snapshot at {snapshot_mount_path}{dry_run_label}'
-            )
+            logger.debug(f'Unmounting ZFS snapshot at {snapshot_mount_path}{dry_run_label}')
 
             if not dry_run:
                 try:
                     unmount_snapshot(umount_command, snapshot_mount_path)
                 except FileNotFoundError:
-                    logger.debug(f'{log_prefix}: Could not find "{umount_command}" command')
+                    logger.debug(f'Could not find "{umount_command}" command')
                     return
                 except subprocess.CalledProcessError as error:
-                    logger.debug(f'{log_prefix}: {error}')
+                    logger.debug(error)
                     return
 
         if not dry_run:
@@ -388,14 +383,14 @@ def remove_data_source_dumps(hook_config, config, log_prefix, borgmatic_runtime_
         if not full_snapshot_name.split('@')[-1].startswith(BORGMATIC_SNAPSHOT_PREFIX):
             continue
 
-        logger.debug(f'{log_prefix}: Destroying ZFS snapshot {full_snapshot_name}{dry_run_label}')
+        logger.debug(f'Destroying ZFS snapshot {full_snapshot_name}{dry_run_label}')
 
         if not dry_run:
             destroy_snapshot(zfs_command, full_snapshot_name)
 
 
 def make_data_source_dump_patterns(
-    hook_config, config, log_prefix, borgmatic_runtime_directory, name=None
+    hook_config, config, borgmatic_runtime_directory, name=None
 ):  # pragma: no cover
     '''
     Restores aren't implemented, because stored files can be extracted directly with "extract".
@@ -406,7 +401,6 @@ def make_data_source_dump_patterns(
 def restore_data_source_dump(
     hook_config,
     config,
-    log_prefix,
     data_source,
     dry_run,
     extract_process,

+ 18 - 19
borgmatic/hooks/dispatch.py

@@ -21,12 +21,12 @@ def get_submodule_names(parent_module):  # pragma: no cover
     return tuple(module_info.name for module_info in pkgutil.iter_modules(parent_module.__path__))
 
 
-def call_hook(function_name, config, log_prefix, hook_name, *args, **kwargs):
+def call_hook(function_name, config, hook_name, *args, **kwargs):
     '''
-    Given a configuration dict and a prefix to use in log entries, call the requested function of
-    the Python module corresponding to the given hook name. Supply that call with the configuration
-    for this hook (if any), the log prefix, and any given args and kwargs. Return the return value
-    of that call or None if the module in question is not a hook.
+    Given a configuration dict, call the requested function of the Python module corresponding to
+    the given hook name. Supply that call with the configuration for this hook (if any) and any
+    given args and kwargs. Return the return value of that call or None if the module in question is
+    not a hook.
 
     Raise ValueError if the hook name is unknown.
     Raise AttributeError if the function name is not found in the module.
@@ -54,17 +54,16 @@ def call_hook(function_name, config, log_prefix, hook_name, *args, **kwargs):
     else:
         raise ValueError(f'Unknown hook name: {hook_name}')
 
-    logger.debug(f'{log_prefix}: Calling {hook_name} hook function {function_name}')
+    logger.debug(f'Calling {hook_name} hook function {function_name}')
 
-    return getattr(module, function_name)(hook_config, config, log_prefix, *args, **kwargs)
+    return getattr(module, function_name)(hook_config, config, *args, **kwargs)
 
 
-def call_hooks(function_name, config, log_prefix, hook_type, *args, **kwargs):
+def call_hooks(function_name, config, hook_type, *args, **kwargs):
     '''
-    Given a configuration dict and a prefix to use in log entries, call the requested function of
-    the Python module corresponding to each hook of the given hook type (either "data_source" or
-    "monitoring"). Supply each call with the configuration for that hook, the log prefix, and any
-    given args and kwargs.
+    Given a configuration dict, call the requested function of the Python module corresponding to
+    each hook of the given hook type (either "data_source" or "monitoring"). Supply each call with
+    the configuration for that hook and any given args and kwargs.
 
     Collect any return values into a dict from module name to return value. Note that the module
     name is the name of the hook module itself, which might be different from the hook configuration
@@ -78,7 +77,7 @@ def call_hooks(function_name, config, log_prefix, hook_type, *args, **kwargs):
     Raise anything else that a called function raises. An error stops calls to subsequent functions.
     '''
     return {
-        hook_name: call_hook(function_name, config, log_prefix, hook_name, *args, **kwargs)
+        hook_name: call_hook(function_name, config, hook_name, *args, **kwargs)
         for hook_name in get_submodule_names(
             importlib.import_module(f'borgmatic.hooks.{hook_type.value}')
         )
@@ -86,18 +85,18 @@ def call_hooks(function_name, config, log_prefix, hook_type, *args, **kwargs):
     }
 
 
-def call_hooks_even_if_unconfigured(function_name, config, log_prefix, hook_type, *args, **kwargs):
+def call_hooks_even_if_unconfigured(function_name, config, hook_type, *args, **kwargs):
     '''
-    Given a configuration dict and a prefix to use in log entries, call the requested function of
-    the Python module corresponding to each hook of the given hook type (either "data_source" or
-    "monitoring"). Supply each call with the configuration for that hook, the log prefix, and any
-    given args and kwargs. Collect any return values into a dict from hook name to return value.
+    Given a configuration dict, call the requested function of the Python module corresponding to
+    each hook of the given hook type (either "data_source" or "monitoring"). Supply each call with
+    the configuration for that hook and any given args and kwargs. Collect any return values into a
+    dict from hook name to return value.
 
     Raise AttributeError if the function name is not found in the module.
     Raise anything else that a called function raises. An error stops calls to subsequent functions.
     '''
     return {
-        hook_name: call_hook(function_name, config, log_prefix, hook_name, *args, **kwargs)
+        hook_name: call_hook(function_name, config, hook_name, *args, **kwargs)
         for hook_name in get_submodule_names(
             importlib.import_module(f'borgmatic.hooks.{hook_type.value}')
         )

+ 4 - 4
borgmatic/hooks/monitoring/apprise.py

@@ -66,12 +66,12 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     )
 
     if not hook_config.get('services'):
-        logger.info(f'{config_filename}: No Apprise services to ping')
+        logger.info('No Apprise services to ping')
         return
 
     dry_run_string = ' (dry run; not actually pinging)' if dry_run else ''
     labels_string = ', '.join(map(operator.itemgetter('label'), hook_config.get('services')))
-    logger.info(f'{config_filename}: Pinging Apprise services: {labels_string}{dry_run_string}')
+    logger.info(f'Pinging Apprise services: {labels_string}{dry_run_string}')
 
     apprise_object = apprise.Apprise()
     apprise_object.add(list(map(operator.itemgetter('url'), hook_config.get('services'))))
@@ -100,10 +100,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     )
 
     if result is False:
-        logger.warning(f'{config_filename}: Error sending some Apprise notifications')
+        logger.warning('Error sending some Apprise notifications')
 
 
-def destroy_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
+def destroy_monitor(hook_config, config, monitoring_log_level, dry_run):
     '''
     Remove the monitor handler that was added to the root logger. This prevents the handler from
     getting reused by other instances of this monitor.

+ 5 - 9
borgmatic/hooks/monitoring/cronhub.py

@@ -28,9 +28,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     filename in any log entries. If this is a dry run, then don't actually ping anything.
     '''
     if state not in MONITOR_STATE_TO_CRONHUB:
-        logger.debug(
-            f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in Cronhub hook'
-        )
+        logger.debug(f'Ignoring unsupported monitoring {state.name.lower()} in Cronhub hook')
         return
 
     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
@@ -41,8 +39,8 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
         .replace('/ping/', formatted_state)
     )
 
-    logger.info(f'{config_filename}: Pinging Cronhub {state.name.lower()}{dry_run_label}')
-    logger.debug(f'{config_filename}: Using Cronhub ping URL {ping_url}')
+    logger.info(f'Pinging Cronhub {state.name.lower()}{dry_run_label}')
+    logger.debug(f'Using Cronhub ping URL {ping_url}')
 
     if not dry_run:
         logging.getLogger('urllib3').setLevel(logging.ERROR)
@@ -51,12 +49,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
             if not response.ok:
                 response.raise_for_status()
         except requests.exceptions.RequestException as error:
-            logger.warning(f'{config_filename}: Cronhub error: {error}')
+            logger.warning(f'Cronhub error: {error}')
 
 
-def destroy_monitor(
-    ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
-):  # pragma: no cover
+def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''

+ 5 - 9
borgmatic/hooks/monitoring/cronitor.py

@@ -28,16 +28,14 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     filename in any log entries. If this is a dry run, then don't actually ping anything.
     '''
     if state not in MONITOR_STATE_TO_CRONITOR:
-        logger.debug(
-            f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in Cronitor hook'
-        )
+        logger.debug(f'Ignoring unsupported monitoring {state.name.lower()} in Cronitor hook')
         return
 
     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
     ping_url = f"{hook_config['ping_url']}/{MONITOR_STATE_TO_CRONITOR[state]}"
 
-    logger.info(f'{config_filename}: Pinging Cronitor {state.name.lower()}{dry_run_label}')
-    logger.debug(f'{config_filename}: Using Cronitor ping URL {ping_url}')
+    logger.info(f'Pinging Cronitor {state.name.lower()}{dry_run_label}')
+    logger.debug(f'Using Cronitor ping URL {ping_url}')
 
     if not dry_run:
         logging.getLogger('urllib3').setLevel(logging.ERROR)
@@ -46,12 +44,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
             if not response.ok:
                 response.raise_for_status()
         except requests.exceptions.RequestException as error:
-            logger.warning(f'{config_filename}: Cronitor error: {error}')
+            logger.warning(f'Cronitor error: {error}')
 
 
-def destroy_monitor(
-    ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
-):  # pragma: no cover
+def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''

+ 6 - 10
borgmatic/hooks/monitoring/healthchecks.py

@@ -55,9 +55,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
 
     if 'states' in hook_config and state.name.lower() not in hook_config['states']:
-        logger.info(
-            f'{config_filename}: Skipping Healthchecks {state.name.lower()} ping due to configured states'
-        )
+        logger.info(f'Skipping Healthchecks {state.name.lower()} ping due to configured states')
         return
 
     ping_url_is_uuid = re.search(r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$', ping_url)
@@ -68,14 +66,12 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
 
     if hook_config.get('create_slug'):
         if ping_url_is_uuid:
-            logger.warning(
-                f'{config_filename}: Healthchecks UUIDs do not support auto provisionning; ignoring'
-            )
+            logger.warning('Healthchecks UUIDs do not support auto provisionning; ignoring')
         else:
             ping_url = f'{ping_url}?create=1'
 
-    logger.info(f'{config_filename}: Pinging Healthchecks {state.name.lower()}{dry_run_label}')
-    logger.debug(f'{config_filename}: Using Healthchecks ping URL {ping_url}')
+    logger.info(f'Pinging Healthchecks {state.name.lower()}{dry_run_label}')
+    logger.debug(f'Using Healthchecks ping URL {ping_url}')
 
     if state in (monitor.State.FINISH, monitor.State.FAIL, monitor.State.LOG):
         payload = borgmatic.hooks.monitoring.logs.format_buffered_logs_for_payload(
@@ -93,10 +89,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
             if not response.ok:
                 response.raise_for_status()
         except requests.exceptions.RequestException as error:
-            logger.warning(f'{config_filename}: Healthchecks error: {error}')
+            logger.warning(f'Healthchecks error: {error}')
 
 
-def destroy_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
+def destroy_monitor(hook_config, config, monitoring_log_level, dry_run):
     '''
     Remove the monitor handler that was added to the root logger. This prevents the handler from
     getting reused by other instances of this monitor.

+ 2 - 2
borgmatic/hooks/monitoring/loki.py

@@ -139,10 +139,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     for handler in tuple(logging.getLogger().handlers):
         if isinstance(handler, Loki_log_handler):
             if state in MONITOR_STATE_TO_LOKI.keys():
-                handler.raw(f'{config_filename}: {MONITOR_STATE_TO_LOKI[state]} backup')
+                handler.raw(f'{MONITOR_STATE_TO_LOKI[state]} backup')
 
 
-def destroy_monitor(hook_config, config, config_filename, monitoring_log_level, dry_run):
+def destroy_monitor(hook_config, config, monitoring_log_level, dry_run):
     '''
     Remove the monitor handler that was added to the root logger.
     '''

+ 8 - 14
borgmatic/hooks/monitoring/ntfy.py

@@ -37,8 +37,8 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
         base_url = hook_config.get('server', 'https://ntfy.sh')
         topic = hook_config.get('topic')
 
-        logger.info(f'{config_filename}: Pinging ntfy topic {topic}{dry_run_label}')
-        logger.debug(f'{config_filename}: Using Ntfy ping URL {base_url}/{topic}')
+        logger.info(f'Pinging ntfy topic {topic}{dry_run_label}')
+        logger.debug(f'Using Ntfy ping URL {base_url}/{topic}')
 
         headers = {
             'X-Title': state_config.get('title'),
@@ -55,20 +55,16 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
         if access_token is not None:
             if username or password:
                 logger.warning(
-                    f'{config_filename}: ntfy access_token is set but so is username/password, only using access_token'
+                    'ntfy access_token is set but so is username/password, only using access_token'
                 )
             auth = requests.auth.HTTPBasicAuth('', access_token)
         elif (username and password) is not None:
             auth = requests.auth.HTTPBasicAuth(username, password)
-            logger.info(f'{config_filename}: Using basic auth with user {username} for ntfy')
+            logger.info(f'Using basic auth with user {username} for ntfy')
         elif username is not None:
-            logger.warning(
-                f'{config_filename}: Password missing for ntfy authentication, defaulting to no auth'
-            )
+            logger.warning('Password missing for ntfy authentication, defaulting to no auth')
         elif password is not None:
-            logger.warning(
-                f'{config_filename}: Username missing for ntfy authentication, defaulting to no auth'
-            )
+            logger.warning('Username missing for ntfy authentication, defaulting to no auth')
 
         if not dry_run:
             logging.getLogger('urllib3').setLevel(logging.ERROR)
@@ -77,12 +73,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
                 if not response.ok:
                     response.raise_for_status()
             except requests.exceptions.RequestException as error:
-                logger.warning(f'{config_filename}: ntfy error: {error}')
+                logger.warning(f'ntfy error: {error}')
 
 
-def destroy_monitor(
-    ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
-):  # pragma: no cover
+def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''

+ 5 - 7
borgmatic/hooks/monitoring/pagerduty.py

@@ -29,12 +29,12 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     '''
     if state != monitor.State.FAIL:
         logger.debug(
-            f'{config_filename}: Ignoring unsupported monitoring {state.name.lower()} in PagerDuty hook',
+            f'Ignoring unsupported monitoring {state.name.lower()} in PagerDuty hook',
         )
         return
 
     dry_run_label = ' (dry run; not actually sending)' if dry_run else ''
-    logger.info(f'{config_filename}: Sending failure event to PagerDuty {dry_run_label}')
+    logger.info(f'Sending failure event to PagerDuty {dry_run_label}')
 
     if dry_run:
         return
@@ -61,7 +61,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
             },
         }
     )
-    logger.debug(f'{config_filename}: Using PagerDuty payload: {payload}')
+    logger.debug(f'Using PagerDuty payload: {payload}')
 
     logging.getLogger('urllib3').setLevel(logging.ERROR)
     try:
@@ -69,12 +69,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
         if not response.ok:
             response.raise_for_status()
     except requests.exceptions.RequestException as error:
-        logger.warning(f'{config_filename}: PagerDuty error: {error}')
+        logger.warning(f'PagerDuty error: {error}')
 
 
-def destroy_monitor(
-    ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
-):  # pragma: no cover
+def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''

+ 5 - 7
borgmatic/hooks/monitoring/pushover.py

@@ -35,14 +35,14 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     token = hook_config.get('token')
     user = hook_config.get('user')
 
-    logger.info(f'{config_filename}: Updating Pushover{dry_run_label}')
+    logger.info(f'Updating Pushover{dry_run_label}')
 
     if state_config.get('priority') == EMERGENCY_PRIORITY:
         if 'expire' not in state_config:
-            logger.info(f'{config_filename}: Setting expire to default (10 min).')
+            logger.info('Setting expire to default (10 min)')
             state_config['expire'] = 600
         if 'retry' not in state_config:
-            logger.info(f'{config_filename}: Setting retry to default (30 sec).')
+            logger.info('Setting retry to default (30 sec)')
             state_config['retry'] = 30
     else:
         if 'expire' in state_config or 'retry' in state_config:
@@ -75,12 +75,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
             if not response.ok:
                 response.raise_for_status()
         except requests.exceptions.RequestException as error:
-            logger.warning(f'{config_filename}: Pushover error: {error}')
+            logger.warning(f'Pushover error: {error}')
 
 
-def destroy_monitor(
-    ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
-):  # pragma: no cover
+def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''

+ 6 - 10
borgmatic/hooks/monitoring/sentry.py

@@ -38,15 +38,13 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     match = DATA_SOURCE_NAME_URL_PATTERN.match(data_source_name_url)
 
     if not match:
-        logger.warning(
-            'f{config_filename}: Invalid Sentry data source name URL: {data_source_name_url}'
-        )
+        logger.warning(f'Invalid Sentry data source name URL: {data_source_name_url}')
         return
 
     cron_url = f'{match.group("protocol")}://{match.group("hostname")}/api/{match.group("project_id")}/cron/{monitor_slug}/{match.group("username")}/'
 
-    logger.info(f'{config_filename}: Pinging Sentry {state.name.lower()}{dry_run_label}')
-    logger.debug(f'{config_filename}: Using Sentry cron URL {cron_url}')
+    logger.info(f'Pinging Sentry {state.name.lower()}{dry_run_label}')
+    logger.debug(f'Using Sentry cron URL {cron_url}')
 
     status = {
         'start': 'in_progress',
@@ -55,7 +53,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     }.get(state.name.lower())
 
     if not status:
-        logger.warning('f{config_filename}: Invalid Sentry state')
+        logger.warning('Invalid Sentry state')
         return
 
     if dry_run:
@@ -67,12 +65,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
         if not response.ok:
             response.raise_for_status()
     except requests.exceptions.RequestException as error:
-        logger.warning(f'{config_filename}: Sentry error: {error}')
+        logger.warning(f'Sentry error: {error}')
 
 
-def destroy_monitor(
-    ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
-):  # pragma: no cover
+def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''

+ 4 - 8
borgmatic/hooks/monitoring/uptime_kuma.py

@@ -28,10 +28,8 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     status = 'down' if state.name.lower() == 'fail' else 'up'
     push_url = hook_config.get('push_url', 'https://example.uptime.kuma/api/push/abcd1234')
     query = f'status={status}&msg={state.name.lower()}'
-    logger.info(
-        f'{config_filename}: Pushing Uptime Kuma push_url {push_url}?{query} {dry_run_label}'
-    )
-    logger.debug(f'{config_filename}: Full Uptime Kuma state URL {push_url}?{query}')
+    logger.info(f'Pushing Uptime Kuma push_url {push_url}?{query} {dry_run_label}')
+    logger.debug(f'Full Uptime Kuma state URL {push_url}?{query}')
 
     if dry_run:
         return
@@ -43,12 +41,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
         if not response.ok:
             response.raise_for_status()
     except requests.exceptions.RequestException as error:
-        logger.warning(f'{config_filename}: Uptime Kuma error: {error}')
+        logger.warning(f'Uptime Kuma error: {error}')
 
 
-def destroy_monitor(
-    push_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
-):  # pragma: no cover
+def destroy_monitor(push_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''

+ 16 - 18
borgmatic/hooks/monitoring/zabbix.py

@@ -44,16 +44,16 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
     value = state_config.get('value')
     headers = {'Content-Type': 'application/json-rpc'}
 
-    logger.info(f'{config_filename}: Updating Zabbix{dry_run_label}')
-    logger.debug(f'{config_filename}: Using Zabbix URL: {server}')
+    logger.info(f'Updating Zabbix{dry_run_label}')
+    logger.debug(f'Using Zabbix URL: {server}')
 
     if server is None:
-        logger.warning(f'{config_filename}: Server missing for Zabbix')
+        logger.warning('Server missing for Zabbix')
         return
 
     # Determine the Zabbix method used to store the value: itemid or host/key
     if itemid is not None:
-        logger.info(f'{config_filename}: Updating {itemid} on Zabbix')
+        logger.info(f'Updating {itemid} on Zabbix')
         data = {
             'jsonrpc': '2.0',
             'method': 'history.push',
@@ -62,7 +62,7 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
         }
 
     elif (host and key) is not None:
-        logger.info(f'{config_filename}: Updating Host:{host} and Key:{key} on Zabbix')
+        logger.info(f'Updating Host:{host} and Key:{key} on Zabbix')
         data = {
             'jsonrpc': '2.0',
             'method': 'history.push',
@@ -71,23 +71,23 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
         }
 
     elif host is not None:
-        logger.warning(f'{config_filename}: Key missing for Zabbix')
+        logger.warning('Key missing for Zabbix')
         return
 
     elif key is not None:
-        logger.warning(f'{config_filename}: Host missing for Zabbix.')
+        logger.warning('Host missing for Zabbix')
         return
     else:
-        logger.warning(f'{config_filename}: No zabbix itemid or host/key provided.')
+        logger.warning('No Zabbix itemid or host/key provided')
         return
 
     # Determine the authentication method: API key or username/password
     if api_key is not None:
-        logger.info(f'{config_filename}: Using API key auth for Zabbix')
+        logger.info('Using API key auth for Zabbix')
         headers['Authorization'] = 'Bearer ' + api_key
 
     elif (username and password) is not None:
-        logger.info(f'{config_filename}: Using user/pass auth with user {username} for Zabbix')
+        logger.info(f'Using user/pass auth with user {username} for Zabbix')
         auth_data = {
             'jsonrpc': '2.0',
             'method': 'user.login',
@@ -102,18 +102,18 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
                 if not response.ok:
                     response.raise_for_status()
             except requests.exceptions.RequestException as error:
-                logger.warning(f'{config_filename}: Zabbix error: {error}')
+                logger.warning(f'Zabbix error: {error}')
                 return
 
     elif username is not None:
-        logger.warning(f'{config_filename}: Password missing for Zabbix authentication')
+        logger.warning('Password missing for Zabbix authentication')
         return
 
     elif password is not None:
-        logger.warning(f'{config_filename}: Username missing for Zabbix authentication')
+        logger.warning('Username missing for Zabbix authentication')
         return
     else:
-        logger.warning(f'{config_filename}: Authentication data missing for Zabbix')
+        logger.warning('Authentication data missing for Zabbix')
         return
 
     if not dry_run:
@@ -123,12 +123,10 @@ def ping_monitor(hook_config, config, config_filename, state, monitoring_log_lev
             if not response.ok:
                 response.raise_for_status()
         except requests.exceptions.RequestException as error:
-            logger.warning(f'{config_filename}: Zabbix error: {error}')
+            logger.warning(f'Zabbix error: {error}')
 
 
-def destroy_monitor(
-    ping_url_or_uuid, config, config_filename, monitoring_log_level, dry_run
-):  # pragma: no cover
+def destroy_monitor(ping_url_or_uuid, config, monitoring_log_level, dry_run):  # pragma: no cover
     '''
     No destruction is necessary for this monitor.
     '''

+ 84 - 5
borgmatic/logger.py

@@ -88,8 +88,10 @@ class Multi_stream_handler(logging.Handler):
 
 
 class Console_no_color_formatter(logging.Formatter):
-    def format(self, record):  # pragma: no cover
-        return record.msg
+    def __init__(self, *args, **kwargs):  # pragma: no cover
+        super(Console_no_color_formatter, self).__init__(
+            '{prefix}{message}', style='{', defaults={'prefix': ''}, *args, **kwargs
+        )
 
 
 class Color(enum.Enum):
@@ -102,6 +104,11 @@ class Color(enum.Enum):
 
 
 class Console_color_formatter(logging.Formatter):
+    def __init__(self, *args, **kwargs):
+        super(Console_color_formatter, self).__init__(
+            '{prefix}{message}', style='{', defaults={'prefix': ''}, *args, **kwargs
+        )
+
     def format(self, record):
         add_custom_log_levels()
 
@@ -118,7 +125,7 @@ class Console_color_formatter(logging.Formatter):
             .value
         )
 
-        return color_text(color, record.msg)
+        return color_text(color, super(Console_color_formatter, self).format(record))
 
 
 def ansi_escape_code(color):  # pragma: no cover
@@ -179,6 +186,72 @@ def add_custom_log_levels():  # pragma: no cover
     add_logging_level('DISABLED', DISABLED)
 
 
+def get_log_prefix():
+    '''
+    Return the current log prefix from the defaults for the formatter on the first logging handler,
+    set by set_log_prefix(). Return None if no such prefix exists.
+    '''
+    try:
+        return next(
+            handler.formatter._style._defaults.get('prefix').rstrip().rstrip(':')
+            for handler in logging.getLogger().handlers
+        )
+    except (StopIteration, AttributeError):
+        return None
+
+
+def set_log_prefix(prefix):
+    '''
+    Given a log prefix as a string, set it into the defaults for the formatters on all logging
+    handlers. Note that this overwrites any existing defaults.
+    '''
+    for handler in logging.getLogger().handlers:
+        try:
+            handler.formatter._style._defaults = {'prefix': f'{prefix}: ' if prefix else ''}
+        except AttributeError:
+            pass
+
+
+class Log_prefix:
+    '''
+    A Python context manager for setting a log prefix so that it shows up in every subsequent
+    logging message for the duration of the context manager. For this to work, it relies on each
+    logging formatter to be initialized with "{prefix}" somewhere in its logging format.
+
+    Example use as a context manager:
+
+
+        with borgmatic.logger.Log_prefix('myprefix'):
+            do_something_that_logs()
+
+    For the scope of that "with" statement, any logs created are prefixed with "myprefix: ".
+    Afterwards, the prefix gets restored to whatever it was prior to the context manager.
+    '''
+
+    def __init__(self, prefix):
+        '''
+        Given the desired log prefix, save it for use below. Set prefix to None to disable any
+        prefix from getting logged.
+        '''
+        self.prefix = prefix
+        self.original_prefix = None
+
+    def __enter__(self):
+        '''
+        Set the prefix onto the formatter defaults for every logging handler so that the prefix ends
+        up in every log message. But first, save off any original prefix so that it can be restored
+        below.
+        '''
+        self.original_prefix = get_log_prefix()
+        set_log_prefix(self.prefix)
+
+    def __exit__(self, exception, value, traceback):
+        '''
+        Restore any original prefix.
+        '''
+        set_log_prefix(self.original_prefix)
+
+
 def configure_logging(
     console_log_level,
     syslog_log_level=None,
@@ -242,7 +315,11 @@ def configure_logging(
         if syslog_path:
             syslog_handler = logging.handlers.SysLogHandler(address=syslog_path)
             syslog_handler.setFormatter(
-                logging.Formatter('borgmatic: {levelname} {message}', style='{')  # noqa: FS003
+                logging.Formatter(
+                    'borgmatic: {levelname} {prefix}{message}',  # noqa: FS003
+                    style='{',
+                    defaults={'prefix': ''},
+                )
             )
             syslog_handler.setLevel(syslog_log_level)
             handlers.append(syslog_handler)
@@ -251,7 +328,9 @@ def configure_logging(
         file_handler = logging.handlers.WatchedFileHandler(log_file)
         file_handler.setFormatter(
             logging.Formatter(
-                log_file_format or '[{asctime}] {levelname}: {message}', style='{'  # noqa: FS003
+                log_file_format or '[{asctime}] {levelname}: {prefix}{message}',  # noqa: FS003
+                style='{',
+                defaults={'prefix': ''},
             )
         )
         file_handler.setLevel(log_file_log_level)

+ 6 - 10
docs/how-to/monitor-your-backups.md

@@ -478,22 +478,18 @@ your Loki API push URL. Here's an example:
 ```yaml
 loki:
     url: http://localhost:3100/loki/api/v1/push
+
+    labels:
+        app: borgmatic
+        hostname: example.org
 ```
 
 With this configuration, borgmatic sends its logs to your Loki instance as any
 of the `create`, `prune`, `compact`, or `check` actions are run. Then, after
 the actions complete, borgmatic notifies Loki of success or failure.
 
-This hook supports sending arbitrary labels to Loki. For instance:
-
-```yaml
-loki:
-    url: http://localhost:3100/loki/api/v1/push
-
-    labels:
-        app: borgmatic
-        hostname: example.org
-```
+This hook supports sending arbitrary labels to Loki. At least one label is
+required.
 
 There are also a few placeholders you can optionally use as label values:
 

+ 0 - 0
tests/end-to-end/hooks/__init__.py


+ 0 - 0
tests/end-to-end/hooks/data_source/__init__.py


+ 0 - 0
tests/end-to-end/test_btrfs.py → tests/end-to-end/hooks/data_source/test_btrfs.py


+ 0 - 0
tests/end-to-end/test_database.py → tests/end-to-end/hooks/data_source/test_database.py


+ 0 - 0
tests/end-to-end/test_lvm.py → tests/end-to-end/hooks/data_source/test_lvm.py


+ 0 - 0
tests/end-to-end/test_zfs.py → tests/end-to-end/hooks/data_source/test_zfs.py


+ 143 - 0
tests/end-to-end/hooks/monitoring/test_monitoring.py

@@ -0,0 +1,143 @@
+import http.server
+import json
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import threading
+
+import pytest
+
+
+def generate_configuration(config_path, repository_path, monitoring_hook_configuration):
+    '''
+    Generate borgmatic configuration into a file at the config path, and update the defaults so as
+    to work for testing, including updating the source directories, injecting the given repository
+    path, and tacking on an encryption passphrase.
+    '''
+    subprocess.check_call(f'borgmatic config generate --destination {config_path}'.split(' '))
+    config = (
+        open(config_path)
+        .read()
+        .replace('ssh://user@backupserver/./sourcehostname.borg', repository_path)
+        .replace('- path: /mnt/backup', '')
+        .replace('label: local', '')
+        .replace('- /home/user/path with spaces', '')
+        .replace('- /home', f'- {config_path}')
+        .replace('- /etc', '')
+        .replace('- /var/log/syslog*', '')
+        + '\nencryption_passphrase: "test"'
+        + f'\n{monitoring_hook_configuration}'
+    )
+    config_file = open(config_path, 'w')
+    config_file.write(config)
+    config_file.close()
+
+
+class Web_server(http.server.BaseHTTPRequestHandler):
+    def handle_method(self):
+        self.send_response(http.HTTPStatus.OK)
+        self.send_header('Content-type', 'text/html')
+        self.end_headers()
+        self.wfile.write(''.encode('utf-8'))
+
+    def do_GET(self):
+        self.handle_method()
+
+    def do_POST(self):
+        self.handle_method()
+
+
+def serve_web_request(count):
+    for index in range(0, count):
+        with http.server.HTTPServer(('localhost', 12345), Web_server) as server:
+            server.handle_request()
+
+
+class Background_web_server:
+    def __init__(self, expected_request_count):
+        self.expected_request_count = expected_request_count
+
+    def __enter__(self):
+        self.thread = threading.Thread(
+            target=lambda: serve_web_request(count=self.expected_request_count)
+        )
+        self.thread.start()
+
+    def __exit__(self, exception, value, traceback):
+        self.thread.join()
+
+
+START_AND_FINISH = 2
+START_LOG_AND_FINISH = 3
+
+
+@pytest.mark.parametrize(
+    'monitoring_hook_configuration,expected_request_count',
+    (
+        (
+            'cronhub:\n    ping_url: http://localhost:12345/start/1f5e3410-254c-11e8-b61d-55875966d031',
+            START_AND_FINISH,
+        ),
+        (
+            'cronitor:\n    ping_url: http://localhost:12345/d3x0c1',
+            START_AND_FINISH,
+        ),
+        (
+            'healthchecks:\n    ping_url: http://localhost:12345/addffa72-da17-40ae-be9c-ff591afb942a',
+            START_LOG_AND_FINISH,
+        ),
+        (
+            'loki:\n    url: http://localhost:12345/loki/api/v1/push\n    labels:\n        app: borgmatic',
+            START_AND_FINISH,
+        ),
+        (
+            'ntfy:\n    topic: my-unique-topic\n    server: http://localhost:12345\n    states: [start, finish]',
+            START_AND_FINISH,
+        ),
+        (
+            'sentry:\n    data_source_name_url: http://5f80ec@localhost:12345/203069\n    monitor_slug: mymonitor',
+            START_AND_FINISH,
+        ),
+        (
+            'uptime_kuma:\n    push_url: http://localhost:12345/api/push/abcd1234',
+            START_AND_FINISH,
+        ),
+        (
+            'zabbix:\n    itemid: 1\n    server: http://localhost:12345/zabbix/api_jsonrpc.php\n    api_key: mykey\n    states: [start, finish]',
+            START_AND_FINISH,
+        ),
+    ),
+)
+def test_borgmatic_command(monitoring_hook_configuration, expected_request_count):
+    # Create a Borg repository.
+    temporary_directory = tempfile.mkdtemp()
+    repository_path = os.path.join(temporary_directory, 'test.borg')
+    extract_path = os.path.join(temporary_directory, 'extract')
+
+    original_working_directory = os.getcwd()
+    os.mkdir(extract_path)
+    os.chdir(extract_path)
+
+    try:
+        config_path = os.path.join(temporary_directory, 'test.yaml')
+        generate_configuration(config_path, repository_path, monitoring_hook_configuration)
+
+        subprocess.check_call(
+            f'borgmatic -v 2 --config {config_path} repo-create --encryption repokey'.split(' ')
+        )
+
+        with Background_web_server(expected_request_count):
+            # Run borgmatic to generate a backup archive, and then list it to make sure it exists.
+            subprocess.check_call(f'borgmatic -v 2 --config {config_path}'.split(' '))
+            output = subprocess.check_output(
+                f'borgmatic --config {config_path} list --json'.split(' ')
+            ).decode(sys.stdout.encoding)
+            parsed_output = json.loads(output)
+
+            assert len(parsed_output) == 1
+            assert len(parsed_output[0]['archives']) == 1
+    finally:
+        os.chdir(original_working_directory)
+        shutil.rmtree(temporary_directory)

+ 1 - 1
tests/end-to-end/test_override.py

@@ -28,7 +28,7 @@ def generate_configuration(config_path, repository_path):
     config_file.close()
 
 
-def test_override_get_normalized():
+def test_override_gets_normalized():
     temporary_directory = tempfile.mkdtemp()
     repository_path = os.path.join(temporary_directory, 'test.borg')
 

+ 2 - 2
tests/integration/hooks/monitoring/test_apprise.py

@@ -14,7 +14,7 @@ def test_destroy_monitor_removes_apprise_handler():
         )
     )
 
-    module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock(), flexmock())
+    module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock())
 
     assert logger.handlers == original_handlers
 
@@ -23,6 +23,6 @@ def test_destroy_monitor_without_apprise_handler_does_not_raise():
     logger = logging.getLogger()
     original_handlers = list(logger.handlers)
 
-    module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock(), flexmock())
+    module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock())
 
     assert logger.handlers == original_handlers

+ 2 - 2
tests/integration/hooks/monitoring/test_healthchecks.py

@@ -14,7 +14,7 @@ def test_destroy_monitor_removes_healthchecks_handler():
         )
     )
 
-    module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock(), flexmock())
+    module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock())
 
     assert logger.handlers == original_handlers
 
@@ -23,6 +23,6 @@ def test_destroy_monitor_without_healthchecks_handler_does_not_raise():
     logger = logging.getLogger()
     original_handlers = list(logger.handlers)
 
-    module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock(), flexmock())
+    module.destroy_monitor(flexmock(), flexmock(), flexmock(), flexmock())
 
     assert logger.handlers == original_handlers

+ 3 - 3
tests/integration/hooks/monitoring/test_loki.py

@@ -65,8 +65,8 @@ def test_ping_monitor_adds_log_message():
             assert any(
                 map(
                     lambda log: log
-                    == f'{config_filename}: {module.MONITOR_STATE_TO_LOKI[module.monitor.State.FINISH]} backup',
-                    map(lambda x: x[1], handler.buffer.root['streams'][0]['values']),
+                    == f'{module.MONITOR_STATE_TO_LOKI[module.monitor.State.FINISH]} backup',
+                    map(lambda value: value[1], handler.buffer.root['streams'][0]['values']),
                 )
             )
             return
@@ -82,7 +82,7 @@ def test_destroy_monitor_removes_log_handler():
     config_filename = 'test.yaml'
     dry_run = True
     module.initialize_monitor(hook_config, flexmock(), config_filename, flexmock(), dry_run)
-    module.destroy_monitor(hook_config, flexmock(), config_filename, flexmock(), dry_run)
+    module.destroy_monitor(hook_config, flexmock(), flexmock(), dry_run)
 
     for handler in tuple(logging.getLogger().handlers):
         if isinstance(handler, module.Loki_log_handler):

+ 0 - 8
tests/unit/actions/test_check.py

@@ -931,7 +931,6 @@ def test_compare_spot_check_hashes_returns_paths_having_failing_hashes():
         global_arguments=flexmock(),
         local_path=flexmock(),
         remote_path=flexmock(),
-        log_prefix='repo',
         source_paths=('/foo', '/bar', '/baz', '/quux'),
     ) == ('/bar',)
 
@@ -972,7 +971,6 @@ def test_compare_spot_check_hashes_returns_relative_paths_having_failing_hashes(
         global_arguments=flexmock(),
         local_path=flexmock(),
         remote_path=flexmock(),
-        log_prefix='repo',
         source_paths=('foo', 'bar', 'baz', 'quux'),
     ) == ('bar',)
 
@@ -1013,7 +1011,6 @@ def test_compare_spot_check_hashes_handles_data_sample_percentage_above_100():
         global_arguments=flexmock(),
         local_path=flexmock(),
         remote_path=flexmock(),
-        log_prefix='repo',
         source_paths=('/foo', '/bar'),
     ) == ('/foo', '/bar')
 
@@ -1051,7 +1048,6 @@ def test_compare_spot_check_hashes_uses_xxh64sum_command_option():
         global_arguments=flexmock(),
         local_path=flexmock(),
         remote_path=flexmock(),
-        log_prefix='repo',
         source_paths=('/foo', '/bar', '/baz', '/quux'),
     ) == ('/bar',)
 
@@ -1088,7 +1084,6 @@ def test_compare_spot_check_hashes_considers_path_missing_from_archive_as_not_ma
         global_arguments=flexmock(),
         local_path=flexmock(),
         remote_path=flexmock(),
-        log_prefix='repo',
         source_paths=('/foo', '/bar', '/baz', '/quux'),
     ) == ('/bar',)
 
@@ -1124,7 +1119,6 @@ def test_compare_spot_check_hashes_considers_non_existent_path_as_not_matching()
         global_arguments=flexmock(),
         local_path=flexmock(),
         remote_path=flexmock(),
-        log_prefix='repo',
         source_paths=('/foo', '/bar', '/baz', '/quux'),
     ) == ('/bar',)
 
@@ -1171,7 +1165,6 @@ def test_compare_spot_check_hashes_with_too_many_paths_feeds_them_to_commands_in
         global_arguments=flexmock(),
         local_path=flexmock(),
         remote_path=flexmock(),
-        log_prefix='repo',
         source_paths=('/foo', '/bar', '/baz', '/quux'),
     ) == ('/quux',)
 
@@ -1214,7 +1207,6 @@ def test_compare_spot_check_hashes_uses_working_directory_to_access_source_paths
         global_arguments=flexmock(),
         local_path=flexmock(),
         remote_path=flexmock(),
-        log_prefix='repo',
         source_paths=('foo', 'bar', 'baz', 'quux'),
     ) == ('bar',)
 

+ 4 - 20
tests/unit/actions/test_restore.py

@@ -194,7 +194,6 @@ def test_get_configured_data_source_matches_data_source_with_restore_dump():
             'postgresql_databases': [{'name': 'foo'}, {'name': 'bar'}],
         },
         restore_dump=module.Dump('postgresql_databases', 'bar'),
-        log_prefix='test',
     ) == {'name': 'bar'}
 
 
@@ -206,7 +205,6 @@ def test_get_configured_data_source_matches_nothing_when_nothing_configured():
         module.get_configured_data_source(
             config={},
             restore_dump=module.Dump('postgresql_databases', 'quux'),
-            log_prefix='test',
         )
         is None
     )
@@ -222,7 +220,6 @@ def test_get_configured_data_source_matches_nothing_when_restore_dump_does_not_m
                 'postgresql_databases': [{'name': 'foo'}],
             },
             restore_dump=module.Dump('postgresql_databases', 'quux'),
-            log_prefix='test',
         )
         is None
     )
@@ -250,7 +247,6 @@ def test_get_configured_data_source_with_multiple_matching_data_sources_errors()
                 ],
             },
             restore_dump=module.Dump('postgresql_databases', 'bar'),
-            log_prefix='test',
         )
 
 
@@ -277,7 +273,7 @@ def test_strip_path_prefix_from_extracted_dump_destination_renames_first_matchin
 def test_restore_single_dump_extracts_and_restores_single_file_dump():
     flexmock(module).should_receive('render_dump_metadata').and_return('test')
     flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hooks').with_args(
-        'make_data_source_dump_patterns', object, object, object, object, object
+        'make_data_source_dump_patterns', object, object, object, object
     ).and_return({'postgresql': flexmock()})
     flexmock(module.tempfile).should_receive('mkdtemp').never()
     flexmock(module.borgmatic.hooks.data_source.dump).should_receive(
@@ -291,7 +287,6 @@ def test_restore_single_dump_extracts_and_restores_single_file_dump():
     flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hook').with_args(
         function_name='restore_data_source_dump',
         config=object,
-        log_prefix=object,
         hook_name=object,
         data_source=object,
         dry_run=object,
@@ -318,7 +313,7 @@ def test_restore_single_dump_extracts_and_restores_single_file_dump():
 def test_restore_single_dump_extracts_and_restores_directory_dump():
     flexmock(module).should_receive('render_dump_metadata').and_return('test')
     flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hooks').with_args(
-        'make_data_source_dump_patterns', object, object, object, object, object
+        'make_data_source_dump_patterns', object, object, object, object
     ).and_return({'postgresql': flexmock()})
     flexmock(module.tempfile).should_receive('mkdtemp').once().and_return(
         '/run/user/0/borgmatic/tmp1234'
@@ -334,7 +329,6 @@ def test_restore_single_dump_extracts_and_restores_directory_dump():
     flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hook').with_args(
         function_name='restore_data_source_dump',
         config=object,
-        log_prefix=object,
         hook_name=object,
         data_source=object,
         dry_run=object,
@@ -361,7 +355,7 @@ def test_restore_single_dump_extracts_and_restores_directory_dump():
 def test_restore_single_dump_with_directory_dump_error_cleans_up_temporary_directory():
     flexmock(module).should_receive('render_dump_metadata').and_return('test')
     flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hooks').with_args(
-        'make_data_source_dump_patterns', object, object, object, object, object
+        'make_data_source_dump_patterns', object, object, object, object
     ).and_return({'postgresql': flexmock()})
     flexmock(module.tempfile).should_receive('mkdtemp').once().and_return(
         '/run/user/0/borgmatic/tmp1234'
@@ -377,7 +371,6 @@ def test_restore_single_dump_with_directory_dump_error_cleans_up_temporary_direc
     flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hook').with_args(
         function_name='restore_data_source_dump',
         config=object,
-        log_prefix=object,
         hook_name=object,
         data_source=object,
         dry_run=object,
@@ -405,7 +398,7 @@ def test_restore_single_dump_with_directory_dump_error_cleans_up_temporary_direc
 def test_restore_single_dump_with_directory_dump_and_dry_run_skips_directory_move_and_cleanup():
     flexmock(module).should_receive('render_dump_metadata').and_return('test')
     flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hooks').with_args(
-        'make_data_source_dump_patterns', object, object, object, object, object
+        'make_data_source_dump_patterns', object, object, object, object
     ).and_return({'postgresql': flexmock()})
     flexmock(module.tempfile).should_receive('mkdtemp').once().and_return('/run/borgmatic/tmp1234')
     flexmock(module.borgmatic.hooks.data_source.dump).should_receive(
@@ -419,7 +412,6 @@ def test_restore_single_dump_with_directory_dump_and_dry_run_skips_directory_mov
     flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hook').with_args(
         function_name='restore_data_source_dump',
         config=object,
-        log_prefix=object,
         hook_name=object,
         data_source=object,
         dry_run=object,
@@ -1064,17 +1056,14 @@ def test_run_restore_restores_data_source_configured_with_all_name():
     flexmock(module).should_receive('get_configured_data_source').with_args(
         config=object,
         restore_dump=module.Dump(hook_name='postgresql_databases', data_source_name='foo'),
-        log_prefix=object,
     ).and_return({'name': 'foo'})
     flexmock(module).should_receive('get_configured_data_source').with_args(
         config=object,
         restore_dump=module.Dump(hook_name='postgresql_databases', data_source_name='bar'),
-        log_prefix=object,
     ).and_return(None)
     flexmock(module).should_receive('get_configured_data_source').with_args(
         config=object,
         restore_dump=module.Dump(hook_name='postgresql_databases', data_source_name='all'),
-        log_prefix=object,
     ).and_return({'name': 'bar'})
     flexmock(module).should_receive('restore_single_dump').with_args(
         repository=object,
@@ -1148,17 +1137,14 @@ def test_run_restore_skips_missing_data_source():
     flexmock(module).should_receive('get_configured_data_source').with_args(
         config=object,
         restore_dump=module.Dump(hook_name='postgresql_databases', data_source_name='foo'),
-        log_prefix=object,
     ).and_return({'name': 'foo'})
     flexmock(module).should_receive('get_configured_data_source').with_args(
         config=object,
         restore_dump=module.Dump(hook_name='postgresql_databases', data_source_name='bar'),
-        log_prefix=object,
     ).and_return(None)
     flexmock(module).should_receive('get_configured_data_source').with_args(
         config=object,
         restore_dump=module.Dump(hook_name='postgresql_databases', data_source_name='all'),
-        log_prefix=object,
     ).and_return(None)
     flexmock(module).should_receive('restore_single_dump').with_args(
         repository=object,
@@ -1232,12 +1218,10 @@ def test_run_restore_restores_data_sources_from_different_hooks():
     flexmock(module).should_receive('get_configured_data_source').with_args(
         config=object,
         restore_dump=module.Dump(hook_name='postgresql_databases', data_source_name='foo'),
-        log_prefix=object,
     ).and_return({'name': 'foo'})
     flexmock(module).should_receive('get_configured_data_source').with_args(
         config=object,
         restore_dump=module.Dump(hook_name='mysql_databases', data_source_name='foo'),
-        log_prefix=object,
     ).and_return({'name': 'bar'})
     flexmock(module).should_receive('restore_single_dump').with_args(
         repository=object,

+ 3 - 9
tests/unit/borg/test_create.py

@@ -17,14 +17,11 @@ def test_write_patterns_file_writes_pattern_lines():
     module.write_patterns_file(
         [Pattern('/foo'), Pattern('/foo/bar', Pattern_type.INCLUDE, Pattern_style.SHELL)],
         borgmatic_runtime_directory='/run/user/0',
-        log_prefix='test.yaml',
     )
 
 
 def test_write_patterns_file_with_empty_exclude_patterns_does_not_raise():
-    module.write_patterns_file(
-        [], borgmatic_runtime_directory='/run/user/0', log_prefix='test.yaml'
-    )
+    module.write_patterns_file([], borgmatic_runtime_directory='/run/user/0')
 
 
 def test_write_patterns_file_appends_to_existing():
@@ -36,7 +33,6 @@ def test_write_patterns_file_appends_to_existing():
     module.write_patterns_file(
         [Pattern('/foo'), Pattern('/foo/bar', Pattern_type.INCLUDE)],
         borgmatic_runtime_directory='/run/user/0',
-        log_prefix='test.yaml',
         patterns_file=patterns_file,
     )
 
@@ -647,7 +643,7 @@ def test_make_base_create_command_with_stream_processes_ignores_read_special_fal
     patterns_file = flexmock(name='patterns')
     flexmock(module.borgmatic.config.paths).should_receive('get_working_directory').and_return(None)
     flexmock(module).should_receive('write_patterns_file').with_args(
-        patterns, '/run/borgmatic', object
+        patterns, '/run/borgmatic'
     ).and_return(patterns_file)
     flexmock(module).should_receive('make_list_filter_flags').and_return('FOO')
     flexmock(module.flags).should_receive('get_default_archive_name_format').and_return(
@@ -670,7 +666,6 @@ def test_make_base_create_command_with_stream_processes_ignores_read_special_fal
             ),
         ),
         '/run/borgmatic',
-        'repo',
         patterns_file=patterns_file,
     ).and_return(patterns_file).once()
     flexmock(module).should_receive('make_exclude_flags').and_return(())
@@ -698,7 +693,7 @@ def test_make_base_create_command_with_stream_processes_ignores_read_special_fal
 def test_make_base_create_command_without_patterns_and_with_stream_processes_ignores_read_special_false_and_excludes_special_files():
     flexmock(module.borgmatic.config.paths).should_receive('get_working_directory').and_return(None)
     flexmock(module).should_receive('write_patterns_file').with_args(
-        [], '/run/borgmatic', object
+        [], '/run/borgmatic'
     ).and_return(None)
     flexmock(module).should_receive('make_list_filter_flags').and_return('FOO')
     flexmock(module.flags).should_receive('get_default_archive_name_format').and_return(
@@ -721,7 +716,6 @@ def test_make_base_create_command_without_patterns_and_with_stream_processes_ign
             ),
         ),
         '/run/borgmatic',
-        'repo',
         patterns_file=None,
     ).and_return(flexmock(name='patterns')).once()
     flexmock(module).should_receive('make_exclude_flags').and_return(())

+ 48 - 18
tests/unit/commands/test_borgmatic.py

@@ -32,6 +32,7 @@ def test_run_configuration_runs_actions_for_each_repository():
     flexmock(module).should_receive('get_skip_actions').and_return([])
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
     expected_results = [flexmock(), flexmock()]
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_return(expected_results[:1]).and_return(
         expected_results[1:]
     )
@@ -47,6 +48,7 @@ def test_run_configuration_with_skip_actions_does_not_raise():
     flexmock(module).should_receive('verbosity_to_log_level').and_return(logging.INFO)
     flexmock(module).should_receive('get_skip_actions').and_return(['compact'])
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_return(flexmock()).and_return(flexmock())
     config = {'repositories': [{'path': 'foo'}, {'path': 'bar'}], 'skip_actions': ['compact']}
     arguments = {'global': flexmock(monitoring_verbosity=1)}
@@ -60,6 +62,7 @@ def test_run_configuration_with_invalid_borg_version_errors():
     flexmock(module.borg_version).should_receive('local_borg_version').and_raise(ValueError)
     flexmock(module.command).should_receive('execute_hook').never()
     flexmock(module.dispatch).should_receive('call_hooks').never()
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').never()
     config = {'repositories': [{'path': 'foo'}]}
     arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'prune': flexmock()}
@@ -76,6 +79,7 @@ def test_run_configuration_logs_monitor_start_error():
     ).and_return(None).and_return(None)
     expected_results = [flexmock()]
     flexmock(module).should_receive('log_error_records').and_return(expected_results)
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').never()
     config = {'repositories': [{'path': 'foo'}]}
     arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@@ -92,6 +96,7 @@ def test_run_configuration_bails_for_monitor_start_soft_failure():
     error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again')
     flexmock(module.dispatch).should_receive('call_hooks').and_raise(error).and_return(None)
     flexmock(module).should_receive('log_error_records').never()
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').never()
     config = {'repositories': [{'path': 'foo'}, {'path': 'bar'}]}
     arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@@ -109,6 +114,7 @@ def test_run_configuration_logs_actions_error():
     flexmock(module.dispatch).should_receive('call_hooks')
     expected_results = [flexmock()]
     flexmock(module).should_receive('log_error_records').and_return(expected_results)
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_raise(OSError)
     config = {'repositories': [{'path': 'foo'}]}
     arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False)}
@@ -125,6 +131,7 @@ def test_run_configuration_skips_remaining_actions_for_actions_soft_failure_but_
     flexmock(module.dispatch).should_receive('call_hooks').times(5)
     error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again')
     log = flexmock()
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').twice().and_raise(error).and_yield(log)
     flexmock(module).should_receive('log_error_records').never()
     flexmock(module.command).should_receive('considered_soft_failure').and_return(True)
@@ -145,6 +152,7 @@ def test_run_configuration_logs_monitor_log_error():
     ).and_raise(OSError)
     expected_results = [flexmock()]
     flexmock(module).should_receive('log_error_records').and_return(expected_results)
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_return([])
     config = {'repositories': [{'path': 'foo'}]}
     arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@@ -163,6 +171,7 @@ def test_run_configuration_still_pings_monitor_for_monitor_log_soft_failure():
         None
     ).and_raise(error).and_return(None).and_return(None).times(5)
     flexmock(module).should_receive('log_error_records').never()
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_return([])
     flexmock(module.command).should_receive('considered_soft_failure').and_return(True)
     config = {'repositories': [{'path': 'foo'}]}
@@ -182,6 +191,7 @@ def test_run_configuration_logs_monitor_finish_error():
     ).and_return(None).and_raise(OSError)
     expected_results = [flexmock()]
     flexmock(module).should_receive('log_error_records').and_return(expected_results)
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_return([])
     config = {'repositories': [{'path': 'foo'}]}
     arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@@ -200,6 +210,7 @@ def test_run_configuration_bails_for_monitor_finish_soft_failure():
         None
     ).and_raise(None).and_raise(error)
     flexmock(module).should_receive('log_error_records').never()
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_return([])
     flexmock(module.command).should_receive('considered_soft_failure').and_return(True)
     config = {'repositories': [{'path': 'foo'}]}
@@ -216,6 +227,7 @@ def test_run_configuration_does_not_call_monitoring_hooks_if_monitoring_hooks_ar
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
 
     flexmock(module.dispatch).should_receive('call_hooks').never()
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_return([])
 
     config = {'repositories': [{'path': 'foo'}]}
@@ -233,6 +245,7 @@ def test_run_configuration_logs_on_error_hook_error():
     flexmock(module).should_receive('log_error_records').and_return(
         expected_results[:1]
     ).and_return(expected_results[1:])
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_raise(OSError)
     config = {'repositories': [{'path': 'foo'}]}
     arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@@ -250,6 +263,7 @@ def test_run_configuration_bails_for_on_error_hook_soft_failure():
     flexmock(module.command).should_receive('execute_hook').and_raise(error)
     expected_results = [flexmock()]
     flexmock(module).should_receive('log_error_records').and_return(expected_results)
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_raise(OSError)
     config = {'repositories': [{'path': 'foo'}]}
     arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@@ -265,6 +279,7 @@ def test_run_configuration_retries_soft_error():
     flexmock(module).should_receive('get_skip_actions').and_return([])
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
     flexmock(module.command).should_receive('execute_hook')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_raise(OSError).and_return([])
     flexmock(module).should_receive('log_error_records').and_return([flexmock()]).once()
     config = {'repositories': [{'path': 'foo'}], 'retries': 1}
@@ -279,16 +294,17 @@ def test_run_configuration_retries_hard_error():
     flexmock(module).should_receive('get_skip_actions').and_return([])
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
     flexmock(module.command).should_receive('execute_hook')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_raise(OSError).times(2)
     flexmock(module).should_receive('log_error_records').with_args(
-        'foo: Error running actions for repository',
+        'Error running actions for repository',
         OSError,
         levelno=logging.WARNING,
         log_command_error_output=True,
     ).and_return([flexmock()])
     error_logs = [flexmock()]
     flexmock(module).should_receive('log_error_records').with_args(
-        'foo: Error running actions for repository',
+        'Error running actions for repository',
         OSError,
     ).and_return(error_logs)
     config = {'repositories': [{'path': 'foo'}], 'retries': 1}
@@ -302,13 +318,14 @@ def test_run_configuration_repos_ordered():
     flexmock(module).should_receive('get_skip_actions').and_return([])
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
     flexmock(module.command).should_receive('execute_hook')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_raise(OSError).times(2)
     expected_results = [flexmock(), flexmock()]
     flexmock(module).should_receive('log_error_records').with_args(
-        'foo: Error running actions for repository', OSError
+        'Error running actions for repository', OSError
     ).and_return(expected_results[:1]).ordered()
     flexmock(module).should_receive('log_error_records').with_args(
-        'bar: Error running actions for repository', OSError
+        'Error running actions for repository', OSError
     ).and_return(expected_results[1:]).ordered()
     config = {'repositories': [{'path': 'foo'}, {'path': 'bar'}]}
     arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
@@ -321,26 +338,27 @@ def test_run_configuration_retries_round_robin():
     flexmock(module).should_receive('get_skip_actions').and_return([])
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
     flexmock(module.command).should_receive('execute_hook')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_raise(OSError).times(4)
     flexmock(module).should_receive('log_error_records').with_args(
-        'foo: Error running actions for repository',
+        'Error running actions for repository',
         OSError,
         levelno=logging.WARNING,
         log_command_error_output=True,
     ).and_return([flexmock()]).ordered()
     flexmock(module).should_receive('log_error_records').with_args(
-        'bar: Error running actions for repository',
+        'Error running actions for repository',
         OSError,
         levelno=logging.WARNING,
         log_command_error_output=True,
     ).and_return([flexmock()]).ordered()
     foo_error_logs = [flexmock()]
     flexmock(module).should_receive('log_error_records').with_args(
-        'foo: Error running actions for repository', OSError
+        'Error running actions for repository', OSError
     ).and_return(foo_error_logs).ordered()
     bar_error_logs = [flexmock()]
     flexmock(module).should_receive('log_error_records').with_args(
-        'bar: Error running actions for repository', OSError
+        'Error running actions for repository', OSError
     ).and_return(bar_error_logs).ordered()
     config = {
         'repositories': [{'path': 'foo'}, {'path': 'bar'}],
@@ -356,24 +374,25 @@ def test_run_configuration_retries_one_passes():
     flexmock(module).should_receive('get_skip_actions').and_return([])
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
     flexmock(module.command).should_receive('execute_hook')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_raise(OSError).and_raise(OSError).and_return(
         []
     ).and_raise(OSError).times(4)
     flexmock(module).should_receive('log_error_records').with_args(
-        'foo: Error running actions for repository',
+        'Error running actions for repository',
         OSError,
         levelno=logging.WARNING,
         log_command_error_output=True,
     ).and_return([flexmock()]).ordered()
     flexmock(module).should_receive('log_error_records').with_args(
-        'bar: Error running actions for repository',
+        'Error running actions for repository',
         OSError,
         levelno=logging.WARNING,
         log_command_error_output=True,
     ).and_return(flexmock()).ordered()
     error_logs = [flexmock()]
     flexmock(module).should_receive('log_error_records').with_args(
-        'bar: Error running actions for repository', OSError
+        'Error running actions for repository', OSError
     ).and_return(error_logs).ordered()
     config = {
         'repositories': [{'path': 'foo'}, {'path': 'bar'}],
@@ -389,9 +408,10 @@ def test_run_configuration_retry_wait():
     flexmock(module).should_receive('get_skip_actions').and_return([])
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
     flexmock(module.command).should_receive('execute_hook')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_raise(OSError).times(4)
     flexmock(module).should_receive('log_error_records').with_args(
-        'foo: Error running actions for repository',
+        'Error running actions for repository',
         OSError,
         levelno=logging.WARNING,
         log_command_error_output=True,
@@ -399,7 +419,7 @@ def test_run_configuration_retry_wait():
 
     flexmock(time).should_receive('sleep').with_args(10).and_return().ordered()
     flexmock(module).should_receive('log_error_records').with_args(
-        'foo: Error running actions for repository',
+        'Error running actions for repository',
         OSError,
         levelno=logging.WARNING,
         log_command_error_output=True,
@@ -407,7 +427,7 @@ def test_run_configuration_retry_wait():
 
     flexmock(time).should_receive('sleep').with_args(20).and_return().ordered()
     flexmock(module).should_receive('log_error_records').with_args(
-        'foo: Error running actions for repository',
+        'Error running actions for repository',
         OSError,
         levelno=logging.WARNING,
         log_command_error_output=True,
@@ -416,7 +436,7 @@ def test_run_configuration_retry_wait():
     flexmock(time).should_receive('sleep').with_args(30).and_return().ordered()
     error_logs = [flexmock()]
     flexmock(module).should_receive('log_error_records').with_args(
-        'foo: Error running actions for repository', OSError
+        'Error running actions for repository', OSError
     ).and_return(error_logs).ordered()
     config = {
         'repositories': [{'path': 'foo'}],
@@ -433,17 +453,18 @@ def test_run_configuration_retries_timeout_multiple_repos():
     flexmock(module).should_receive('get_skip_actions').and_return([])
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
     flexmock(module.command).should_receive('execute_hook')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_raise(OSError).and_raise(OSError).and_return(
         []
     ).and_raise(OSError).times(4)
     flexmock(module).should_receive('log_error_records').with_args(
-        'foo: Error running actions for repository',
+        'Error running actions for repository',
         OSError,
         levelno=logging.WARNING,
         log_command_error_output=True,
     ).and_return([flexmock()]).ordered()
     flexmock(module).should_receive('log_error_records').with_args(
-        'bar: Error running actions for repository',
+        'Error running actions for repository',
         OSError,
         levelno=logging.WARNING,
         log_command_error_output=True,
@@ -456,7 +477,7 @@ def test_run_configuration_retries_timeout_multiple_repos():
     flexmock(time).should_receive('sleep').with_args(10).and_return().ordered()
     error_logs = [flexmock()]
     flexmock(module).should_receive('log_error_records').with_args(
-        'bar: Error running actions for repository', OSError
+        'Error running actions for repository', OSError
     ).and_return(error_logs).ordered()
     config = {
         'repositories': [{'path': 'foo'}, {'path': 'bar'}],
@@ -1370,6 +1391,7 @@ def test_collect_highlander_action_summary_logs_error_on_run_validate_failure():
 def test_collect_configuration_run_summary_logs_info_for_success():
     flexmock(module.command).should_receive('execute_hook').never()
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_configuration').and_return([])
     arguments = {}
 
@@ -1384,6 +1406,7 @@ def test_collect_configuration_run_summary_logs_info_for_success():
 
 def test_collect_configuration_run_summary_executes_hooks_for_create():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_configuration').and_return([])
     arguments = {'create': flexmock(), 'global': flexmock(monitoring_verbosity=1, dry_run=False)}
 
@@ -1398,6 +1421,7 @@ def test_collect_configuration_run_summary_executes_hooks_for_create():
 
 def test_collect_configuration_run_summary_logs_info_for_success_with_extract():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_configuration').and_return([])
     arguments = {'extract': flexmock(repository='repo')}
 
@@ -1429,6 +1453,7 @@ def test_collect_configuration_run_summary_logs_extract_with_repository_error():
 
 def test_collect_configuration_run_summary_logs_info_for_success_with_mount():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_configuration').and_return([])
     arguments = {'mount': flexmock(repository='repo')}
 
@@ -1488,6 +1513,7 @@ def test_collect_configuration_run_summary_logs_pre_hook_error():
 def test_collect_configuration_run_summary_logs_post_hook_error():
     flexmock(module.command).should_receive('execute_hook').and_return(None).and_raise(ValueError)
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_configuration').and_return([])
     expected_logs = (flexmock(),)
     flexmock(module).should_receive('log_error_records').and_return(expected_logs)
@@ -1521,6 +1547,7 @@ def test_collect_configuration_run_summary_logs_for_list_with_archive_and_reposi
 
 def test_collect_configuration_run_summary_logs_info_for_success_with_list():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_configuration').and_return([])
     arguments = {'list': flexmock(repository='repo', archive=None)}
 
@@ -1535,6 +1562,7 @@ def test_collect_configuration_run_summary_logs_info_for_success_with_list():
 
 def test_collect_configuration_run_summary_logs_run_configuration_error():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_configuration').and_return(
         [logging.makeLogRecord(dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg='Error'))]
     )
@@ -1552,6 +1580,7 @@ def test_collect_configuration_run_summary_logs_run_configuration_error():
 
 def test_collect_configuration_run_summary_logs_run_umount_error():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_configuration').and_return([])
     flexmock(module.borg_umount).should_receive('unmount_archive').and_raise(OSError)
     flexmock(module).should_receive('log_error_records').and_return(
@@ -1570,6 +1599,7 @@ def test_collect_configuration_run_summary_logs_run_umount_error():
 
 def test_collect_configuration_run_summary_logs_outputs_merged_json_results():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_configuration').and_return(['foo', 'bar']).and_return(
         ['baz']
     )

+ 15 - 17
tests/unit/config/test_paths.py

@@ -66,7 +66,7 @@ def test_runtime_directory_uses_config_option():
     flexmock(module.os).should_receive('makedirs')
     config = {'user_runtime_directory': '/run', 'borgmatic_source_directory': '/nope'}
 
-    with module.Runtime_directory(config, 'prefix') as borgmatic_runtime_directory:
+    with module.Runtime_directory(config) as borgmatic_runtime_directory:
         assert borgmatic_runtime_directory == '/run/./borgmatic'
 
 
@@ -75,7 +75,7 @@ def test_runtime_directory_uses_config_option_without_adding_duplicate_borgmatic
     flexmock(module.os).should_receive('makedirs')
     config = {'user_runtime_directory': '/run/borgmatic', 'borgmatic_source_directory': '/nope'}
 
-    with module.Runtime_directory(config, 'prefix') as borgmatic_runtime_directory:
+    with module.Runtime_directory(config) as borgmatic_runtime_directory:
         assert borgmatic_runtime_directory == '/run/./borgmatic'
 
 
@@ -84,9 +84,7 @@ def test_runtime_directory_with_relative_config_option_errors():
     config = {'user_runtime_directory': 'run', 'borgmatic_source_directory': '/nope'}
 
     with pytest.raises(ValueError):
-        with module.Runtime_directory(
-            config, 'prefix'
-        ) as borgmatic_runtime_directory:  # noqa: F841
+        with module.Runtime_directory(config) as borgmatic_runtime_directory:  # noqa: F841
             pass
 
 
@@ -97,7 +95,7 @@ def test_runtime_directory_falls_back_to_xdg_runtime_dir():
     )
     flexmock(module.os).should_receive('makedirs')
 
-    with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:
+    with module.Runtime_directory({}) as borgmatic_runtime_directory:
         assert borgmatic_runtime_directory == '/run/./borgmatic'
 
 
@@ -108,7 +106,7 @@ def test_runtime_directory_falls_back_to_xdg_runtime_dir_without_adding_duplicat
     )
     flexmock(module.os).should_receive('makedirs')
 
-    with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:
+    with module.Runtime_directory({}) as borgmatic_runtime_directory:
         assert borgmatic_runtime_directory == '/run/./borgmatic'
 
 
@@ -117,7 +115,7 @@ def test_runtime_directory_with_relative_xdg_runtime_dir_errors():
     flexmock(module.os).should_receive('makedirs').never()
 
     with pytest.raises(ValueError):
-        with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:  # noqa: F841
+        with module.Runtime_directory({}) as borgmatic_runtime_directory:  # noqa: F841
             pass
 
 
@@ -129,7 +127,7 @@ def test_runtime_directory_falls_back_to_runtime_directory():
     )
     flexmock(module.os).should_receive('makedirs')
 
-    with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:
+    with module.Runtime_directory({}) as borgmatic_runtime_directory:
         assert borgmatic_runtime_directory == '/run/./borgmatic'
 
 
@@ -141,7 +139,7 @@ def test_runtime_directory_falls_back_to_runtime_directory_without_adding_duplic
     )
     flexmock(module.os).should_receive('makedirs')
 
-    with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:
+    with module.Runtime_directory({}) as borgmatic_runtime_directory:
         assert borgmatic_runtime_directory == '/run/./borgmatic'
 
 
@@ -153,7 +151,7 @@ def test_runtime_directory_with_relative_runtime_directory_errors():
     flexmock(module.os).should_receive('makedirs').never()
 
     with pytest.raises(ValueError):
-        with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:  # noqa: F841
+        with module.Runtime_directory({}) as borgmatic_runtime_directory:  # noqa: F841
             pass
 
 
@@ -171,7 +169,7 @@ def test_runtime_directory_falls_back_to_tmpdir_and_adds_temporary_subdirectory_
     ).and_return(temporary_directory)
     flexmock(module.os).should_receive('makedirs')
 
-    with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:
+    with module.Runtime_directory({}) as borgmatic_runtime_directory:
         assert borgmatic_runtime_directory == '/run/borgmatic-1234/./borgmatic'
 
 
@@ -185,7 +183,7 @@ def test_runtime_directory_with_relative_tmpdir_errors():
     flexmock(module.os).should_receive('makedirs').never()
 
     with pytest.raises(ValueError):
-        with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:  # noqa: F841
+        with module.Runtime_directory({}) as borgmatic_runtime_directory:  # noqa: F841
             pass
 
 
@@ -204,7 +202,7 @@ def test_runtime_directory_falls_back_to_temp_and_adds_temporary_subdirectory_th
     ).and_return(temporary_directory)
     flexmock(module.os).should_receive('makedirs')
 
-    with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:
+    with module.Runtime_directory({}) as borgmatic_runtime_directory:
         assert borgmatic_runtime_directory == '/run/borgmatic-1234/./borgmatic'
 
 
@@ -219,7 +217,7 @@ def test_runtime_directory_with_relative_temp_errors():
     flexmock(module.os).should_receive('makedirs')
 
     with pytest.raises(ValueError):
-        with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:  # noqa: F841
+        with module.Runtime_directory({}) as borgmatic_runtime_directory:  # noqa: F841
             pass
 
 
@@ -238,7 +236,7 @@ def test_runtime_directory_falls_back_to_hard_coded_tmp_path_and_adds_temporary_
     ).and_return(temporary_directory)
     flexmock(module.os).should_receive('makedirs')
 
-    with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:
+    with module.Runtime_directory({}) as borgmatic_runtime_directory:
         assert borgmatic_runtime_directory == '/tmp/borgmatic-1234/./borgmatic'
 
 
@@ -257,7 +255,7 @@ def test_runtime_directory_with_erroring_cleanup_does_not_raise():
     ).and_return(temporary_directory)
     flexmock(module.os).should_receive('makedirs')
 
-    with module.Runtime_directory({}, 'prefix') as borgmatic_runtime_directory:
+    with module.Runtime_directory({}) as borgmatic_runtime_directory:
         assert borgmatic_runtime_directory == '/tmp/borgmatic-1234/./borgmatic'
 
 

+ 0 - 7
tests/unit/hooks/data_source/test_bootstrap.py

@@ -24,7 +24,6 @@ def test_dump_data_sources_creates_manifest_file():
     module.dump_data_sources(
         hook_config=None,
         config={},
-        log_prefix='test',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -40,7 +39,6 @@ def test_dump_data_sources_with_store_config_files_false_does_not_create_manifes
     module.dump_data_sources(
         hook_config=hook_config,
         config={'bootstrap': hook_config},
-        log_prefix='test',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -55,7 +53,6 @@ def test_dump_data_sources_with_dry_run_does_not_create_manifest_file():
     module.dump_data_sources(
         hook_config=None,
         config={},
-        log_prefix='test',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -76,7 +73,6 @@ def test_remove_data_source_dumps_deletes_manifest_and_parent_directory():
     module.remove_data_source_dumps(
         hook_config=None,
         config={},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -93,7 +89,6 @@ def test_remove_data_source_dumps_with_dry_run_bails():
     module.remove_data_source_dumps(
         hook_config=None,
         config={},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=True,
     )
@@ -112,7 +107,6 @@ def test_remove_data_source_dumps_swallows_manifest_file_not_found_error():
     module.remove_data_source_dumps(
         hook_config=None,
         config={},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -133,7 +127,6 @@ def test_remove_data_source_dumps_swallows_manifest_parent_directory_not_found_e
     module.remove_data_source_dumps(
         hook_config=None,
         config={},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )

+ 0 - 15
tests/unit/hooks/data_source/test_btrfs.py

@@ -189,7 +189,6 @@ def test_dump_data_sources_snapshots_each_subvolume_and_updates_patterns():
         module.dump_data_sources(
             hook_config=config['btrfs'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -247,7 +246,6 @@ def test_dump_data_sources_uses_custom_btrfs_command_in_commands():
         module.dump_data_sources(
             hook_config=config['btrfs'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -303,7 +301,6 @@ def test_dump_data_sources_uses_custom_findmnt_command_in_commands():
         module.dump_data_sources(
             hook_config=config['btrfs'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -344,7 +341,6 @@ def test_dump_data_sources_with_dry_run_skips_snapshot_and_patterns_update():
         module.dump_data_sources(
             hook_config=config['btrfs'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -369,7 +365,6 @@ def test_dump_data_sources_without_matching_subvolumes_skips_snapshot_and_patter
         module.dump_data_sources(
             hook_config=config['btrfs'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -432,7 +427,6 @@ def test_dump_data_sources_snapshots_adds_to_existing_exclude_patterns():
         module.dump_data_sources(
             hook_config=config['btrfs'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -542,7 +536,6 @@ def test_remove_data_source_dumps_deletes_snapshots():
     module.remove_data_source_dumps(
         hook_config=config['btrfs'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -560,7 +553,6 @@ def test_remove_data_source_dumps_without_hook_configuration_bails():
     module.remove_data_source_dumps(
         hook_config=None,
         config={'source_directories': '/mnt/subvolume'},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -579,7 +571,6 @@ def test_remove_data_source_dumps_with_get_subvolumes_file_not_found_error_bails
     module.remove_data_source_dumps(
         hook_config=config['btrfs'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -600,7 +591,6 @@ def test_remove_data_source_dumps_with_get_subvolumes_called_process_error_bails
     module.remove_data_source_dumps(
         hook_config=config['btrfs'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -664,7 +654,6 @@ def test_remove_data_source_dumps_with_dry_run_skips_deletes():
     module.remove_data_source_dumps(
         hook_config=config['btrfs'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=True,
     )
@@ -683,7 +672,6 @@ def test_remove_data_source_dumps_without_subvolumes_skips_deletes():
     module.remove_data_source_dumps(
         hook_config=config['btrfs'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -727,7 +715,6 @@ def test_remove_data_source_without_snapshots_skips_deletes():
     module.remove_data_source_dumps(
         hook_config=config['btrfs'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -791,7 +778,6 @@ def test_remove_data_source_dumps_with_delete_snapshot_file_not_found_error_bail
     module.remove_data_source_dumps(
         hook_config=config['btrfs'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -857,7 +843,6 @@ def test_remove_data_source_dumps_with_delete_snapshot_called_process_error_bail
     module.remove_data_source_dumps(
         hook_config=config['btrfs'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )

+ 3 - 3
tests/unit/hooks/data_source/test_dump.py

@@ -48,21 +48,21 @@ def test_remove_data_source_dumps_removes_dump_path():
     flexmock(module.os.path).should_receive('exists').and_return(True)
     flexmock(module.shutil).should_receive('rmtree').with_args('databases').once()
 
-    module.remove_data_source_dumps('databases', 'SuperDB', 'test.yaml', dry_run=False)
+    module.remove_data_source_dumps('databases', 'SuperDB', dry_run=False)
 
 
 def test_remove_data_source_dumps_with_dry_run_skips_removal():
     flexmock(module.os.path).should_receive('exists').never()
     flexmock(module.shutil).should_receive('rmtree').never()
 
-    module.remove_data_source_dumps('databases', 'SuperDB', 'test.yaml', dry_run=True)
+    module.remove_data_source_dumps('databases', 'SuperDB', dry_run=True)
 
 
 def test_remove_data_source_dumps_without_dump_path_present_skips_removal():
     flexmock(module.os.path).should_receive('exists').and_return(False)
     flexmock(module.shutil).should_receive('rmtree').never()
 
-    module.remove_data_source_dumps('databases', 'SuperDB', 'test.yaml', dry_run=False)
+    module.remove_data_source_dumps('databases', 'SuperDB', dry_run=False)
 
 
 def test_convert_glob_patterns_to_borg_pattern_makes_multipart_regular_expression():

+ 0 - 19
tests/unit/hooks/data_source/test_lvm.py

@@ -220,7 +220,6 @@ def test_dump_data_sources_snapshots_and_mounts_and_updates_patterns():
         module.dump_data_sources(
             hook_config=config['lvm'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -246,7 +245,6 @@ def test_dump_data_sources_with_no_logical_volumes_skips_snapshots():
         module.dump_data_sources(
             hook_config=config['lvm'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -317,7 +315,6 @@ def test_dump_data_sources_uses_snapshot_size_for_snapshot():
         module.dump_data_sources(
             hook_config=config['lvm'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -398,7 +395,6 @@ def test_dump_data_sources_uses_custom_commands():
         module.dump_data_sources(
             hook_config=config['lvm'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -450,7 +446,6 @@ def test_dump_data_sources_with_dry_run_skips_snapshots_and_does_not_touch_patte
         module.dump_data_sources(
             hook_config=config['lvm'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -518,7 +513,6 @@ def test_dump_data_sources_ignores_mismatch_between_given_patterns_and_contained
         module.dump_data_sources(
             hook_config=config['lvm'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -572,7 +566,6 @@ def test_dump_data_sources_with_missing_snapshot_errors():
         module.dump_data_sources(
             hook_config=config['lvm'],
             config=config,
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -728,7 +721,6 @@ def test_remove_data_source_dumps_unmounts_and_remove_snapshots():
     module.remove_data_source_dumps(
         hook_config=config['lvm'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -745,7 +737,6 @@ def test_remove_data_source_dumps_bails_for_missing_lvm_configuration():
     module.remove_data_source_dumps(
         hook_config=None,
         config={'source_directories': '/mnt/lvolume'},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -763,7 +754,6 @@ def test_remove_data_source_dumps_bails_for_missing_lsblk_command():
     module.remove_data_source_dumps(
         hook_config=config['lvm'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -783,7 +773,6 @@ def test_remove_data_source_dumps_bails_for_lsblk_command_error():
     module.remove_data_source_dumps(
         hook_config=config['lvm'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -828,7 +817,6 @@ def test_remove_data_source_dumps_with_missing_snapshot_directory_skips_unmount(
     module.remove_data_source_dumps(
         hook_config=config['lvm'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -886,7 +874,6 @@ def test_remove_data_source_dumps_with_missing_snapshot_mount_path_skips_unmount
     module.remove_data_source_dumps(
         hook_config=config['lvm'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -944,7 +931,6 @@ def test_remove_data_source_dumps_with_successful_mount_point_removal_skips_unmo
     module.remove_data_source_dumps(
         hook_config=config['lvm'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -988,7 +974,6 @@ def test_remove_data_source_dumps_bails_for_missing_umount_command():
     module.remove_data_source_dumps(
         hook_config=config['lvm'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -1032,7 +1017,6 @@ def test_remove_data_source_dumps_bails_for_umount_command_error():
     module.remove_data_source_dumps(
         hook_config=config['lvm'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -1076,7 +1060,6 @@ def test_remove_data_source_dumps_bails_for_missing_lvs_command():
     module.remove_data_source_dumps(
         hook_config=config['lvm'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -1122,7 +1105,6 @@ def test_remove_data_source_dumps_bails_for_lvs_command_error():
     module.remove_data_source_dumps(
         hook_config=config['lvm'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -1165,7 +1147,6 @@ def test_remove_data_source_with_dry_run_skips_snapshot_unmount_and_delete():
     module.remove_data_source_dumps(
         hook_config=config['lvm'],
         config=config,
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=True,
     )

+ 8 - 42
tests/unit/hooks/data_source/test_mariadb.py

@@ -8,50 +8,42 @@ from borgmatic.hooks.data_source import mariadb as module
 
 def test_database_names_to_dump_passes_through_name():
     extra_environment = flexmock()
-    log_prefix = ''
 
-    names = module.database_names_to_dump(
-        {'name': 'foo'}, extra_environment, log_prefix, dry_run=False
-    )
+    names = module.database_names_to_dump({'name': 'foo'}, extra_environment, dry_run=False)
 
     assert names == ('foo',)
 
 
 def test_database_names_to_dump_bails_for_dry_run():
     extra_environment = flexmock()
-    log_prefix = ''
     flexmock(module).should_receive('execute_command_and_capture_output').never()
 
-    names = module.database_names_to_dump(
-        {'name': 'all'}, extra_environment, log_prefix, dry_run=True
-    )
+    names = module.database_names_to_dump({'name': 'all'}, extra_environment, dry_run=True)
 
     assert names == ()
 
 
 def test_database_names_to_dump_queries_mariadb_for_database_names():
     extra_environment = flexmock()
-    log_prefix = ''
     flexmock(module).should_receive('execute_command_and_capture_output').with_args(
         ('mariadb', '--skip-column-names', '--batch', '--execute', 'show schemas'),
         extra_environment=extra_environment,
     ).and_return('foo\nbar\nmysql\n').once()
 
-    names = module.database_names_to_dump(
-        {'name': 'all'}, extra_environment, log_prefix, dry_run=False
-    )
+    names = module.database_names_to_dump({'name': 'all'}, extra_environment, dry_run=False)
 
     assert names == ('foo', 'bar')
 
 
 def test_use_streaming_true_for_any_databases():
     assert module.use_streaming(
-        databases=[flexmock(), flexmock()], config=flexmock(), log_prefix=flexmock()
+        databases=[flexmock(), flexmock()],
+        config=flexmock(),
     )
 
 
 def test_use_streaming_false_for_no_databases():
-    assert not module.use_streaming(databases=[], config=flexmock(), log_prefix=flexmock())
+    assert not module.use_streaming(databases=[], config=flexmock())
 
 
 def test_dump_data_sources_dumps_each_database():
@@ -65,7 +57,6 @@ def test_dump_data_sources_dumps_each_database():
     for name, process in zip(('foo', 'bar'), processes):
         flexmock(module).should_receive('execute_dump_command').with_args(
             database={'name': name},
-            log_prefix=object,
             dump_path=object,
             database_names=(name,),
             extra_environment=object,
@@ -77,7 +68,6 @@ def test_dump_data_sources_dumps_each_database():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -97,7 +87,6 @@ def test_dump_data_sources_dumps_with_password():
 
     flexmock(module).should_receive('execute_dump_command').with_args(
         database=database,
-        log_prefix=object,
         dump_path=object,
         database_names=('foo',),
         extra_environment={'MYSQL_PWD': 'trustsome1'},
@@ -108,7 +97,6 @@ def test_dump_data_sources_dumps_with_password():
     assert module.dump_data_sources(
         [database],
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -123,7 +111,6 @@ def test_dump_data_sources_dumps_all_databases_at_once():
     flexmock(module).should_receive('database_names_to_dump').and_return(('foo', 'bar'))
     flexmock(module).should_receive('execute_dump_command').with_args(
         database={'name': 'all'},
-        log_prefix=object,
         dump_path=object,
         database_names=('foo', 'bar'),
         extra_environment=object,
@@ -134,7 +121,6 @@ def test_dump_data_sources_dumps_all_databases_at_once():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -151,7 +137,6 @@ def test_dump_data_sources_dumps_all_databases_separately_when_format_configured
     for name, process in zip(('foo', 'bar'), processes):
         flexmock(module).should_receive('execute_dump_command').with_args(
             database={'name': name, 'format': 'sql'},
-            log_prefix=object,
             dump_path=object,
             database_names=(name,),
             extra_environment=object,
@@ -163,7 +148,6 @@ def test_dump_data_sources_dumps_all_databases_separately_when_format_configured
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -187,7 +171,7 @@ def test_database_names_to_dump_runs_mariadb_with_list_options():
         extra_environment=None,
     ).and_return(('foo\nbar')).once()
 
-    assert module.database_names_to_dump(database, None, 'test.yaml', '') == ('foo', 'bar')
+    assert module.database_names_to_dump(database, None, '') == ('foo', 'bar')
 
 
 def test_database_names_to_dump_runs_non_default_mariadb_with_list_options():
@@ -208,7 +192,7 @@ def test_database_names_to_dump_runs_non_default_mariadb_with_list_options():
         ),
     ).and_return(('foo\nbar')).once()
 
-    assert module.database_names_to_dump(database, None, 'test.yaml', '') == ('foo', 'bar')
+    assert module.database_names_to_dump(database, None, '') == ('foo', 'bar')
 
 
 def test_execute_dump_command_runs_mariadb_dump():
@@ -233,7 +217,6 @@ def test_execute_dump_command_runs_mariadb_dump():
     assert (
         module.execute_dump_command(
             database={'name': 'foo'},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -265,7 +248,6 @@ def test_execute_dump_command_runs_mariadb_dump_without_add_drop_database():
     assert (
         module.execute_dump_command(
             database={'name': 'foo', 'add_drop_database': False},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -304,7 +286,6 @@ def test_execute_dump_command_runs_mariadb_dump_with_hostname_and_port():
     assert (
         module.execute_dump_command(
             database={'name': 'foo', 'hostname': 'database.example.org', 'port': 5433},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -339,7 +320,6 @@ def test_execute_dump_command_runs_mariadb_dump_with_username_and_password():
     assert (
         module.execute_dump_command(
             database={'name': 'foo', 'username': 'root', 'password': 'trustsome1'},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment={'MYSQL_PWD': 'trustsome1'},
@@ -373,7 +353,6 @@ def test_execute_dump_command_runs_mariadb_dump_with_options():
     assert (
         module.execute_dump_command(
             database={'name': 'foo', 'options': '--stuff=such'},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -411,7 +390,6 @@ def test_execute_dump_command_runs_non_default_mariadb_dump_with_options():
                 'mariadb_dump_command': 'custom_mariadb_dump',
                 'options': '--stuff=such',
             },  # Custom MariaDB dump command specified
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -431,7 +409,6 @@ def test_execute_dump_command_with_duplicate_dump_skips_mariadb_dump():
     assert (
         module.execute_dump_command(
             database={'name': 'foo'},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -452,7 +429,6 @@ def test_execute_dump_command_with_dry_run_skips_mariadb_dump():
     assert (
         module.execute_dump_command(
             database={'name': 'foo'},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -475,7 +451,6 @@ def test_dump_data_sources_errors_for_missing_all_databases():
         assert module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -495,7 +470,6 @@ def test_dump_data_sources_does_not_error_for_missing_all_databases_with_dry_run
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -520,7 +494,6 @@ def test_restore_data_source_dump_runs_mariadb_to_restore():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=False,
         extract_process=extract_process,
@@ -549,7 +522,6 @@ def test_restore_data_source_dump_runs_mariadb_with_options():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -580,7 +552,6 @@ def test_restore_data_source_dump_runs_non_default_mariadb_with_options():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -618,7 +589,6 @@ def test_restore_data_source_dump_runs_mariadb_with_hostname_and_port():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -647,7 +617,6 @@ def test_restore_data_source_dump_runs_mariadb_with_username_and_password():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -697,7 +666,6 @@ def test_restore_data_source_dump_with_connection_params_uses_connection_params_
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -749,7 +717,6 @@ def test_restore_data_source_dump_without_connection_params_uses_restore_params_
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -771,7 +738,6 @@ def test_restore_data_source_dump_with_dry_run_skips_restore():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=True,
         extract_process=flexmock(),

+ 1 - 20
tests/unit/hooks/data_source/test_mongodb.py

@@ -9,7 +9,6 @@ def test_use_streaming_true_for_any_non_directory_format_databases():
     assert module.use_streaming(
         databases=[{'format': 'stuff'}, {'format': 'directory'}, {}],
         config=flexmock(),
-        log_prefix=flexmock(),
     )
 
 
@@ -17,12 +16,11 @@ def test_use_streaming_false_for_all_directory_format_databases():
     assert not module.use_streaming(
         databases=[{'format': 'directory'}, {'format': 'directory'}],
         config=flexmock(),
-        log_prefix=flexmock(),
     )
 
 
 def test_use_streaming_false_for_no_databases():
-    assert not module.use_streaming(databases=[], config=flexmock(), log_prefix=flexmock())
+    assert not module.use_streaming(databases=[], config=flexmock())
 
 
 def test_dump_data_sources_runs_mongodump_for_each_database():
@@ -45,7 +43,6 @@ def test_dump_data_sources_runs_mongodump_for_each_database():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -68,7 +65,6 @@ def test_dump_data_sources_with_dry_run_skips_mongodump():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -107,7 +103,6 @@ def test_dump_data_sources_runs_mongodump_with_hostname_and_port():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -153,7 +148,6 @@ def test_dump_data_sources_runs_mongodump_with_username_and_password():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -179,7 +173,6 @@ def test_dump_data_sources_runs_mongodump_with_directory_format():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -215,7 +208,6 @@ def test_dump_data_sources_runs_mongodump_with_options():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -241,7 +233,6 @@ def test_dump_data_sources_runs_mongodumpall_for_all_databases():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -273,7 +264,6 @@ def test_restore_data_source_dump_runs_mongorestore():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=False,
         extract_process=extract_process,
@@ -313,7 +303,6 @@ def test_restore_data_source_dump_runs_mongorestore_with_hostname_and_port():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -361,7 +350,6 @@ def test_restore_data_source_dump_runs_mongorestore_with_username_and_password()
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -417,7 +405,6 @@ def test_restore_data_source_dump_with_connection_params_uses_connection_params_
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -473,7 +460,6 @@ def test_restore_data_source_dump_without_connection_params_uses_restore_params_
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -503,7 +489,6 @@ def test_restore_data_source_dump_runs_mongorestore_with_options():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -541,7 +526,6 @@ def test_restore_databases_dump_runs_mongorestore_with_schemas():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -571,7 +555,6 @@ def test_restore_data_source_dump_runs_psql_for_all_database_dump():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -595,7 +578,6 @@ def test_restore_data_source_dump_with_dry_run_skips_restore():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=True,
         extract_process=flexmock(),
@@ -624,7 +606,6 @@ def test_restore_data_source_dump_without_extract_process_restores_from_disk():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=False,
         extract_process=None,

+ 8 - 42
tests/unit/hooks/data_source/test_mysql.py

@@ -8,50 +8,42 @@ from borgmatic.hooks.data_source import mysql as module
 
 def test_database_names_to_dump_passes_through_name():
     extra_environment = flexmock()
-    log_prefix = ''
 
-    names = module.database_names_to_dump(
-        {'name': 'foo'}, extra_environment, log_prefix, dry_run=False
-    )
+    names = module.database_names_to_dump({'name': 'foo'}, extra_environment, dry_run=False)
 
     assert names == ('foo',)
 
 
 def test_database_names_to_dump_bails_for_dry_run():
     extra_environment = flexmock()
-    log_prefix = ''
     flexmock(module).should_receive('execute_command_and_capture_output').never()
 
-    names = module.database_names_to_dump(
-        {'name': 'all'}, extra_environment, log_prefix, dry_run=True
-    )
+    names = module.database_names_to_dump({'name': 'all'}, extra_environment, dry_run=True)
 
     assert names == ()
 
 
 def test_database_names_to_dump_queries_mysql_for_database_names():
     extra_environment = flexmock()
-    log_prefix = ''
     flexmock(module).should_receive('execute_command_and_capture_output').with_args(
         ('mysql', '--skip-column-names', '--batch', '--execute', 'show schemas'),
         extra_environment=extra_environment,
     ).and_return('foo\nbar\nmysql\n').once()
 
-    names = module.database_names_to_dump(
-        {'name': 'all'}, extra_environment, log_prefix, dry_run=False
-    )
+    names = module.database_names_to_dump({'name': 'all'}, extra_environment, dry_run=False)
 
     assert names == ('foo', 'bar')
 
 
 def test_use_streaming_true_for_any_databases():
     assert module.use_streaming(
-        databases=[flexmock(), flexmock()], config=flexmock(), log_prefix=flexmock()
+        databases=[flexmock(), flexmock()],
+        config=flexmock(),
     )
 
 
 def test_use_streaming_false_for_no_databases():
-    assert not module.use_streaming(databases=[], config=flexmock(), log_prefix=flexmock())
+    assert not module.use_streaming(databases=[], config=flexmock())
 
 
 def test_dump_data_sources_dumps_each_database():
@@ -65,7 +57,6 @@ def test_dump_data_sources_dumps_each_database():
     for name, process in zip(('foo', 'bar'), processes):
         flexmock(module).should_receive('execute_dump_command').with_args(
             database={'name': name},
-            log_prefix=object,
             dump_path=object,
             database_names=(name,),
             extra_environment=object,
@@ -77,7 +68,6 @@ def test_dump_data_sources_dumps_each_database():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -97,7 +87,6 @@ def test_dump_data_sources_dumps_with_password():
 
     flexmock(module).should_receive('execute_dump_command').with_args(
         database=database,
-        log_prefix=object,
         dump_path=object,
         database_names=('foo',),
         extra_environment={'MYSQL_PWD': 'trustsome1'},
@@ -108,7 +97,6 @@ def test_dump_data_sources_dumps_with_password():
     assert module.dump_data_sources(
         [database],
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -123,7 +111,6 @@ def test_dump_data_sources_dumps_all_databases_at_once():
     flexmock(module).should_receive('database_names_to_dump').and_return(('foo', 'bar'))
     flexmock(module).should_receive('execute_dump_command').with_args(
         database={'name': 'all'},
-        log_prefix=object,
         dump_path=object,
         database_names=('foo', 'bar'),
         extra_environment=object,
@@ -134,7 +121,6 @@ def test_dump_data_sources_dumps_all_databases_at_once():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -151,7 +137,6 @@ def test_dump_data_sources_dumps_all_databases_separately_when_format_configured
     for name, process in zip(('foo', 'bar'), processes):
         flexmock(module).should_receive('execute_dump_command').with_args(
             database={'name': name, 'format': 'sql'},
-            log_prefix=object,
             dump_path=object,
             database_names=(name,),
             extra_environment=object,
@@ -163,7 +148,6 @@ def test_dump_data_sources_dumps_all_databases_separately_when_format_configured
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -187,7 +171,7 @@ def test_database_names_to_dump_runs_mysql_with_list_options():
         extra_environment=None,
     ).and_return(('foo\nbar')).once()
 
-    assert module.database_names_to_dump(database, None, 'test.yaml', '') == ('foo', 'bar')
+    assert module.database_names_to_dump(database, None, '') == ('foo', 'bar')
 
 
 def test_database_names_to_dump_runs_non_default_mysql_with_list_options():
@@ -208,7 +192,7 @@ def test_database_names_to_dump_runs_non_default_mysql_with_list_options():
         ),
     ).and_return(('foo\nbar')).once()
 
-    assert module.database_names_to_dump(database, None, 'test.yaml', '') == ('foo', 'bar')
+    assert module.database_names_to_dump(database, None, '') == ('foo', 'bar')
 
 
 def test_execute_dump_command_runs_mysqldump():
@@ -233,7 +217,6 @@ def test_execute_dump_command_runs_mysqldump():
     assert (
         module.execute_dump_command(
             database={'name': 'foo'},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -265,7 +248,6 @@ def test_execute_dump_command_runs_mysqldump_without_add_drop_database():
     assert (
         module.execute_dump_command(
             database={'name': 'foo', 'add_drop_database': False},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -304,7 +286,6 @@ def test_execute_dump_command_runs_mysqldump_with_hostname_and_port():
     assert (
         module.execute_dump_command(
             database={'name': 'foo', 'hostname': 'database.example.org', 'port': 5433},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -339,7 +320,6 @@ def test_execute_dump_command_runs_mysqldump_with_username_and_password():
     assert (
         module.execute_dump_command(
             database={'name': 'foo', 'username': 'root', 'password': 'trustsome1'},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment={'MYSQL_PWD': 'trustsome1'},
@@ -373,7 +353,6 @@ def test_execute_dump_command_runs_mysqldump_with_options():
     assert (
         module.execute_dump_command(
             database={'name': 'foo', 'options': '--stuff=such'},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -409,7 +388,6 @@ def test_execute_dump_command_runs_non_default_mysqldump():
                 'name': 'foo',
                 'mysql_dump_command': 'custom_mysqldump',
             },  # Custom MySQL dump command specified
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -429,7 +407,6 @@ def test_execute_dump_command_with_duplicate_dump_skips_mysqldump():
     assert (
         module.execute_dump_command(
             database={'name': 'foo'},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -450,7 +427,6 @@ def test_execute_dump_command_with_dry_run_skips_mysqldump():
     assert (
         module.execute_dump_command(
             database={'name': 'foo'},
-            log_prefix='log',
             dump_path=flexmock(),
             database_names=('foo',),
             extra_environment=None,
@@ -473,7 +449,6 @@ def test_dump_data_sources_errors_for_missing_all_databases():
         assert module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -493,7 +468,6 @@ def test_dump_data_sources_does_not_error_for_missing_all_databases_with_dry_run
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -518,7 +492,6 @@ def test_restore_data_source_dump_runs_mysql_to_restore():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=False,
         extract_process=extract_process,
@@ -547,7 +520,6 @@ def test_restore_data_source_dump_runs_mysql_with_options():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -576,7 +548,6 @@ def test_restore_data_source_dump_runs_non_default_mysql_with_options():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -614,7 +585,6 @@ def test_restore_data_source_dump_runs_mysql_with_hostname_and_port():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -643,7 +613,6 @@ def test_restore_data_source_dump_runs_mysql_with_username_and_password():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -693,7 +662,6 @@ def test_restore_data_source_dump_with_connection_params_uses_connection_params_
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=False,
         extract_process=extract_process,
@@ -745,7 +713,6 @@ def test_restore_data_source_dump_without_connection_params_uses_restore_params_
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -767,7 +734,6 @@ def test_restore_data_source_dump_with_dry_run_skips_restore():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=True,
         extract_process=flexmock(),

+ 11 - 47
tests/unit/hooks/data_source/test_postgresql.py

@@ -61,32 +61,26 @@ def test_make_extra_environment_without_ssl_mode_does_not_set_ssl_mode():
 def test_database_names_to_dump_passes_through_individual_database_name():
     database = {'name': 'foo'}
 
-    assert module.database_names_to_dump(database, flexmock(), flexmock(), dry_run=False) == (
-        'foo',
-    )
+    assert module.database_names_to_dump(database, flexmock(), dry_run=False) == ('foo',)
 
 
 def test_database_names_to_dump_passes_through_individual_database_name_with_format():
     database = {'name': 'foo', 'format': 'custom'}
 
-    assert module.database_names_to_dump(database, flexmock(), flexmock(), dry_run=False) == (
-        'foo',
-    )
+    assert module.database_names_to_dump(database, flexmock(), dry_run=False) == ('foo',)
 
 
 def test_database_names_to_dump_passes_through_all_without_format():
     database = {'name': 'all'}
 
-    assert module.database_names_to_dump(database, flexmock(), flexmock(), dry_run=False) == (
-        'all',
-    )
+    assert module.database_names_to_dump(database, flexmock(), dry_run=False) == ('all',)
 
 
 def test_database_names_to_dump_with_all_and_format_and_dry_run_bails():
     database = {'name': 'all', 'format': 'custom'}
     flexmock(module).should_receive('execute_command_and_capture_output').never()
 
-    assert module.database_names_to_dump(database, flexmock(), flexmock(), dry_run=True) == ()
+    assert module.database_names_to_dump(database, flexmock(), dry_run=True) == ()
 
 
 def test_database_names_to_dump_with_all_and_format_lists_databases():
@@ -95,7 +89,7 @@ def test_database_names_to_dump_with_all_and_format_lists_databases():
         'foo,test,\nbar,test,"stuff and such"'
     )
 
-    assert module.database_names_to_dump(database, flexmock(), flexmock(), dry_run=False) == (
+    assert module.database_names_to_dump(database, flexmock(), dry_run=False) == (
         'foo',
         'bar',
     )
@@ -119,7 +113,7 @@ def test_database_names_to_dump_with_all_and_format_lists_databases_with_hostnam
         extra_environment=object,
     ).and_return('foo,test,\nbar,test,"stuff and such"')
 
-    assert module.database_names_to_dump(database, flexmock(), flexmock(), dry_run=False) == (
+    assert module.database_names_to_dump(database, flexmock(), dry_run=False) == (
         'foo',
         'bar',
     )
@@ -141,7 +135,7 @@ def test_database_names_to_dump_with_all_and_format_lists_databases_with_usernam
         extra_environment=object,
     ).and_return('foo,test,\nbar,test,"stuff and such"')
 
-    assert module.database_names_to_dump(database, flexmock(), flexmock(), dry_run=False) == (
+    assert module.database_names_to_dump(database, flexmock(), dry_run=False) == (
         'foo',
         'bar',
     )
@@ -154,7 +148,7 @@ def test_database_names_to_dump_with_all_and_format_lists_databases_with_options
         extra_environment=object,
     ).and_return('foo,test,\nbar,test,"stuff and such"')
 
-    assert module.database_names_to_dump(database, flexmock(), flexmock(), dry_run=False) == (
+    assert module.database_names_to_dump(database, flexmock(), dry_run=False) == (
         'foo',
         'bar',
     )
@@ -166,9 +160,7 @@ def test_database_names_to_dump_with_all_and_format_excludes_particular_database
         'foo,test,\ntemplate0,test,blah'
     )
 
-    assert module.database_names_to_dump(database, flexmock(), flexmock(), dry_run=False) == (
-        'foo',
-    )
+    assert module.database_names_to_dump(database, flexmock(), dry_run=False) == ('foo',)
 
 
 def test_database_names_to_dump_with_all_and_psql_command_uses_custom_command():
@@ -194,16 +186,13 @@ def test_database_names_to_dump_with_all_and_psql_command_uses_custom_command():
         extra_environment=object,
     ).and_return('foo,text').once()
 
-    assert module.database_names_to_dump(database, flexmock(), flexmock(), dry_run=False) == (
-        'foo',
-    )
+    assert module.database_names_to_dump(database, flexmock(), dry_run=False) == ('foo',)
 
 
 def test_use_streaming_true_for_any_non_directory_format_databases():
     assert module.use_streaming(
         databases=[{'format': 'stuff'}, {'format': 'directory'}, {}],
         config=flexmock(),
-        log_prefix=flexmock(),
     )
 
 
@@ -211,12 +200,11 @@ def test_use_streaming_false_for_all_directory_format_databases():
     assert not module.use_streaming(
         databases=[{'format': 'directory'}, {'format': 'directory'}],
         config=flexmock(),
-        log_prefix=flexmock(),
     )
 
 
 def test_use_streaming_false_for_no_databases():
-    assert not module.use_streaming(databases=[], config=flexmock(), log_prefix=flexmock())
+    assert not module.use_streaming(databases=[], config=flexmock())
 
 
 def test_dump_data_sources_runs_pg_dump_for_each_database():
@@ -255,7 +243,6 @@ def test_dump_data_sources_runs_pg_dump_for_each_database():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -275,7 +262,6 @@ def test_dump_data_sources_raises_when_no_database_names_to_dump():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -292,7 +278,6 @@ def test_dump_data_sources_does_not_raise_when_no_database_names_to_dump():
     module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -318,7 +303,6 @@ def test_dump_data_sources_with_duplicate_dump_skips_pg_dump():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -346,7 +330,6 @@ def test_dump_data_sources_with_dry_run_skips_pg_dump():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -392,7 +375,6 @@ def test_dump_data_sources_runs_pg_dump_with_hostname_and_port():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -436,7 +418,6 @@ def test_dump_data_sources_runs_pg_dump_with_username_and_password():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -480,7 +461,6 @@ def test_dump_data_sources_with_username_injection_attack_gets_escaped():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -520,7 +500,6 @@ def test_dump_data_sources_runs_pg_dump_with_directory_format():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -563,7 +542,6 @@ def test_dump_data_sources_runs_pg_dump_with_options():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -593,7 +571,6 @@ def test_dump_data_sources_runs_pg_dumpall_for_all_databases():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -635,7 +612,6 @@ def test_dump_data_sources_runs_non_default_pg_dump():
     assert module.dump_data_sources(
         databases,
         {},
-        'test.yaml',
         config_paths=('test.yaml',),
         borgmatic_runtime_directory='/run/borgmatic',
         patterns=[],
@@ -682,7 +658,6 @@ def test_restore_data_source_dump_runs_pg_restore():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=False,
         extract_process=extract_process,
@@ -745,7 +720,6 @@ def test_restore_data_source_dump_runs_pg_restore_with_hostname_and_port():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -806,7 +780,6 @@ def test_restore_data_source_dump_runs_pg_restore_with_username_and_password():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -886,7 +859,6 @@ def test_restore_data_source_dump_with_connection_params_uses_connection_params_
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=False,
         extract_process=extract_process,
@@ -966,7 +938,6 @@ def test_restore_data_source_dump_without_connection_params_uses_restore_params_
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -1028,7 +999,6 @@ def test_restore_data_source_dump_runs_pg_restore_with_options():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -1068,7 +1038,6 @@ def test_restore_data_source_dump_runs_psql_for_all_database_dump():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'all'},
         dry_run=False,
         extract_process=extract_process,
@@ -1113,7 +1082,6 @@ def test_restore_data_source_dump_runs_psql_for_plain_database_dump():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -1183,7 +1151,6 @@ def test_restore_data_source_dump_runs_non_default_pg_restore_and_psql():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -1208,7 +1175,6 @@ def test_restore_data_source_dump_with_dry_run_skips_restore():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=True,
         extract_process=flexmock(),
@@ -1261,7 +1227,6 @@ def test_restore_data_source_dump_without_extract_process_restores_from_disk():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'foo'},
         dry_run=False,
         extract_process=None,
@@ -1318,7 +1283,6 @@ def test_restore_data_source_dump_with_schemas_restores_schemas():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=None,

+ 3 - 12
tests/unit/hooks/data_source/test_sqlite.py

@@ -7,12 +7,13 @@ from borgmatic.hooks.data_source import sqlite as module
 
 def test_use_streaming_true_for_any_databases():
     assert module.use_streaming(
-        databases=[flexmock(), flexmock()], config=flexmock(), log_prefix=flexmock()
+        databases=[flexmock(), flexmock()],
+        config=flexmock(),
     )
 
 
 def test_use_streaming_false_for_no_databases():
-    assert not module.use_streaming(databases=[], config=flexmock(), log_prefix=flexmock())
+    assert not module.use_streaming(databases=[], config=flexmock())
 
 
 def test_dump_data_sources_logs_and_skips_if_dump_already_exists():
@@ -30,7 +31,6 @@ def test_dump_data_sources_logs_and_skips_if_dump_already_exists():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -61,7 +61,6 @@ def test_dump_data_sources_dumps_each_database():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -99,7 +98,6 @@ def test_dump_data_sources_with_path_injection_attack_gets_escaped():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -128,7 +126,6 @@ def test_dump_data_sources_with_non_existent_path_warns_and_dumps_database():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -159,7 +156,6 @@ def test_dump_data_sources_with_name_all_warns_and_dumps_all_databases():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -184,7 +180,6 @@ def test_dump_data_sources_does_not_dump_if_dry_run():
         module.dump_data_sources(
             databases,
             {},
-            'test.yaml',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=[],
@@ -213,7 +208,6 @@ def test_restore_data_source_dump_restores_database():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -243,7 +237,6 @@ def test_restore_data_source_dump_with_connection_params_uses_connection_params_
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'database'},
         dry_run=False,
         extract_process=extract_process,
@@ -273,7 +266,6 @@ def test_restore_data_source_dump_without_connection_params_uses_restore_params_
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source=hook_config[0],
         dry_run=False,
         extract_process=extract_process,
@@ -292,7 +284,6 @@ def test_restore_data_source_dump_does_not_restore_database_if_dry_run():
     module.restore_data_source_dump(
         hook_config,
         {},
-        'test.yaml',
         data_source={'name': 'database'},
         dry_run=True,
         extract_process=extract_process,

+ 0 - 16
tests/unit/hooks/data_source/test_zfs.py

@@ -154,7 +154,6 @@ def test_dump_data_sources_snapshots_and_mounts_and_updates_patterns():
         module.dump_data_sources(
             hook_config={},
             config={'source_directories': '/mnt/dataset', 'zfs': {}},
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -177,7 +176,6 @@ def test_dump_data_sources_with_no_datasets_skips_snapshots():
         module.dump_data_sources(
             hook_config={},
             config={'patterns': flexmock(), 'zfs': {}},
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -227,7 +225,6 @@ def test_dump_data_sources_uses_custom_commands():
                 'patterns': flexmock(),
                 'zfs': hook_config,
             },
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -252,7 +249,6 @@ def test_dump_data_sources_with_dry_run_skips_commands_and_does_not_touch_patter
         module.dump_data_sources(
             hook_config={},
             config={'patterns': ('R /mnt/dataset',), 'zfs': {}},
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -295,7 +291,6 @@ def test_dump_data_sources_ignores_mismatch_between_given_patterns_and_contained
         module.dump_data_sources(
             hook_config={},
             config={'patterns': ('R /mnt/dataset',), 'zfs': {}},
-            log_prefix='test',
             config_paths=('test.yaml',),
             borgmatic_runtime_directory='/run/borgmatic',
             patterns=patterns,
@@ -338,7 +333,6 @@ def test_remove_data_source_dumps_unmounts_and_destroys_snapshots():
     module.remove_data_source_dumps(
         hook_config={},
         config={'source_directories': '/mnt/dataset', 'zfs': {}},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -366,7 +360,6 @@ def test_remove_data_source_dumps_use_custom_commands():
     module.remove_data_source_dumps(
         hook_config=hook_config,
         config={'source_directories': '/mnt/dataset', 'zfs': hook_config},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -381,7 +374,6 @@ def test_remove_data_source_dumps_bails_for_missing_hook_configuration():
     module.remove_data_source_dumps(
         hook_config=None,
         config={'source_directories': '/mnt/dataset'},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -397,7 +389,6 @@ def test_remove_data_source_dumps_bails_for_missing_zfs_command():
     module.remove_data_source_dumps(
         hook_config=hook_config,
         config={'source_directories': '/mnt/dataset', 'zfs': hook_config},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -415,7 +406,6 @@ def test_remove_data_source_dumps_bails_for_zfs_command_error():
     module.remove_data_source_dumps(
         hook_config=hook_config,
         config={'source_directories': '/mnt/dataset', 'zfs': hook_config},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -439,7 +429,6 @@ def test_remove_data_source_dumps_bails_for_missing_umount_command():
     module.remove_data_source_dumps(
         hook_config=hook_config,
         config={'source_directories': '/mnt/dataset', 'zfs': hook_config},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -463,7 +452,6 @@ def test_remove_data_source_dumps_bails_for_umount_command_error():
     module.remove_data_source_dumps(
         hook_config=hook_config,
         config={'source_directories': '/mnt/dataset', 'zfs': hook_config},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -488,7 +476,6 @@ def test_remove_data_source_dumps_skips_unmount_snapshot_directories_that_are_no
     module.remove_data_source_dumps(
         hook_config={},
         config={'source_directories': '/mnt/dataset', 'zfs': {}},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -518,7 +505,6 @@ def test_remove_data_source_dumps_skips_unmount_snapshot_mount_paths_that_are_no
     module.remove_data_source_dumps(
         hook_config={},
         config={'source_directories': '/mnt/dataset', 'zfs': {}},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -548,7 +534,6 @@ def test_remove_data_source_dumps_skips_unmount_snapshot_mount_paths_after_rmtre
     module.remove_data_source_dumps(
         hook_config={},
         config={'source_directories': '/mnt/dataset', 'zfs': {}},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=False,
     )
@@ -571,7 +556,6 @@ def test_remove_data_source_dumps_with_dry_run_skips_unmount_and_destroy():
     module.remove_data_source_dumps(
         hook_config={},
         config={'source_directories': '/mnt/dataset', 'zfs': {}},
-        log_prefix='test',
         borgmatic_runtime_directory='/run/borgmatic',
         dry_run=True,
     )

+ 0 - 1
tests/unit/hooks/monitoring/test_apprise.py

@@ -379,7 +379,6 @@ def test_destroy_monitor_does_not_raise():
     module.destroy_monitor(
         hook_config={},
         config={},
-        config_filename='test.yaml',
         monitoring_log_level=1,
         dry_run=False,
     )

+ 12 - 15
tests/unit/hooks/test_command.py

@@ -7,22 +7,20 @@ from borgmatic.hooks import command as module
 
 
 def test_interpolate_context_passes_through_command_without_variable():
-    assert module.interpolate_context('test.yaml', 'pre-backup', 'ls', {'foo': 'bar'}) == 'ls'
+    assert module.interpolate_context('pre-backup', 'ls', {'foo': 'bar'}) == 'ls'
 
 
 def test_interpolate_context_passes_through_command_with_unknown_variable():
     command = 'ls {baz}'  # noqa: FS003
 
-    assert module.interpolate_context('test.yaml', 'pre-backup', command, {'foo': 'bar'}) == command
+    assert module.interpolate_context('pre-backup', command, {'foo': 'bar'}) == command
 
 
 def test_interpolate_context_interpolates_variables():
     command = 'ls {foo}{baz} {baz}'  # noqa: FS003
     context = {'foo': 'bar', 'baz': 'quux'}
 
-    assert (
-        module.interpolate_context('test.yaml', 'pre-backup', command, context) == 'ls barquux quux'
-    )
+    assert module.interpolate_context('pre-backup', command, context) == 'ls barquux quux'
 
 
 def test_interpolate_context_escapes_interpolated_variables():
@@ -30,8 +28,7 @@ def test_interpolate_context_escapes_interpolated_variables():
     context = {'foo': 'bar', 'inject': 'hi; naughty-command'}
 
     assert (
-        module.interpolate_context('test.yaml', 'pre-backup', command, context)
-        == "ls bar 'hi; naughty-command'"
+        module.interpolate_context('pre-backup', command, context) == "ls bar 'hi; naughty-command'"
     )
 
 
@@ -53,7 +50,7 @@ def test_make_environment_with_pyinstaller_and_LD_LIBRARY_PATH_ORIG_copies_it_in
 
 def test_execute_hook_invokes_each_command():
     flexmock(module).should_receive('interpolate_context').replace_with(
-        lambda config_file, hook_description, command, context: command
+        lambda hook_description, command, context: command
     )
     flexmock(module).should_receive('make_environment').and_return({})
     flexmock(module.borgmatic.execute).should_receive('execute_command').with_args(
@@ -68,7 +65,7 @@ def test_execute_hook_invokes_each_command():
 
 def test_execute_hook_with_multiple_commands_invokes_each_command():
     flexmock(module).should_receive('interpolate_context').replace_with(
-        lambda config_file, hook_description, command, context: command
+        lambda hook_description, command, context: command
     )
     flexmock(module).should_receive('make_environment').and_return({})
     flexmock(module.borgmatic.execute).should_receive('execute_command').with_args(
@@ -89,7 +86,7 @@ def test_execute_hook_with_multiple_commands_invokes_each_command():
 
 def test_execute_hook_with_umask_sets_that_umask():
     flexmock(module).should_receive('interpolate_context').replace_with(
-        lambda config_file, hook_description, command, context: command
+        lambda hook_description, command, context: command
     )
     flexmock(module.os).should_receive('umask').with_args(0o77).and_return(0o22).once()
     flexmock(module.os).should_receive('umask').with_args(0o22).once()
@@ -106,7 +103,7 @@ def test_execute_hook_with_umask_sets_that_umask():
 
 def test_execute_hook_with_dry_run_skips_commands():
     flexmock(module).should_receive('interpolate_context').replace_with(
-        lambda config_file, hook_description, command, context: command
+        lambda hook_description, command, context: command
     )
     flexmock(module).should_receive('make_environment').and_return({})
     flexmock(module.borgmatic.execute).should_receive('execute_command').never()
@@ -120,7 +117,7 @@ def test_execute_hook_with_empty_commands_does_not_raise():
 
 def test_execute_hook_on_error_logs_as_error():
     flexmock(module).should_receive('interpolate_context').replace_with(
-        lambda config_file, hook_description, command, context: command
+        lambda hook_description, command, context: command
     )
     flexmock(module).should_receive('make_environment').and_return({})
     flexmock(module.borgmatic.execute).should_receive('execute_command').with_args(
@@ -136,14 +133,14 @@ def test_execute_hook_on_error_logs_as_error():
 def test_considered_soft_failure_treats_soft_fail_exit_code_as_soft_fail():
     error = subprocess.CalledProcessError(module.SOFT_FAIL_EXIT_CODE, 'try again')
 
-    assert module.considered_soft_failure('config.yaml', error)
+    assert module.considered_soft_failure(error)
 
 
 def test_considered_soft_failure_does_not_treat_other_exit_code_as_soft_fail():
     error = subprocess.CalledProcessError(1, 'error')
 
-    assert not module.considered_soft_failure('config.yaml', error)
+    assert not module.considered_soft_failure(error)
 
 
 def test_considered_soft_failure_does_not_treat_other_exception_type_as_soft_fail():
-    assert not module.considered_soft_failure('config.yaml', Exception())
+    assert not module.considered_soft_failure(Exception())

+ 17 - 19
tests/unit/hooks/test_dispatch.py

@@ -6,7 +6,7 @@ from flexmock import flexmock
 from borgmatic.hooks import dispatch as module
 
 
-def hook_function(hook_config, config, log_prefix, thing, value):
+def hook_function(hook_config, config, thing, value):
     '''
     This test function gets mocked out below.
     '''
@@ -27,10 +27,10 @@ def test_call_hook_invokes_module_function_with_arguments_and_returns_value():
         'borgmatic.hooks.monitoring.super_hook'
     ).and_return(test_module)
     flexmock(test_module).should_receive('hook_function').with_args(
-        config['super_hook'], config, 'prefix', 55, value=66
+        config['super_hook'], config, 55, value=66
     ).and_return(expected_return_value).once()
 
-    return_value = module.call_hook('hook_function', config, 'prefix', 'super_hook', 55, value=66)
+    return_value = module.call_hook('hook_function', config, 'super_hook', 55, value=66)
 
     assert return_value == expected_return_value
 
@@ -49,10 +49,10 @@ def test_call_hook_probes_config_with_databases_suffix():
         'borgmatic.hooks.monitoring.super_hook'
     ).and_return(test_module)
     flexmock(test_module).should_receive('hook_function').with_args(
-        config['super_hook_databases'], config, 'prefix', 55, value=66
+        config['super_hook_databases'], config, 55, value=66
     ).and_return(expected_return_value).once()
 
-    return_value = module.call_hook('hook_function', config, 'prefix', 'super_hook', 55, value=66)
+    return_value = module.call_hook('hook_function', config, 'super_hook', 55, value=66)
 
     assert return_value == expected_return_value
 
@@ -71,12 +71,10 @@ def test_call_hook_strips_databases_suffix_from_hook_name():
         'borgmatic.hooks.monitoring.super_hook'
     ).and_return(test_module)
     flexmock(test_module).should_receive('hook_function').with_args(
-        config['super_hook_databases'], config, 'prefix', 55, value=66
+        config['super_hook_databases'], config, 55, value=66
     ).and_return(expected_return_value).once()
 
-    return_value = module.call_hook(
-        'hook_function', config, 'prefix', 'super_hook_databases', 55, value=66
-    )
+    return_value = module.call_hook('hook_function', config, 'super_hook_databases', 55, value=66)
 
     assert return_value == expected_return_value
 
@@ -95,10 +93,10 @@ def test_call_hook_without_hook_config_invokes_module_function_with_arguments_an
         'borgmatic.hooks.monitoring.super_hook'
     ).and_return(test_module)
     flexmock(test_module).should_receive('hook_function').with_args(
-        None, config, 'prefix', 55, value=66
+        None, config, 55, value=66
     ).and_return(expected_return_value).once()
 
-    return_value = module.call_hook('hook_function', config, 'prefix', 'super_hook', 55, value=66)
+    return_value = module.call_hook('hook_function', config, 'super_hook', 55, value=66)
 
     assert return_value == expected_return_value
 
@@ -118,7 +116,7 @@ def test_call_hook_without_corresponding_module_raises():
     flexmock(test_module).should_receive('hook_function').never()
 
     with pytest.raises(ValueError):
-        module.call_hook('hook_function', config, 'prefix', 'super_hook', 55, value=66)
+        module.call_hook('hook_function', config, 'super_hook', 55, value=66)
 
 
 def test_call_hook_skips_non_hook_modules():
@@ -134,7 +132,7 @@ def test_call_hook_skips_non_hook_modules():
         'borgmatic.hooks.monitoring.not_a_hook'
     ).and_return(not_a_hook_module)
 
-    return_value = module.call_hook('hook_function', config, 'prefix', 'not_a_hook', 55, value=66)
+    return_value = module.call_hook('hook_function', config, 'not_a_hook', 55, value=66)
 
     assert return_value is None
 
@@ -152,7 +150,7 @@ def test_call_hooks_calls_each_hook_and_collects_return_values():
         expected_return_values['super_hook']
     ).and_return(expected_return_values['other_hook'])
 
-    return_values = module.call_hooks('do_stuff', config, 'prefix', module.Hook_type.MONITORING, 55)
+    return_values = module.call_hooks('do_stuff', config, module.Hook_type.MONITORING, 55)
 
     assert return_values == expected_return_values
 
@@ -168,7 +166,7 @@ def test_call_hooks_calls_skips_return_values_for_unconfigured_hooks():
     ).and_return(['super_hook', 'other_hook'])
     flexmock(module).should_receive('call_hook').and_return(expected_return_values['super_hook'])
 
-    return_values = module.call_hooks('do_stuff', config, 'prefix', module.Hook_type.MONITORING, 55)
+    return_values = module.call_hooks('do_stuff', config, module.Hook_type.MONITORING, 55)
 
     assert return_values == expected_return_values
 
@@ -186,7 +184,7 @@ def test_call_hooks_calls_treats_null_hook_as_optionless():
         expected_return_values['super_hook']
     ).and_return(expected_return_values['other_hook'])
 
-    return_values = module.call_hooks('do_stuff', config, 'prefix', module.Hook_type.MONITORING, 55)
+    return_values = module.call_hooks('do_stuff', config, module.Hook_type.MONITORING, 55)
 
     assert return_values == expected_return_values
 
@@ -204,7 +202,7 @@ def test_call_hooks_calls_looks_up_databases_suffix_in_config():
         expected_return_values['super_hook']
     ).and_return(expected_return_values['other_hook'])
 
-    return_values = module.call_hooks('do_stuff', config, 'prefix', module.Hook_type.MONITORING, 55)
+    return_values = module.call_hooks('do_stuff', config, module.Hook_type.MONITORING, 55)
 
     assert return_values == expected_return_values
 
@@ -223,7 +221,7 @@ def test_call_hooks_even_if_unconfigured_calls_each_hook_and_collects_return_val
     ).and_return(expected_return_values['other_hook'])
 
     return_values = module.call_hooks_even_if_unconfigured(
-        'do_stuff', config, 'prefix', module.Hook_type.MONITORING, 55
+        'do_stuff', config, module.Hook_type.MONITORING, 55
     )
 
     assert return_values == expected_return_values
@@ -243,7 +241,7 @@ def test_call_hooks_even_if_unconfigured_calls_each_hook_configured_or_not_and_c
     ).and_return(expected_return_values['other_hook'])
 
     return_values = module.call_hooks_even_if_unconfigured(
-        'do_stuff', config, 'prefix', module.Hook_type.MONITORING, 55
+        'do_stuff', config, module.Hook_type.MONITORING, 55
     )
 
     assert return_values == expected_return_values

+ 8 - 0
tests/unit/test_execute.py

@@ -186,6 +186,7 @@ def test_execute_command_calls_full_command():
         env=None,
         cwd=None,
     ).and_return(flexmock(stdout=None)).once()
+    flexmock(module.borgmatic.logger).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('log_outputs')
 
     output = module.execute_command(full_command)
@@ -207,6 +208,7 @@ def test_execute_command_calls_full_command_with_output_file():
         env=None,
         cwd=None,
     ).and_return(flexmock(stderr=None)).once()
+    flexmock(module.borgmatic.logger).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('log_outputs')
 
     output = module.execute_command(full_command, output_file=output_file)
@@ -222,6 +224,7 @@ def test_execute_command_calls_full_command_without_capturing_output():
         full_command, stdin=None, stdout=None, stderr=None, shell=False, env=None, cwd=None
     ).and_return(flexmock(wait=lambda: 0)).once()
     flexmock(module).should_receive('interpret_exit_code').and_return(module.Exit_status.SUCCESS)
+    flexmock(module.borgmatic.logger).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('log_outputs')
 
     output = module.execute_command(full_command, output_file=module.DO_NOT_CAPTURE)
@@ -243,6 +246,7 @@ def test_execute_command_calls_full_command_with_input_file():
         env=None,
         cwd=None,
     ).and_return(flexmock(stdout=None)).once()
+    flexmock(module.borgmatic.logger).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('log_outputs')
 
     output = module.execute_command(full_command, input_file=input_file)
@@ -263,6 +267,7 @@ def test_execute_command_calls_full_command_with_shell():
         env=None,
         cwd=None,
     ).and_return(flexmock(stdout=None)).once()
+    flexmock(module.borgmatic.logger).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('log_outputs')
 
     output = module.execute_command(full_command, shell=True)
@@ -283,6 +288,7 @@ def test_execute_command_calls_full_command_with_extra_environment():
         env={'a': 'b', 'c': 'd'},
         cwd=None,
     ).and_return(flexmock(stdout=None)).once()
+    flexmock(module.borgmatic.logger).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('log_outputs')
 
     output = module.execute_command(full_command, extra_environment={'c': 'd'})
@@ -303,6 +309,7 @@ def test_execute_command_calls_full_command_with_working_directory():
         env=None,
         cwd='/working',
     ).and_return(flexmock(stdout=None)).once()
+    flexmock(module.borgmatic.logger).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('log_outputs')
 
     output = module.execute_command(full_command, working_directory='/working')
@@ -324,6 +331,7 @@ def test_execute_command_without_run_to_completion_returns_process():
         env=None,
         cwd=None,
     ).and_return(process).once()
+    flexmock(module.borgmatic.logger).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('log_outputs')
 
     assert module.execute_command(full_command, run_to_completion=False) == process

+ 137 - 2
tests/unit/test_logger.py

@@ -178,7 +178,8 @@ def test_console_color_formatter_format_includes_log_message():
     flexmock(module).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.ANSWER
     plain_message = 'uh oh'
-    record = flexmock(levelno=logging.CRITICAL, msg=plain_message)
+    flexmock(module.logging.Formatter).should_receive('format').and_return(plain_message)
+    record = flexmock(levelno=logging.CRITICAL)
 
     colored_message = module.Console_color_formatter().format(record)
 
@@ -227,6 +228,138 @@ def test_add_logging_level_skips_global_setting_if_already_set():
     module.add_logging_level('PLAID', 99)
 
 
+def test_get_log_prefix_gets_prefix_from_first_handler():
+    flexmock(module.logging).should_receive('getLogger').and_return(
+        flexmock(
+            handlers=[
+                flexmock(
+                    formatter=flexmock(
+                        _style=flexmock(_defaults=flexmock(get=lambda name: 'myprefix: '))
+                    )
+                ),
+                flexmock(),
+            ],
+            removeHandler=lambda handler: None,
+        )
+    )
+
+    assert module.get_log_prefix() == 'myprefix'
+
+
+def test_get_log_prefix_with_no_handlers_does_not_raise():
+    flexmock(module.logging).should_receive('getLogger').and_return(
+        flexmock(
+            handlers=[],
+            removeHandler=lambda handler: None,
+        )
+    )
+
+    assert module.get_log_prefix() is None
+
+
+def test_get_log_prefix_with_no_formatters_does_not_raise():
+    flexmock(module.logging).should_receive('getLogger').and_return(
+        flexmock(
+            handlers=[
+                flexmock(),
+                flexmock(),
+            ],
+            removeHandler=lambda handler: None,
+        )
+    )
+
+    assert module.get_log_prefix() is None
+
+
+def test_get_log_prefix_with_no_prefix_does_not_raise():
+    flexmock(module.logging).should_receive('getLogger').and_return(
+        flexmock(
+            handlers=[
+                flexmock(
+                    formatter=flexmock(_style=flexmock(_defaults=flexmock(get=lambda name: None)))
+                ),
+                flexmock(),
+            ],
+            removeHandler=lambda handler: None,
+        )
+    )
+
+    assert module.get_log_prefix() is None
+
+
+def test_set_log_prefix_updates_all_handlers():
+    styles = (
+        flexmock(_defaults=None),
+        flexmock(_defaults=None),
+    )
+
+    flexmock(module.logging).should_receive('getLogger').and_return(
+        flexmock(
+            handlers=[
+                flexmock(
+                    formatter=flexmock(
+                        _style=styles[0],
+                    )
+                ),
+                flexmock(
+                    formatter=flexmock(
+                        _style=styles[1],
+                    )
+                ),
+            ],
+            removeHandler=lambda handler: None,
+        )
+    )
+
+    module.set_log_prefix('myprefix')
+
+    for style in styles:
+        assert style._defaults == {'prefix': 'myprefix: '}
+
+
+def test_set_log_prefix_skips_handlers_without_a_formatter():
+    style = flexmock(_defaults=None)
+
+    flexmock(module.logging).should_receive('getLogger').and_return(
+        flexmock(
+            handlers=[
+                flexmock(
+                    formatter=None,
+                ),
+                flexmock(),
+                flexmock(
+                    formatter=flexmock(
+                        _style=style,
+                    )
+                ),
+            ],
+            removeHandler=lambda handler: None,
+        )
+    )
+
+    module.set_log_prefix('myprefix')
+
+    assert style._defaults == {'prefix': 'myprefix: '}
+
+
+def test_log_prefix_sets_prefix_and_then_restores_no_prefix_after():
+    flexmock(module).should_receive('get_log_prefix').and_return(None)
+    flexmock(module).should_receive('set_log_prefix').with_args('myprefix').once()
+    flexmock(module).should_receive('set_log_prefix').with_args(None).once()
+
+    with module.Log_prefix('myprefix'):
+        pass
+
+
+def test_log_prefix_sets_prefix_and_then_restores_original_prefix_after():
+    flexmock(module).should_receive('get_log_prefix').and_return('original')
+    flexmock(module).should_receive('set_log_prefix').with_args('myprefix').once()
+    flexmock(module).should_receive('set_log_prefix').with_args('original').once()
+
+    with module.Log_prefix('myprefix'):
+        pass
+
+
 def test_configure_logging_with_syslog_log_level_probes_for_log_socket_on_linux():
     flexmock(module).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.ANSWER
@@ -409,7 +542,9 @@ def test_configure_logging_to_log_file_formats_with_custom_log_format():
     flexmock(module).should_receive('add_custom_log_levels')
     flexmock(module.logging).ANSWER = module.ANSWER
     flexmock(module.logging).should_receive('Formatter').with_args(
-        '{message}', style='{'  # noqa: FS003
+        '{message}',  # noqa: FS003
+        style='{',
+        defaults={'prefix': ''},
     ).once()
     fake_formatter = flexmock()
     flexmock(module).should_receive('Console_color_formatter').and_return(fake_formatter)