Просмотр исходного кода

Deprecate all "before_*", "after_*" and "on_error" command hooks in favor of more flexible "commands:" (#790).

Reviewed-on: https://projects.torsion.org/borgmatic-collective/borgmatic/pulls/1019
Dan Helfman 3 месяца назад
Родитель
Commit
7bdbadbac2
46 измененных файлов с 2993 добавлено и 1499 удалено
  1. 5 0
      NEWS
  2. 0 19
      borgmatic/actions/check.py
  3. 0 18
      borgmatic/actions/compact.py
  4. 0 19
      borgmatic/actions/create.py
  5. 0 17
      borgmatic/actions/extract.py
  6. 0 17
      borgmatic/actions/prune.py
  7. 429 403
      borgmatic/commands/borgmatic.py
  8. 47 35
      borgmatic/config/generate.py
  9. 85 0
      borgmatic/config/normalize.py
  10. 214 35
      borgmatic/config/schema.yaml
  11. 160 36
      borgmatic/hooks/command.py
  12. 35 27
      borgmatic/hooks/data_source/bootstrap.py
  13. 33 25
      borgmatic/hooks/data_source/btrfs.py
  14. 70 62
      borgmatic/hooks/data_source/lvm.py
  15. 58 50
      borgmatic/hooks/data_source/mariadb.py
  16. 45 36
      borgmatic/hooks/data_source/mongodb.py
  17. 58 50
      borgmatic/hooks/data_source/mysql.py
  18. 108 92
      borgmatic/hooks/data_source/postgresql.py
  19. 53 45
      borgmatic/hooks/data_source/sqlite.py
  20. 59 51
      borgmatic/hooks/data_source/zfs.py
  21. 1 0
      borgmatic/hooks/dispatch.py
  22. 1 0
      docs/_includes/index.css
  23. 203 48
      docs/how-to/add-preparation-and-cleanup-steps-to-backups.md
  24. 47 46
      docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server.md
  25. 49 134
      docs/how-to/monitor-your-backups.md
  26. 1 1
      pyproject.toml
  27. 140 31
      tests/integration/config/test_generate.py
  28. 0 10
      tests/unit/actions/test_check.py
  29. 0 4
      tests/unit/actions/test_compact.py
  30. 0 7
      tests/unit/actions/test_create.py
  31. 0 2
      tests/unit/actions/test_extract.py
  32. 0 4
      tests/unit/actions/test_prune.py
  33. 267 119
      tests/unit/commands/test_borgmatic.py
  34. 46 17
      tests/unit/config/test_generate.py
  35. 110 0
      tests/unit/config/test_normalize.py
  36. 9 0
      tests/unit/config/test_validate.py
  37. 7 0
      tests/unit/hooks/data_source/test_bootstrap.py
  38. 18 0
      tests/unit/hooks/data_source/test_btrfs.py
  39. 21 0
      tests/unit/hooks/data_source/test_lvm.py
  40. 18 0
      tests/unit/hooks/data_source/test_mariadb.py
  41. 21 0
      tests/unit/hooks/data_source/test_mongodb.py
  42. 18 0
      tests/unit/hooks/data_source/test_mysql.py
  43. 42 0
      tests/unit/hooks/data_source/test_postgresql.py
  44. 18 0
      tests/unit/hooks/data_source/test_sqlite.py
  45. 15 0
      tests/unit/hooks/data_source/test_zfs.py
  46. 482 39
      tests/unit/hooks/test_command.py

+ 5 - 0
NEWS

@@ -1,3 +1,8 @@
+2.0.0.dev0
+ * #790: Deprecate all "before_*", "after_*" and "on_error" command hooks in favor of more flexible
+   "commands:". See the documentation for more information:
+   https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/
+
 1.9.14
  * #409: With the PagerDuty monitoring hook, send borgmatic logs to PagerDuty so they show up in the
    incident UI. See the documentation for more information:

+ 0 - 19
borgmatic/actions/check.py

@@ -682,7 +682,6 @@ def run_check(
     config_filename,
     repository,
     config,
-    hook_context,
     local_borg_version,
     check_arguments,
     global_arguments,
@@ -699,15 +698,6 @@ def run_check(
     ):
         return
 
-    borgmatic.hooks.command.execute_hook(
-        config.get('before_check'),
-        config.get('umask'),
-        config_filename,
-        'pre-check',
-        global_arguments.dry_run,
-        **hook_context,
-    )
-
     logger.info('Running consistency checks')
 
     repository_id = borgmatic.borg.check.get_repository_id(
@@ -772,12 +762,3 @@ def run_check(
                 borgmatic_runtime_directory,
             )
         write_check_time(make_check_time_path(config, repository_id, 'spot'))
-
-    borgmatic.hooks.command.execute_hook(
-        config.get('after_check'),
-        config.get('umask'),
-        config_filename,
-        'post-check',
-        global_arguments.dry_run,
-        **hook_context,
-    )

+ 0 - 18
borgmatic/actions/compact.py

@@ -12,7 +12,6 @@ def run_compact(
     config_filename,
     repository,
     config,
-    hook_context,
     local_borg_version,
     compact_arguments,
     global_arguments,
@@ -28,14 +27,6 @@ def run_compact(
     ):
         return
 
-    borgmatic.hooks.command.execute_hook(
-        config.get('before_compact'),
-        config.get('umask'),
-        config_filename,
-        'pre-compact',
-        global_arguments.dry_run,
-        **hook_context,
-    )
     if borgmatic.borg.feature.available(borgmatic.borg.feature.Feature.COMPACT, local_borg_version):
         logger.info(f'Compacting segments{dry_run_label}')
         borgmatic.borg.compact.compact_segments(
@@ -52,12 +43,3 @@ def run_compact(
         )
     else:  # pragma: nocover
         logger.info('Skipping compact (only available/needed in Borg 1.2+)')
-
-    borgmatic.hooks.command.execute_hook(
-        config.get('after_compact'),
-        config.get('umask'),
-        config_filename,
-        'post-compact',
-        global_arguments.dry_run,
-        **hook_context,
-    )

+ 0 - 19
borgmatic/actions/create.py

@@ -272,7 +272,6 @@ def run_create(
     repository,
     config,
     config_paths,
-    hook_context,
     local_borg_version,
     create_arguments,
     global_arguments,
@@ -290,15 +289,6 @@ def run_create(
     ):
         return
 
-    borgmatic.hooks.command.execute_hook(
-        config.get('before_backup'),
-        config.get('umask'),
-        config_filename,
-        'pre-backup',
-        global_arguments.dry_run,
-        **hook_context,
-    )
-
     logger.info(f'Creating archive{dry_run_label}')
     working_directory = borgmatic.config.paths.get_working_directory(config)
 
@@ -354,12 +344,3 @@ def run_create(
             borgmatic_runtime_directory,
             global_arguments.dry_run,
         )
-
-    borgmatic.hooks.command.execute_hook(
-        config.get('after_backup'),
-        config.get('umask'),
-        config_filename,
-        'post-backup',
-        global_arguments.dry_run,
-        **hook_context,
-    )

+ 0 - 17
borgmatic/actions/extract.py

@@ -12,7 +12,6 @@ def run_extract(
     config_filename,
     repository,
     config,
-    hook_context,
     local_borg_version,
     extract_arguments,
     global_arguments,
@@ -22,14 +21,6 @@ def run_extract(
     '''
     Run the "extract" action for the given repository.
     '''
-    borgmatic.hooks.command.execute_hook(
-        config.get('before_extract'),
-        config.get('umask'),
-        config_filename,
-        'pre-extract',
-        global_arguments.dry_run,
-        **hook_context,
-    )
     if extract_arguments.repository is None or borgmatic.config.validate.repositories_match(
         repository, extract_arguments.repository
     ):
@@ -56,11 +47,3 @@ def run_extract(
             strip_components=extract_arguments.strip_components,
             progress=extract_arguments.progress,
         )
-    borgmatic.hooks.command.execute_hook(
-        config.get('after_extract'),
-        config.get('umask'),
-        config_filename,
-        'post-extract',
-        global_arguments.dry_run,
-        **hook_context,
-    )

+ 0 - 17
borgmatic/actions/prune.py

@@ -11,7 +11,6 @@ def run_prune(
     config_filename,
     repository,
     config,
-    hook_context,
     local_borg_version,
     prune_arguments,
     global_arguments,
@@ -27,14 +26,6 @@ def run_prune(
     ):
         return
 
-    borgmatic.hooks.command.execute_hook(
-        config.get('before_prune'),
-        config.get('umask'),
-        config_filename,
-        'pre-prune',
-        global_arguments.dry_run,
-        **hook_context,
-    )
     logger.info(f'Pruning archives{dry_run_label}')
     borgmatic.borg.prune.prune_archives(
         global_arguments.dry_run,
@@ -46,11 +37,3 @@ def run_prune(
         local_path=local_path,
         remote_path=remote_path,
     )
-    borgmatic.hooks.command.execute_hook(
-        config.get('after_prune'),
-        config.get('umask'),
-        config_filename,
-        'post-prune',
-        global_arguments.dry_run,
-        **hook_context,
-    )

+ 429 - 403
borgmatic/commands/borgmatic.py

@@ -85,7 +85,7 @@ def run_configuration(config_filename, config, config_paths, arguments):
     retries = config.get('retries', 0)
     retry_wait = config.get('retry_wait', 0)
     encountered_error = None
-    error_repository = ''
+    error_repository = None
     using_primary_action = {'create', 'prune', 'compact', 'check'}.intersection(arguments)
     monitoring_log_level = verbosity_to_log_level(global_arguments.monitoring_verbosity)
     monitoring_hooks_are_activated = using_primary_action and monitoring_log_level != DISABLED
@@ -96,126 +96,41 @@ def run_configuration(config_filename, config, config_paths, arguments):
             f"Skipping {'/'.join(skip_actions)} action{'s' if len(skip_actions) > 1 else ''} due to configured skip_actions"
         )
 
-    try:
-        local_borg_version = borg_version.local_borg_version(config, local_path)
-        logger.debug(f'Borg {local_borg_version}')
-    except (OSError, CalledProcessError, ValueError) as error:
-        yield from log_error_records(f'{config_filename}: Error getting local Borg version', error)
-        return
-
-    try:
-        if monitoring_hooks_are_activated:
-            dispatch.call_hooks(
-                'initialize_monitor',
-                config,
-                dispatch.Hook_type.MONITORING,
-                config_filename,
-                monitoring_log_level,
-                global_arguments.dry_run,
-            )
-
-            dispatch.call_hooks(
-                'ping_monitor',
-                config,
-                dispatch.Hook_type.MONITORING,
-                config_filename,
-                monitor.State.START,
-                monitoring_log_level,
-                global_arguments.dry_run,
+    with borgmatic.hooks.command.Before_after_hooks(
+        command_hooks=config.get('commands'),
+        before_after='configuration',
+        umask=config.get('umask'),
+        dry_run=global_arguments.dry_run,
+        action_names=arguments.keys(),
+        configuration_filename=config_filename,
+        log_file=arguments['global'].log_file or '',
+    ):
+        try:
+            local_borg_version = borg_version.local_borg_version(config, local_path)
+            logger.debug(f'Borg {local_borg_version}')
+        except (OSError, CalledProcessError, ValueError) as error:
+            yield from log_error_records(
+                f'{config_filename}: Error getting local Borg version', error
             )
-    except (OSError, CalledProcessError) as error:
-        if command.considered_soft_failure(error):
             return
 
-        encountered_error = error
-        yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
-
-    if not encountered_error:
-        repo_queue = Queue()
-        for repo in config['repositories']:
-            repo_queue.put(
-                (repo, 0),
-            )
-
-        while not repo_queue.empty():
-            repository, retry_num = repo_queue.get()
-
-            with Log_prefix(repository.get('label', repository['path'])):
-                logger.debug('Running actions for repository')
-                timeout = retry_num * retry_wait
-                if timeout:
-                    logger.warning(f'Sleeping {timeout}s before next retry')
-                    time.sleep(timeout)
-                try:
-                    yield from run_actions(
-                        arguments=arguments,
-                        config_filename=config_filename,
-                        config=config,
-                        config_paths=config_paths,
-                        local_path=local_path,
-                        remote_path=remote_path,
-                        local_borg_version=local_borg_version,
-                        repository=repository,
-                    )
-                except (OSError, CalledProcessError, ValueError) as error:
-                    if retry_num < retries:
-                        repo_queue.put(
-                            (repository, retry_num + 1),
-                        )
-                        tuple(  # Consume the generator so as to trigger logging.
-                            log_error_records(
-                                'Error running actions for repository',
-                                error,
-                                levelno=logging.WARNING,
-                                log_command_error_output=True,
-                            )
-                        )
-                        logger.warning(f'Retrying... attempt {retry_num + 1}/{retries}')
-                        continue
-
-                    if command.considered_soft_failure(error):
-                        continue
-
-                    yield from log_error_records(
-                        'Error running actions for repository',
-                        error,
-                    )
-                    encountered_error = error
-                    error_repository = repository['path']
-
-    try:
-        if monitoring_hooks_are_activated:
-            # Send logs irrespective of error.
-            dispatch.call_hooks(
-                'ping_monitor',
-                config,
-                dispatch.Hook_type.MONITORING,
-                config_filename,
-                monitor.State.LOG,
-                monitoring_log_level,
-                global_arguments.dry_run,
-            )
-    except (OSError, CalledProcessError) as error:
-        if not command.considered_soft_failure(error):
-            encountered_error = error
-            yield from log_error_records('Error pinging monitor', error)
-
-    if not encountered_error:
         try:
             if monitoring_hooks_are_activated:
                 dispatch.call_hooks(
-                    'ping_monitor',
+                    'initialize_monitor',
                     config,
                     dispatch.Hook_type.MONITORING,
                     config_filename,
-                    monitor.State.FINISH,
                     monitoring_log_level,
                     global_arguments.dry_run,
                 )
+
                 dispatch.call_hooks(
-                    'destroy_monitor',
+                    'ping_monitor',
                     config,
                     dispatch.Hook_type.MONITORING,
+                    config_filename,
+                    monitor.State.START,
                     monitoring_log_level,
                     global_arguments.dry_run,
                 )
@@ -226,39 +141,142 @@ def run_configuration(config_filename, config, config_paths, arguments):
             encountered_error = error
             yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
 
-    if encountered_error and using_primary_action:
+        if not encountered_error:
+            repo_queue = Queue()
+            for repo in config['repositories']:
+                repo_queue.put(
+                    (repo, 0),
+                )
+
+            while not repo_queue.empty():
+                repository, retry_num = repo_queue.get()
+
+                with Log_prefix(repository.get('label', repository['path'])):
+                    logger.debug('Running actions for repository')
+                    timeout = retry_num * retry_wait
+                    if timeout:
+                        logger.warning(f'Sleeping {timeout}s before next retry')
+                        time.sleep(timeout)
+                    try:
+                        yield from run_actions(
+                            arguments=arguments,
+                            config_filename=config_filename,
+                            config=config,
+                            config_paths=config_paths,
+                            local_path=local_path,
+                            remote_path=remote_path,
+                            local_borg_version=local_borg_version,
+                            repository=repository,
+                        )
+                    except (OSError, CalledProcessError, ValueError) as error:
+                        if retry_num < retries:
+                            repo_queue.put(
+                                (repository, retry_num + 1),
+                            )
+                            tuple(  # Consume the generator so as to trigger logging.
+                                log_error_records(
+                                    'Error running actions for repository',
+                                    error,
+                                    levelno=logging.WARNING,
+                                    log_command_error_output=True,
+                                )
+                            )
+                            logger.warning(f'Retrying... attempt {retry_num + 1}/{retries}')
+                            continue
+
+                        if command.considered_soft_failure(error):
+                            continue
+
+                        yield from log_error_records(
+                            'Error running actions for repository',
+                            error,
+                        )
+                        encountered_error = error
+                        error_repository = repository
+
+        try:
+            if monitoring_hooks_are_activated:
+                # Send logs irrespective of error.
+                dispatch.call_hooks(
+                    'ping_monitor',
+                    config,
+                    dispatch.Hook_type.MONITORING,
+                    config_filename,
+                    monitor.State.LOG,
+                    monitoring_log_level,
+                    global_arguments.dry_run,
+                )
+        except (OSError, CalledProcessError) as error:
+            encountered_error = error
+            yield from log_error_records('Error pinging monitor', error)
+
+        if not encountered_error:
+            try:
+                if monitoring_hooks_are_activated:
+                    dispatch.call_hooks(
+                        'ping_monitor',
+                        config,
+                        dispatch.Hook_type.MONITORING,
+                        config_filename,
+                        monitor.State.FINISH,
+                        monitoring_log_level,
+                        global_arguments.dry_run,
+                    )
+                    dispatch.call_hooks(
+                        'destroy_monitor',
+                        config,
+                        dispatch.Hook_type.MONITORING,
+                        monitoring_log_level,
+                        global_arguments.dry_run,
+                    )
+            except (OSError, CalledProcessError) as error:
+                encountered_error = error
+                yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
+            else:
+                return
+
         try:
-            command.execute_hook(
-                config.get('on_error'),
+            command.execute_hooks(
+                command.filter_hooks(
+                    config.get('commands'), after='error', action_names=arguments.keys()
+                ),
                 config.get('umask'),
-                config_filename,
-                'on-error',
                 global_arguments.dry_run,
-                repository=error_repository,
+                configuration_filename=config_filename,
+                log_file=arguments['global'].log_file or '',
+                repository=error_repository.get('path', '') if error_repository else '',
+                repository_label=error_repository.get('label', '') if error_repository else '',
                 error=encountered_error,
                 output=getattr(encountered_error, 'output', ''),
             )
-            dispatch.call_hooks(
-                'ping_monitor',
-                config,
-                dispatch.Hook_type.MONITORING,
-                config_filename,
-                monitor.State.FAIL,
-                monitoring_log_level,
-                global_arguments.dry_run,
-            )
-            dispatch.call_hooks(
-                'destroy_monitor',
-                config,
-                dispatch.Hook_type.MONITORING,
-                monitoring_log_level,
-                global_arguments.dry_run,
-            )
         except (OSError, CalledProcessError) as error:
             if command.considered_soft_failure(error):
                 return
 
-            yield from log_error_records(f'{config_filename}: Error running on-error hook', error)
+            yield from log_error_records(
+                f'{config_filename}: Error running after error hook', error
+            )
+
+        try:
+            if monitoring_hooks_are_activated:
+                dispatch.call_hooks(
+                    'ping_monitor',
+                    config,
+                    dispatch.Hook_type.MONITORING,
+                    config_filename,
+                    monitor.State.FAIL,
+                    monitoring_log_level,
+                    global_arguments.dry_run,
+                )
+                dispatch.call_hooks(
+                    'destroy_monitor',
+                    config,
+                    dispatch.Hook_type.MONITORING,
+                    monitoring_log_level,
+                    global_arguments.dry_run,
+                )
+        except (OSError, CalledProcessError) as error:
+            yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
 
 
 def run_actions(
@@ -289,6 +307,7 @@ def run_actions(
     global_arguments = arguments['global']
     dry_run_label = ' (dry run; not making any changes)' if global_arguments.dry_run else ''
     hook_context = {
+        'configuration_filename': config_filename,
         'repository_label': repository.get('label', ''),
         'log_file': global_arguments.log_file if global_arguments.log_file else '',
         # Deprecated: For backwards compatibility with borgmatic < 1.6.0.
@@ -297,240 +316,236 @@ def run_actions(
     }
     skip_actions = set(get_skip_actions(config, arguments))
 
-    command.execute_hook(
-        config.get('before_actions'),
-        config.get('umask'),
-        config_filename,
-        'pre-actions',
-        global_arguments.dry_run,
+    with borgmatic.hooks.command.Before_after_hooks(
+        command_hooks=config.get('commands'),
+        before_after='repository',
+        umask=config.get('umask'),
+        dry_run=global_arguments.dry_run,
+        action_names=arguments.keys(),
         **hook_context,
-    )
-
-    for action_name, action_arguments in arguments.items():
-        if action_name == 'repo-create' and action_name not in skip_actions:
-            borgmatic.actions.repo_create.run_repo_create(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'transfer' and action_name not in skip_actions:
-            borgmatic.actions.transfer.run_transfer(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'create' and action_name not in skip_actions:
-            yield from borgmatic.actions.create.run_create(
-                config_filename,
-                repository,
-                config,
-                config_paths,
-                hook_context,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                dry_run_label,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'prune' and action_name not in skip_actions:
-            borgmatic.actions.prune.run_prune(
-                config_filename,
-                repository,
-                config,
-                hook_context,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                dry_run_label,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'compact' and action_name not in skip_actions:
-            borgmatic.actions.compact.run_compact(
-                config_filename,
-                repository,
-                config,
-                hook_context,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                dry_run_label,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'check' and action_name not in skip_actions:
-            if checks.repository_enabled_for_checks(repository, config):
-                borgmatic.actions.check.run_check(
-                    config_filename,
-                    repository,
-                    config,
-                    hook_context,
-                    local_borg_version,
-                    action_arguments,
-                    global_arguments,
-                    local_path,
-                    remote_path,
-                )
-        elif action_name == 'extract' and action_name not in skip_actions:
-            borgmatic.actions.extract.run_extract(
-                config_filename,
-                repository,
-                config,
-                hook_context,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'export-tar' and action_name not in skip_actions:
-            borgmatic.actions.export_tar.run_export_tar(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'mount' and action_name not in skip_actions:
-            borgmatic.actions.mount.run_mount(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'restore' and action_name not in skip_actions:
-            borgmatic.actions.restore.run_restore(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'repo-list' and action_name not in skip_actions:
-            yield from borgmatic.actions.repo_list.run_repo_list(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'list' and action_name not in skip_actions:
-            yield from borgmatic.actions.list.run_list(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'repo-info' and action_name not in skip_actions:
-            yield from borgmatic.actions.repo_info.run_repo_info(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'info' and action_name not in skip_actions:
-            yield from borgmatic.actions.info.run_info(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'break-lock' and action_name not in skip_actions:
-            borgmatic.actions.break_lock.run_break_lock(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'export' and action_name not in skip_actions:
-            borgmatic.actions.export_key.run_export_key(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'change-passphrase' and action_name not in skip_actions:
-            borgmatic.actions.change_passphrase.run_change_passphrase(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'delete' and action_name not in skip_actions:
-            borgmatic.actions.delete.run_delete(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'repo-delete' and action_name not in skip_actions:
-            borgmatic.actions.repo_delete.run_repo_delete(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-        elif action_name == 'borg' and action_name not in skip_actions:
-            borgmatic.actions.borg.run_borg(
-                repository,
-                config,
-                local_borg_version,
-                action_arguments,
-                global_arguments,
-                local_path,
-                remote_path,
-            )
-
-    command.execute_hook(
-        config.get('after_actions'),
-        config.get('umask'),
-        config_filename,
-        'post-actions',
-        global_arguments.dry_run,
-        **hook_context,
-    )
+    ):
+        for action_name, action_arguments in arguments.items():
+            if action_name == 'global':
+                continue
+
+            with borgmatic.hooks.command.Before_after_hooks(
+                command_hooks=config.get('commands'),
+                before_after='action',
+                umask=config.get('umask'),
+                dry_run=global_arguments.dry_run,
+                action_names=arguments.keys(),
+                **hook_context,
+            ):
+                if action_name == 'repo-create' and action_name not in skip_actions:
+                    borgmatic.actions.repo_create.run_repo_create(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'transfer' and action_name not in skip_actions:
+                    borgmatic.actions.transfer.run_transfer(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'create' and action_name not in skip_actions:
+                    yield from borgmatic.actions.create.run_create(
+                        config_filename,
+                        repository,
+                        config,
+                        config_paths,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        dry_run_label,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'prune' and action_name not in skip_actions:
+                    borgmatic.actions.prune.run_prune(
+                        config_filename,
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        dry_run_label,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'compact' and action_name not in skip_actions:
+                    borgmatic.actions.compact.run_compact(
+                        config_filename,
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        dry_run_label,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'check' and action_name not in skip_actions:
+                    if checks.repository_enabled_for_checks(repository, config):
+                        borgmatic.actions.check.run_check(
+                            config_filename,
+                            repository,
+                            config,
+                            local_borg_version,
+                            action_arguments,
+                            global_arguments,
+                            local_path,
+                            remote_path,
+                        )
+                elif action_name == 'extract' and action_name not in skip_actions:
+                    borgmatic.actions.extract.run_extract(
+                        config_filename,
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'export-tar' and action_name not in skip_actions:
+                    borgmatic.actions.export_tar.run_export_tar(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'mount' and action_name not in skip_actions:
+                    borgmatic.actions.mount.run_mount(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'restore' and action_name not in skip_actions:
+                    borgmatic.actions.restore.run_restore(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'repo-list' and action_name not in skip_actions:
+                    yield from borgmatic.actions.repo_list.run_repo_list(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'list' and action_name not in skip_actions:
+                    yield from borgmatic.actions.list.run_list(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'repo-info' and action_name not in skip_actions:
+                    yield from borgmatic.actions.repo_info.run_repo_info(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'info' and action_name not in skip_actions:
+                    yield from borgmatic.actions.info.run_info(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'break-lock' and action_name not in skip_actions:
+                    borgmatic.actions.break_lock.run_break_lock(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'export' and action_name not in skip_actions:
+                    borgmatic.actions.export_key.run_export_key(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'change-passphrase' and action_name not in skip_actions:
+                    borgmatic.actions.change_passphrase.run_change_passphrase(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'delete' and action_name not in skip_actions:
+                    borgmatic.actions.delete.run_delete(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'repo-delete' and action_name not in skip_actions:
+                    borgmatic.actions.repo_delete.run_repo_delete(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
+                elif action_name == 'borg' and action_name not in skip_actions:
+                    borgmatic.actions.borg.run_borg(
+                        repository,
+                        config,
+                        local_borg_version,
+                        action_arguments,
+                        global_arguments,
+                        local_path,
+                        remote_path,
+                    )
 
 
 def load_configurations(config_filenames, overrides=None, resolve_env=True):
@@ -810,43 +825,53 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments):
         )
         return
 
-    if 'create' in arguments:
-        try:
-            for config_filename, config in configs.items():
-                command.execute_hook(
-                    config.get('before_everything'),
-                    config.get('umask'),
-                    config_filename,
-                    'pre-everything',
-                    arguments['global'].dry_run,
-                )
-        except (CalledProcessError, ValueError, OSError) as error:
-            yield from log_error_records('Error running pre-everything hook', error)
-            return
+    try:
+        for config_filename, config in configs.items():
+            command.execute_hooks(
+                command.filter_hooks(
+                    config.get('commands'), before='everything', action_names=arguments.keys()
+                ),
+                config.get('umask'),
+                arguments['global'].dry_run,
+                configuration_filename=config_filename,
+                log_file=arguments['global'].log_file or '',
+            )
+    except (CalledProcessError, ValueError, OSError) as error:
+        yield from log_error_records('Error running before everything hook', error)
+        return
 
     # Execute the actions corresponding to each configuration file.
     json_results = []
 
     for config_filename, config in configs.items():
         with Log_prefix(config_filename):
-            results = list(run_configuration(config_filename, config, config_paths, arguments))
-            error_logs = tuple(
-                result for result in results if isinstance(result, logging.LogRecord)
-            )
-
-            if error_logs:
-                yield from log_error_records('An error occurred')
-                yield from error_logs
+            try:
+                results = list(run_configuration(config_filename, config, config_paths, arguments))
+            except (OSError, CalledProcessError, ValueError) as error:
+                yield from log_error_records(
+                    'Error running configuration file',
+                    error,
+                    levelno=logging.CRITICAL,
+                    log_command_error_output=True,
+                )
             else:
-                yield logging.makeLogRecord(
-                    dict(
-                        levelno=logging.INFO,
-                        levelname='INFO',
-                        msg='Successfully ran configuration file',
-                    )
+                error_logs = tuple(
+                    result for result in results if isinstance(result, logging.LogRecord)
                 )
-                if results:
-                    json_results.extend(results)
+
+                if error_logs:
+                    yield from log_error_records('An error occurred')
+                    yield from error_logs
+                else:
+                    yield logging.makeLogRecord(
+                        dict(
+                            levelno=logging.INFO,
+                            levelname='INFO',
+                            msg='Successfully ran configuration file',
+                        )
+                    )
+                    if results:
+                        json_results.extend(results)
 
     if 'umount' in arguments:
         logger.info(f"Unmounting mount point {arguments['umount'].mount_point}")
@@ -862,18 +887,19 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments):
     if json_results:
         sys.stdout.write(json.dumps(json_results))
 
-    if 'create' in arguments:
-        try:
-            for config_filename, config in configs.items():
-                command.execute_hook(
-                    config.get('after_everything'),
-                    config.get('umask'),
-                    config_filename,
-                    'post-everything',
-                    arguments['global'].dry_run,
-                )
-        except (CalledProcessError, ValueError, OSError) as error:
-            yield from log_error_records('Error running post-everything hook', error)
+    try:
+        for config_filename, config in configs.items():
+            command.execute_hooks(
+                command.filter_hooks(
+                    config.get('commands'), after='everything', action_names=arguments.keys()
+                ),
+                config.get('umask'),
+                arguments['global'].dry_run,
+                configuration_filename=config_filename,
+                log_file=arguments['global'].log_file or '',
+            )
+    except (CalledProcessError, ValueError, OSError) as error:
+        yield from log_error_records('Error running after everything hook', error)
 
 
 def exit_with_help_link():  # pragma: no cover

+ 47 - 35
borgmatic/config/generate.py

@@ -1,5 +1,6 @@
 import collections
 import io
+import itertools
 import os
 import re
 
@@ -24,20 +25,31 @@ def insert_newline_before_comment(config, field_name):
 def get_properties(schema):
     '''
     Given a schema dict, return its properties. But if it's got sub-schemas with multiple different
-    potential properties, returned their merged properties instead.
+    potential properties, return their merged properties instead (interleaved so the first
+    properties of each sub-schema come first). The idea is that the user should see all possible
+    options even if they're not all possible together.
     '''
     if 'oneOf' in schema:
         return dict(
-            collections.ChainMap(*[sub_schema['properties'] for sub_schema in schema['oneOf']])
+            item
+            for item in itertools.chain(
+                *itertools.zip_longest(
+                    *[sub_schema['properties'].items() for sub_schema in schema['oneOf']]
+                )
+            )
+            if item is not None
         )
 
     return schema['properties']
 
 
-def schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
+def schema_to_sample_configuration(schema, source_config=None, level=0, parent_is_sequence=False):
     '''
-    Given a loaded configuration schema, generate and return sample config for it. Include comments
-    for each option based on the schema "description".
+    Given a loaded configuration schema and a source configuration, generate and return sample
+    config for the schema. Include comments for each option based on the schema "description".
+
+    If a source config is given, walk it alongside the given schema so that both can be taken into
+    account when commenting out particular options in add_comments_to_configuration_object().
     '''
     schema_type = schema.get('type')
     example = schema.get('example')
@@ -47,19 +59,31 @@ def schema_to_sample_configuration(schema, level=0, parent_is_sequence=False):
 
     if schema_type == 'array' or (isinstance(schema_type, list) and 'array' in schema_type):
         config = ruamel.yaml.comments.CommentedSeq(
-            [schema_to_sample_configuration(schema['items'], level, parent_is_sequence=True)]
+            [
+                schema_to_sample_configuration(
+                    schema['items'], source_config, level, parent_is_sequence=True
+                )
+            ]
         )
         add_comments_to_configuration_sequence(config, schema, indent=(level * INDENT))
     elif schema_type == 'object' or (isinstance(schema_type, list) and 'object' in schema_type):
+        if source_config and isinstance(source_config, list) and isinstance(source_config[0], dict):
+            source_config = dict(collections.ChainMap(*source_config))
+
         config = ruamel.yaml.comments.CommentedMap(
             [
-                (field_name, schema_to_sample_configuration(sub_schema, level + 1))
+                (
+                    field_name,
+                    schema_to_sample_configuration(
+                        sub_schema, (source_config or {}).get(field_name, {}), level + 1
+                    ),
+                )
                 for field_name, sub_schema in get_properties(schema).items()
             ]
         )
         indent = (level * INDENT) + (SEQUENCE_INDENT if parent_is_sequence else 0)
         add_comments_to_configuration_object(
-            config, schema, indent=indent, skip_first=parent_is_sequence
+            config, schema, source_config, indent=indent, skip_first=parent_is_sequence
         )
     else:
         raise ValueError(f'Schema at level {level} is unsupported: {schema}')
@@ -179,14 +203,21 @@ def add_comments_to_configuration_sequence(config, schema, indent=0):
         return
 
 
-REQUIRED_KEYS = {'source_directories', 'repositories', 'keep_daily'}
+DEFAULT_KEYS = {'source_directories', 'repositories', 'keep_daily'}
 COMMENTED_OUT_SENTINEL = 'COMMENT_OUT'
 
 
-def add_comments_to_configuration_object(config, schema, indent=0, skip_first=False):
+def add_comments_to_configuration_object(
+    config, schema, source_config=None, indent=0, skip_first=False
+):
     '''
     Using descriptions from a schema as a source, add those descriptions as comments to the given
-    config mapping, before each field. Indent the comment the given number of characters.
+    configuration dict, putting them before each field. Indent the comment the given number of
+    characters.
+
+    Add a sentinel for commenting out options that are neither in DEFAULT_KEYS nor in the given
+    source configuration dict. The idea is that any options used in the source configuration should
+    stay active in the generated configuration.
     '''
     for index, field_name in enumerate(config.keys()):
         if skip_first and index == 0:
@@ -195,10 +226,12 @@ def add_comments_to_configuration_object(config, schema, indent=0, skip_first=Fa
         field_schema = get_properties(schema).get(field_name, {})
         description = field_schema.get('description', '').strip()
 
-        # If this is an optional key, add an indicator to the comment flagging it to be commented
+        # If this isn't a default key, add an indicator to the comment flagging it to be commented
         # out from the sample configuration. This sentinel is consumed by downstream processing that
         # does the actual commenting out.
-        if field_name not in REQUIRED_KEYS:
+        if field_name not in DEFAULT_KEYS and (
+            source_config is None or field_name not in source_config
+        ):
             description = (
                 '\n'.join((description, COMMENTED_OUT_SENTINEL))
                 if description
@@ -218,21 +251,6 @@ def add_comments_to_configuration_object(config, schema, indent=0, skip_first=Fa
 RUAMEL_YAML_COMMENTS_INDEX = 1
 
 
-def remove_commented_out_sentinel(config, field_name):
-    '''
-    Given a configuration CommentedMap and a top-level field name in it, remove any "commented out"
-    sentinel found at the end of its YAML comments. This prevents the given field name from getting
-    commented out by downstream processing that consumes the sentinel.
-    '''
-    try:
-        last_comment_value = config.ca.items[field_name][RUAMEL_YAML_COMMENTS_INDEX][-1].value
-    except KeyError:
-        return
-
-    if last_comment_value == f'# {COMMENTED_OUT_SENTINEL}\n':
-        config.ca.items[field_name][RUAMEL_YAML_COMMENTS_INDEX].pop()
-
-
 def merge_source_configuration_into_destination(destination_config, source_config):
     '''
     Deep merge the given source configuration dict into the destination configuration CommentedMap,
@@ -247,12 +265,6 @@ def merge_source_configuration_into_destination(destination_config, source_confi
         return source_config
 
     for field_name, source_value in source_config.items():
-        # Since this key/value is from the source configuration, leave it uncommented and remove any
-        # sentinel that would cause it to get commented out.
-        remove_commented_out_sentinel(
-            ruamel.yaml.comments.CommentedMap(destination_config), field_name
-        )
-
         # This is a mapping. Recurse for this key/value.
         if isinstance(source_value, collections.abc.Mapping):
             destination_config[field_name] = merge_source_configuration_into_destination(
@@ -298,7 +310,7 @@ def generate_sample_configuration(
         normalize.normalize(source_filename, source_config)
 
     destination_config = merge_source_configuration_into_destination(
-        schema_to_sample_configuration(schema), source_config
+        schema_to_sample_configuration(schema, source_config), source_config
     )
 
     if dry_run:

+ 85 - 0
borgmatic/config/normalize.py

@@ -58,6 +58,90 @@ def normalize_sections(config_filename, config):
     return []
 
 
+def make_command_hook_deprecation_log(config_filename, option_name):  # pragma: no cover
+    '''
+    Given a configuration filename and the name of a configuration option, return a deprecation
+    warning log for it.
+    '''
+    return logging.makeLogRecord(
+        dict(
+            levelno=logging.WARNING,
+            levelname='WARNING',
+            msg=f'{config_filename}: {option_name} is deprecated and support will be removed from a future release. Use commands: instead.',
+        )
+    )
+
+
+def normalize_commands(config_filename, config):
+    '''
+    Given a configuration filename and a configuration dict, transform any "before_*"- and
+    "after_*"-style command hooks into "commands:".
+    '''
+    logs = []
+
+    # Normalize "before_actions" and "after_actions".
+    for preposition in ('before', 'after'):
+        option_name = f'{preposition}_actions'
+        commands = config.pop(option_name, None)
+
+        if commands:
+            logs.append(make_command_hook_deprecation_log(config_filename, option_name))
+            config.setdefault('commands', []).append(
+                {
+                    preposition: 'repository',
+                    'run': commands,
+                }
+            )
+
+    # Normalize "before_backup", "before_prune", "after_backup", "after_prune", etc.
+    for action_name in ('create', 'prune', 'compact', 'check', 'extract'):
+        for preposition in ('before', 'after'):
+            option_name = f'{preposition}_{"backup" if action_name == "create" else action_name}'
+            commands = config.pop(option_name, None)
+
+            if not commands:
+                continue
+
+            logs.append(make_command_hook_deprecation_log(config_filename, option_name))
+            config.setdefault('commands', []).append(
+                {
+                    preposition: 'action',
+                    'when': [action_name],
+                    'run': commands,
+                }
+            )
+
+    # Normalize "on_error".
+    commands = config.pop('on_error', None)
+
+    if commands:
+        logs.append(make_command_hook_deprecation_log(config_filename, 'on_error'))
+        config.setdefault('commands', []).append(
+            {
+                'after': 'error',
+                'when': ['create', 'prune', 'compact', 'check'],
+                'run': commands,
+            }
+        )
+
+    # Normalize "before_everything" and "after_everything".
+    for preposition in ('before', 'after'):
+        option_name = f'{preposition}_everything'
+        commands = config.pop(option_name, None)
+
+        if commands:
+            logs.append(make_command_hook_deprecation_log(config_filename, option_name))
+            config.setdefault('commands', []).append(
+                {
+                    preposition: 'everything',
+                    'when': ['create'],
+                    'run': commands,
+                }
+            )
+
+    return logs
+
+
 def normalize(config_filename, config):
     '''
     Given a configuration filename and a configuration dict of its loaded contents, apply particular
@@ -67,6 +151,7 @@ def normalize(config_filename, config):
     Raise ValueError the configuration cannot be normalized.
     '''
     logs = normalize_sections(config_filename, config)
+    logs += normalize_commands(config_filename, config)
 
     if config.get('borgmatic_source_directory'):
         logs.append(

+ 214 - 35
borgmatic/config/schema.yaml

@@ -796,8 +796,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute before all
-            the actions for each repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute before all the actions for each
+            repository.
         example:
             - "echo Starting actions."
     before_backup:
@@ -805,8 +806,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute before
-            creating a backup, run once per repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute before creating a backup, run once
+            per repository.
         example:
             - "echo Starting a backup."
     before_prune:
@@ -814,8 +816,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute before
-            pruning, run once per repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute before pruning, run once per
+            repository.
         example:
             - "echo Starting pruning."
     before_compact:
@@ -823,8 +826,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute before
-            compaction, run once per repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute before compaction, run once per
+            repository.
         example:
             - "echo Starting compaction."
     before_check:
@@ -832,8 +836,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute before
-            consistency checks, run once per repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute before consistency checks, run once
+            per repository.
         example:
             - "echo Starting checks."
     before_extract:
@@ -841,8 +846,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute before
-            extracting a backup, run once per repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute before extracting a backup, run once
+            per repository.
         example:
             - "echo Starting extracting."
     after_backup:
@@ -850,8 +856,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute after
-            creating a backup, run once per repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute after creating a backup, run once per
+            repository.
         example:
             - "echo Finished a backup."
     after_compact:
@@ -859,8 +866,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute after
-            compaction, run once per repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute after compaction, run once per
+            repository.
         example:
             - "echo Finished compaction."
     after_prune:
@@ -868,8 +876,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute after
-            pruning, run once per repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute after pruning, run once per
+            repository.
         example:
             - "echo Finished pruning."
     after_check:
@@ -877,8 +886,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute after
-            consistency checks, run once per repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute after consistency checks, run once
+            per repository.
         example:
             - "echo Finished checks."
     after_extract:
@@ -886,8 +896,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute after
-            extracting a backup, run once per repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute after extracting a backup, run once
+            per repository.
         example:
             - "echo Finished extracting."
     after_actions:
@@ -895,8 +906,9 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute after all
-            actions for each repository.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute after all actions for each
+            repository.
         example:
             - "echo Finished actions."
     on_error:
@@ -904,9 +916,10 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute when an
-            exception occurs during a "create", "prune", "compact", or "check"
-            action or an associated before/after hook.
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute when an exception occurs during a
+            "create", "prune", "compact", or "check" action or an associated
+            before/after hook.
         example:
             - "echo Error during create/prune/compact/check."
     before_everything:
@@ -914,10 +927,10 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute before
-            running all actions (if one of them is "create"). These are
-            collected from all configuration files and then run once before all
-            of them (prior to all actions).
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute before running all actions (if one of
+            them is "create"). These are collected from all configuration files
+            and then run once before all of them (prior to all actions).
         example:
             - "echo Starting actions."
     after_everything:
@@ -925,12 +938,178 @@ properties:
         items:
             type: string
         description: |
-            List of one or more shell commands or scripts to execute after
-            running all actions (if one of them is "create"). These are
-            collected from all configuration files and then run once after all
-            of them (after any action).
+            Deprecated. Use "commands:" instead. List of one or more shell
+            commands or scripts to execute after running all actions (if one of
+            them is "create"). These are collected from all configuration files
+            and then run once after all of them (after any action).
         example:
             - "echo Completed actions."
+    commands:
+        type: array
+        items:
+            type: object
+            oneOf:
+                - required: [before, run]
+                  additionalProperties: false
+                  properties:
+                      before:
+                          type: string
+                          enum:
+                              - action
+                              - repository
+                              - configuration
+                              - everything
+                              - dump_data_sources
+                          description: |
+                              Name for the point in borgmatic's execution that
+                              the commands should be run before (required if
+                              "after" isn't set):
+                               * "action" runs before each action for each
+                              repository.
+                               * "repository" runs before all actions for each
+                              repository.
+                               * "configuration" runs before all actions and
+                              repositories in the current configuration file.
+                               * "everything" runs before all configuration
+                              files.
+                               * "dump_data_sources" runs before each data
+                              source is dumped.
+                          example: action
+                      hooks:
+                          type: array
+                          items:
+                              type: string
+                          description: |
+                              List of names of other hooks that this command
+                              hook applies to. Defaults to all hooks of the
+                              relevant type. Only supported for the
+                              "dump_data_sources" hook.
+                          example: postgresql
+                      when:
+                          type: array
+                          items:
+                              type: string
+                              enum:
+                                  - repo-create
+                                  - transfer
+                                  - prune
+                                  - compact
+                                  - create
+                                  - check
+                                  - delete
+                                  - extract
+                                  - config
+                                  - export-tar
+                                  - mount
+                                  - umount
+                                  - repo-delete
+                                  - restore
+                                  - repo-list
+                                  - list
+                                  - repo-info
+                                  - info
+                                  - break-lock
+                                  - key
+                                  - borg
+                          description: |
+                              List of actions for which the commands will be
+                              run. Defaults to running for all actions. Ignored
+                              for "dump_data_sources", which by its nature only
+                              runs for "create".
+                          example: [create, prune, compact, check]
+                      run:
+                          type: array
+                          items:
+                              type: string
+                          description: |
+                              List of one or more shell commands or scripts to
+                              run when this command hook is triggered. Required.
+                          example:
+                              - "echo Doing stuff."
+                - required: [after, run]
+                  additionalProperties: false
+                  properties:
+                      after:
+                          type: string
+                          enum:
+                              - action
+                              - repository
+                              - configuration
+                              - everything
+                              - error
+                              - dump_data_sources
+                          description: |
+                              Name for the point in borgmatic's execution that
+                              the commands should be run after (required if
+                              "before" isn't set):
+                               * "action" runs after each action for each
+                              repository.
+                               * "repository" runs after all actions for each
+                              repository.
+                               * "configuration" runs after all actions and
+                              repositories in the current configuration file.
+                               * "everything" runs after all configuration
+                              files.
+                               * "error" runs after an error occurs.
+                               * "dump_data_sources" runs after each data
+                              source is dumped.
+                          example: action
+                      hooks:
+                          type: array
+                          items:
+                              type: string
+                          description: |
+                              List of names of other hooks that this command
+                              hook applies to. Defaults to all hooks of the
+                              relevant type. Only supported for the
+                              "dump_data_sources" hook.
+                          example: postgresql
+                      when:
+                          type: array
+                          items:
+                              type: string
+                              enum:
+                                  - repo-create
+                                  - transfer
+                                  - prune
+                                  - compact
+                                  - create
+                                  - check
+                                  - delete
+                                  - extract
+                                  - config
+                                  - export-tar
+                                  - mount
+                                  - umount
+                                  - repo-delete
+                                  - restore
+                                  - repo-list
+                                  - list
+                                  - repo-info
+                                  - info
+                                  - break-lock
+                                  - key
+                                  - borg
+                          description: |
+                              Only trigger the hook when borgmatic is run with
+                              particular actions listed here. Defaults to
+                              running for all actions. Ignored for
+                              "dump_data_sources", which by its nature only runs
+                              for "create".
+                          example: [create, prune, compact, check]
+                      run:
+                          type: array
+                          items:
+                              type: string
+                          description: |
+                              List of one or more shell commands or scripts to
+                              run when this command hook is triggered. Required.
+                          example:
+                              - "echo Doing stuff."
+        description: |
+            List of one or more command hooks to execute, triggered at
+            particular points during borgmatic's execution. For each command
+            hook, specify one of "before" or "after", not both.
     bootstrap:
         type: object
         properties:

+ 160 - 36
borgmatic/hooks/command.py

@@ -2,9 +2,11 @@ import logging
 import os
 import re
 import shlex
+import subprocess
 import sys
 
 import borgmatic.execute
+import borgmatic.logger
 
 logger = logging.getLogger(__name__)
 
@@ -44,54 +46,176 @@ def make_environment(current_environment, sys_module=sys):
     return environment
 
 
-def execute_hook(commands, umask, config_filename, description, dry_run, **context):
+def filter_hooks(command_hooks, before=None, after=None, hook_name=None, action_names=None):
     '''
-    Given a list of hook commands to execute, a umask to execute with (or None), a config filename,
-    a hook description, and whether this is a dry run, run the given commands. Or, don't run them
-    if this is a dry run.
+    Given a sequence of command hook dicts from configuration and one or more filters (before name,
+    after name, calling hook name, or a sequence of action names), filter down the command hooks to
+    just the ones that match the given filters.
+    '''
+    return tuple(
+        hook_config
+        for hook_config in command_hooks or ()
+        for config_hook_names in (hook_config.get('hooks'),)
+        for config_action_names in (hook_config.get('when'),)
+        if before is None or hook_config.get('before') == before
+        if after is None or hook_config.get('after') == after
+        if hook_name is None or config_hook_names is None or hook_name in config_hook_names
+        if action_names is None
+        or config_action_names is None
+        or set(config_action_names or ()).intersection(set(action_names))
+    )
+
+
+def execute_hooks(command_hooks, umask, dry_run, **context):
+    '''
+    Given a sequence of command hook dicts from configuration, a umask to execute with (or None),
+    and whether this is a dry run, run the commands for each hook. Or don't run them if this is a
+    dry run.
 
     The context contains optional values interpolated by name into the hook commands.
 
-    Raise ValueError if the umask cannot be parsed.
+    Raise ValueError if the umask cannot be parsed or a hook is invalid.
     Raise subprocesses.CalledProcessError if an error occurs in a hook.
     '''
-    if not commands:
-        logger.debug(f'No commands to run for {description} hook')
-        return
+    borgmatic.logger.add_custom_log_levels()
 
     dry_run_label = ' (dry run; not actually running hooks)' if dry_run else ''
 
-    context['configuration_filename'] = config_filename
-    commands = [interpolate_context(description, command, context) for command in commands]
+    for hook_config in command_hooks:
+        commands = hook_config.get('run')
 
-    if len(commands) == 1:
-        logger.info(f'Running command for {description} hook{dry_run_label}')
-    else:
-        logger.info(
-            f'Running {len(commands)} commands for {description} hook{dry_run_label}',
-        )
+        if 'before' in hook_config:
+            description = f'before {hook_config.get("before")}'
+        elif 'after' in hook_config:
+            description = f'after {hook_config.get("after")}'
+        else:
+            raise ValueError(f'Invalid hook configuration: {hook_config}')
 
-    if umask:
-        parsed_umask = int(str(umask), 8)
-        logger.debug(f'Set hook umask to {oct(parsed_umask)}')
-        original_umask = os.umask(parsed_umask)
-    else:
-        original_umask = None
-
-    try:
-        for command in commands:
-            if dry_run:
-                continue
-
-            borgmatic.execute.execute_command(
-                [command],
-                output_log_level=(logging.ERROR if description == 'on-error' else logging.WARNING),
-                shell=True,
-                environment=make_environment(os.environ),
+        if not commands:
+            logger.debug(f'No commands to run for {description} hook')
+            continue
+
+        commands = [interpolate_context(description, command, context) for command in commands]
+
+        if len(commands) == 1:
+            logger.info(f'Running {description} command hook{dry_run_label}')
+        else:
+            logger.info(
+                f'Running {len(commands)} commands for {description} hook{dry_run_label}',
             )
-    finally:
-        if original_umask:
-            os.umask(original_umask)
+
+        if umask:
+            parsed_umask = int(str(umask), 8)
+            logger.debug(f'Setting hook umask to {oct(parsed_umask)}')
+            original_umask = os.umask(parsed_umask)
+        else:
+            original_umask = None
+
+        try:
+            for command in commands:
+                if dry_run:
+                    continue
+
+                borgmatic.execute.execute_command(
+                    [command],
+                    output_log_level=(
+                        logging.ERROR if hook_config.get('after') == 'error' else logging.ANSWER
+                    ),
+                    shell=True,
+                    environment=make_environment(os.environ),
+                )
+        finally:
+            if original_umask:
+                os.umask(original_umask)
+
+
+class Before_after_hooks:
+    '''
+    A Python context manager for executing command hooks both before and after the wrapped code.
+
+    Example use as a context manager:
+
+       with borgmatic.hooks.command.Before_after_hooks(
+           command_hooks=config.get('commands'),
+           before_after='do_stuff',
+           umask=config.get('umask'),
+           dry_run=dry_run,
+           hook_name='myhook',
+       ):
+            do()
+            some()
+            stuff()
+
+    With that context manager in place, "before" command hooks execute before the wrapped code runs,
+    and "after" command hooks execute after the wrapped code completes.
+    '''
+
+    def __init__(
+        self,
+        command_hooks,
+        before_after,
+        umask,
+        dry_run,
+        hook_name=None,
+        action_names=None,
+        **context,
+    ):
+        '''
+        Given a sequence of command hook configuration dicts, the before/after name, a umask to run
+        commands with, a dry run flag, the name of the calling hook, a sequence of action names, and
+        any context for the executed commands, save those data points for use below.
+        '''
+        self.command_hooks = command_hooks
+        self.before_after = before_after
+        self.umask = umask
+        self.dry_run = dry_run
+        self.hook_name = hook_name
+        self.action_names = action_names
+        self.context = context
+
+    def __enter__(self):
+        '''
+        Run the configured "before" command hooks that match the initialized data points.
+        '''
+        try:
+            execute_hooks(
+                borgmatic.hooks.command.filter_hooks(
+                    self.command_hooks,
+                    before=self.before_after,
+                    hook_name=self.hook_name,
+                    action_names=self.action_names,
+                ),
+                self.umask,
+                self.dry_run,
+                **self.context,
+            )
+        except (OSError, subprocess.CalledProcessError) as error:
+            if considered_soft_failure(error):
+                return
+
+            raise ValueError(f'Error running before {self.before_after} hook: {error}')
+
+    def __exit__(self, exception, value, traceback):
+        '''
+        Run the configured "after" command hooks that match the initialized data points.
+        '''
+        try:
+            execute_hooks(
+                borgmatic.hooks.command.filter_hooks(
+                    self.command_hooks,
+                    after=self.before_after,
+                    hook_name=self.hook_name,
+                    action_names=self.action_names,
+                ),
+                self.umask,
+                self.dry_run,
+                **self.context,
+            )
+        except (OSError, subprocess.CalledProcessError) as error:
+            if considered_soft_failure(error):
+                return
+
+            raise ValueError(f'Error running after {self.before_after} hook: {error}')
 
 
 def considered_soft_failure(error):

+ 35 - 27
borgmatic/hooks/data_source/bootstrap.py

@@ -6,6 +6,7 @@ import os
 
 import borgmatic.borg.pattern
 import borgmatic.config.paths
+import borgmatic.hooks.command
 
 logger = logging.getLogger(__name__)
 
@@ -37,38 +38,45 @@ def dump_data_sources(
     if hook_config and hook_config.get('store_config_files') is False:
         return []
 
-    borgmatic_manifest_path = os.path.join(
-        borgmatic_runtime_directory, 'bootstrap', 'manifest.json'
-    )
-
-    if dry_run:
-        return []
-
-    os.makedirs(os.path.dirname(borgmatic_manifest_path), exist_ok=True)
-
-    with open(borgmatic_manifest_path, 'w') as manifest_file:
-        json.dump(
-            {
-                'borgmatic_version': importlib.metadata.version('borgmatic'),
-                'config_paths': config_paths,
-            },
-            manifest_file,
+    with borgmatic.hooks.command.Before_after_hooks(
+        command_hooks=config.get('commands'),
+        before_after='dump_data_sources',
+        umask=config.get('umask'),
+        dry_run=dry_run,
+        hook_name='bootstrap',
+    ):
+        borgmatic_manifest_path = os.path.join(
+            borgmatic_runtime_directory, 'bootstrap', 'manifest.json'
         )
 
-    patterns.extend(
-        borgmatic.borg.pattern.Pattern(
-            config_path, source=borgmatic.borg.pattern.Pattern_source.HOOK
+        if dry_run:
+            return []
+
+        os.makedirs(os.path.dirname(borgmatic_manifest_path), exist_ok=True)
+
+        with open(borgmatic_manifest_path, 'w') as manifest_file:
+            json.dump(
+                {
+                    'borgmatic_version': importlib.metadata.version('borgmatic'),
+                    'config_paths': config_paths,
+                },
+                manifest_file,
+            )
+
+        patterns.extend(
+            borgmatic.borg.pattern.Pattern(
+                config_path, source=borgmatic.borg.pattern.Pattern_source.HOOK
+            )
+            for config_path in config_paths
         )
-        for config_path in config_paths
-    )
-    patterns.append(
-        borgmatic.borg.pattern.Pattern(
-            os.path.join(borgmatic_runtime_directory, 'bootstrap'),
-            source=borgmatic.borg.pattern.Pattern_source.HOOK,
+        patterns.append(
+            borgmatic.borg.pattern.Pattern(
+                os.path.join(borgmatic_runtime_directory, 'bootstrap'),
+                source=borgmatic.borg.pattern.Pattern_source.HOOK,
+            )
         )
-    )
 
-    return []
+        return []
 
 
 def remove_data_source_dumps(hook_config, config, borgmatic_runtime_directory, dry_run):

+ 33 - 25
borgmatic/hooks/data_source/btrfs.py

@@ -9,6 +9,7 @@ import subprocess
 import borgmatic.borg.pattern
 import borgmatic.config.paths
 import borgmatic.execute
+import borgmatic.hooks.command
 import borgmatic.hooks.data_source.snapshot
 
 logger = logging.getLogger(__name__)
@@ -249,41 +250,48 @@ def dump_data_sources(
 
     If this is a dry run, then don't actually snapshot anything.
     '''
-    dry_run_label = ' (dry run; not actually snapshotting anything)' if dry_run else ''
-    logger.info(f'Snapshotting Btrfs subvolumes{dry_run_label}')
+    with borgmatic.hooks.command.Before_after_hooks(
+        command_hooks=config.get('commands'),
+        before_after='dump_data_sources',
+        umask=config.get('umask'),
+        dry_run=dry_run,
+        hook_name='btrfs',
+    ):
+        dry_run_label = ' (dry run; not actually snapshotting anything)' if dry_run else ''
+        logger.info(f'Snapshotting Btrfs subvolumes{dry_run_label}')
 
-    # Based on the configured patterns, determine Btrfs subvolumes to backup. Only consider those
-    # patterns that came from actual user configuration (as opposed to, say, other hooks).
-    btrfs_command = hook_config.get('btrfs_command', 'btrfs')
-    findmnt_command = hook_config.get('findmnt_command', 'findmnt')
-    subvolumes = get_subvolumes(btrfs_command, findmnt_command, patterns)
+        # Based on the configured patterns, determine Btrfs subvolumes to backup. Only consider those
+        # patterns that came from actual user configuration (as opposed to, say, other hooks).
+        btrfs_command = hook_config.get('btrfs_command', 'btrfs')
+        findmnt_command = hook_config.get('findmnt_command', 'findmnt')
+        subvolumes = get_subvolumes(btrfs_command, findmnt_command, patterns)
 
-    if not subvolumes:
-        logger.warning(f'No Btrfs subvolumes found to snapshot{dry_run_label}')
+        if not subvolumes:
+            logger.warning(f'No Btrfs subvolumes found to snapshot{dry_run_label}')
 
-    # Snapshot each subvolume, rewriting patterns to use their snapshot paths.
-    for subvolume in subvolumes:
-        logger.debug(f'Creating Btrfs snapshot for {subvolume.path} subvolume')
+        # Snapshot each subvolume, rewriting patterns to use their snapshot paths.
+        for subvolume in subvolumes:
+            logger.debug(f'Creating Btrfs snapshot for {subvolume.path} subvolume')
 
-        snapshot_path = make_snapshot_path(subvolume.path)
+            snapshot_path = make_snapshot_path(subvolume.path)
 
-        if dry_run:
-            continue
+            if dry_run:
+                continue
 
-        snapshot_subvolume(btrfs_command, subvolume.path, snapshot_path)
+            snapshot_subvolume(btrfs_command, subvolume.path, snapshot_path)
 
-        for pattern in subvolume.contained_patterns:
-            snapshot_pattern = make_borg_snapshot_pattern(subvolume.path, pattern)
+            for pattern in subvolume.contained_patterns:
+                snapshot_pattern = make_borg_snapshot_pattern(subvolume.path, pattern)
 
-            # Attempt to update the pattern in place, since pattern order matters to Borg.
-            try:
-                patterns[patterns.index(pattern)] = snapshot_pattern
-            except ValueError:
-                patterns.append(snapshot_pattern)
+                # Attempt to update the pattern in place, since pattern order matters to Borg.
+                try:
+                    patterns[patterns.index(pattern)] = snapshot_pattern
+                except ValueError:
+                    patterns.append(snapshot_pattern)
 
-        patterns.append(make_snapshot_exclude_pattern(subvolume.path))
+            patterns.append(make_snapshot_exclude_pattern(subvolume.path))
 
-    return []
+        return []
 
 
 def delete_snapshot(btrfs_command, snapshot_path):  # pragma: no cover

+ 70 - 62
borgmatic/hooks/data_source/lvm.py

@@ -10,6 +10,7 @@ import subprocess
 import borgmatic.borg.pattern
 import borgmatic.config.paths
 import borgmatic.execute
+import borgmatic.hooks.command
 import borgmatic.hooks.data_source.snapshot
 
 logger = logging.getLogger(__name__)
@@ -197,77 +198,84 @@ def dump_data_sources(
 
     If this is a dry run, then don't actually snapshot anything.
     '''
-    dry_run_label = ' (dry run; not actually snapshotting anything)' if dry_run else ''
-    logger.info(f'Snapshotting LVM logical volumes{dry_run_label}')
-
-    # List logical volumes to get their mount points, but only consider those patterns that came
-    # from actual user configuration (as opposed to, say, other hooks).
-    lsblk_command = hook_config.get('lsblk_command', 'lsblk')
-    requested_logical_volumes = get_logical_volumes(lsblk_command, patterns)
-
-    # Snapshot each logical volume, rewriting source directories to use the snapshot paths.
-    snapshot_suffix = f'{BORGMATIC_SNAPSHOT_PREFIX}{os.getpid()}'
-    normalized_runtime_directory = os.path.normpath(borgmatic_runtime_directory)
-
-    if not requested_logical_volumes:
-        logger.warning(f'No LVM logical volumes found to snapshot{dry_run_label}')
-
-    for logical_volume in requested_logical_volumes:
-        snapshot_name = f'{logical_volume.name}_{snapshot_suffix}'
-        logger.debug(
-            f'Creating LVM snapshot {snapshot_name} of {logical_volume.mount_point}{dry_run_label}'
-        )
-
-        if not dry_run:
-            snapshot_logical_volume(
-                hook_config.get('lvcreate_command', 'lvcreate'),
-                snapshot_name,
-                logical_volume.device_path,
-                hook_config.get('snapshot_size', DEFAULT_SNAPSHOT_SIZE),
+    with borgmatic.hooks.command.Before_after_hooks(
+        command_hooks=config.get('commands'),
+        before_after='dump_data_sources',
+        umask=config.get('umask'),
+        dry_run=dry_run,
+        hook_name='lvm',
+    ):
+        dry_run_label = ' (dry run; not actually snapshotting anything)' if dry_run else ''
+        logger.info(f'Snapshotting LVM logical volumes{dry_run_label}')
+
+        # List logical volumes to get their mount points, but only consider those patterns that came
+        # from actual user configuration (as opposed to, say, other hooks).
+        lsblk_command = hook_config.get('lsblk_command', 'lsblk')
+        requested_logical_volumes = get_logical_volumes(lsblk_command, patterns)
+
+        # Snapshot each logical volume, rewriting source directories to use the snapshot paths.
+        snapshot_suffix = f'{BORGMATIC_SNAPSHOT_PREFIX}{os.getpid()}'
+        normalized_runtime_directory = os.path.normpath(borgmatic_runtime_directory)
+
+        if not requested_logical_volumes:
+            logger.warning(f'No LVM logical volumes found to snapshot{dry_run_label}')
+
+        for logical_volume in requested_logical_volumes:
+            snapshot_name = f'{logical_volume.name}_{snapshot_suffix}'
+            logger.debug(
+                f'Creating LVM snapshot {snapshot_name} of {logical_volume.mount_point}{dry_run_label}'
             )
 
-        # Get the device path for the snapshot we just created.
-        try:
-            snapshot = get_snapshots(
-                hook_config.get('lvs_command', 'lvs'), snapshot_name=snapshot_name
-            )[0]
-        except IndexError:
-            raise ValueError(f'Cannot find LVM snapshot {snapshot_name}')
-
-        # Mount the snapshot into a particular named temporary directory so that the snapshot ends
-        # up in the Borg archive at the "original" logical volume mount point path.
-        snapshot_mount_path = os.path.join(
-            normalized_runtime_directory,
-            'lvm_snapshots',
-            hashlib.shake_256(logical_volume.mount_point.encode('utf-8')).hexdigest(
-                MOUNT_POINT_HASH_LENGTH
-            ),
-            logical_volume.mount_point.lstrip(os.path.sep),
-        )
+            if not dry_run:
+                snapshot_logical_volume(
+                    hook_config.get('lvcreate_command', 'lvcreate'),
+                    snapshot_name,
+                    logical_volume.device_path,
+                    hook_config.get('snapshot_size', DEFAULT_SNAPSHOT_SIZE),
+                )
 
-        logger.debug(
-            f'Mounting LVM snapshot {snapshot_name} at {snapshot_mount_path}{dry_run_label}'
-        )
+            # Get the device path for the snapshot we just created.
+            try:
+                snapshot = get_snapshots(
+                    hook_config.get('lvs_command', 'lvs'), snapshot_name=snapshot_name
+                )[0]
+            except IndexError:
+                raise ValueError(f'Cannot find LVM snapshot {snapshot_name}')
+
+            # Mount the snapshot into a particular named temporary directory so that the snapshot ends
+            # up in the Borg archive at the "original" logical volume mount point path.
+            snapshot_mount_path = os.path.join(
+                normalized_runtime_directory,
+                'lvm_snapshots',
+                hashlib.shake_256(logical_volume.mount_point.encode('utf-8')).hexdigest(
+                    MOUNT_POINT_HASH_LENGTH
+                ),
+                logical_volume.mount_point.lstrip(os.path.sep),
+            )
 
-        if dry_run:
-            continue
+            logger.debug(
+                f'Mounting LVM snapshot {snapshot_name} at {snapshot_mount_path}{dry_run_label}'
+            )
 
-        mount_snapshot(
-            hook_config.get('mount_command', 'mount'), snapshot.device_path, snapshot_mount_path
-        )
+            if dry_run:
+                continue
 
-        for pattern in logical_volume.contained_patterns:
-            snapshot_pattern = make_borg_snapshot_pattern(
-                pattern, logical_volume, normalized_runtime_directory
+            mount_snapshot(
+                hook_config.get('mount_command', 'mount'), snapshot.device_path, snapshot_mount_path
             )
 
-            # Attempt to update the pattern in place, since pattern order matters to Borg.
-            try:
-                patterns[patterns.index(pattern)] = snapshot_pattern
-            except ValueError:
-                patterns.append(snapshot_pattern)
+            for pattern in logical_volume.contained_patterns:
+                snapshot_pattern = make_borg_snapshot_pattern(
+                    pattern, logical_volume, normalized_runtime_directory
+                )
+
+                # Attempt to update the pattern in place, since pattern order matters to Borg.
+                try:
+                    patterns[patterns.index(pattern)] = snapshot_pattern
+                except ValueError:
+                    patterns.append(snapshot_pattern)
 
-    return []
+        return []
 
 
 def unmount_snapshot(umount_command, snapshot_mount_path):  # pragma: no cover

+ 58 - 50
borgmatic/hooks/data_source/mariadb.py

@@ -6,6 +6,7 @@ import shlex
 
 import borgmatic.borg.pattern
 import borgmatic.config.paths
+import borgmatic.hooks.command
 import borgmatic.hooks.credential.parse
 from borgmatic.execute import (
     execute_command,
@@ -242,71 +243,78 @@ def dump_data_sources(
     Also append the the parent directory of the database dumps to the given patterns list, so the
     dumps actually get backed up.
     '''
-    dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
-    processes = []
-
-    logger.info(f'Dumping MariaDB databases{dry_run_label}')
-
-    for database in databases:
-        dump_path = make_dump_path(borgmatic_runtime_directory)
-        username = borgmatic.hooks.credential.parse.resolve_credential(
-            database.get('username'), config
-        )
-        password = borgmatic.hooks.credential.parse.resolve_credential(
-            database.get('password'), config
-        )
-        environment = dict(os.environ)
-        dump_database_names = database_names_to_dump(
-            database, config, username, password, environment, dry_run
-        )
-
-        if not dump_database_names:
-            if dry_run:
-                continue
-
-            raise ValueError('Cannot find any MariaDB databases to dump.')
+    with borgmatic.hooks.command.Before_after_hooks(
+        command_hooks=config.get('commands'),
+        before_after='dump_data_sources',
+        umask=config.get('umask'),
+        dry_run=dry_run,
+        hook_name='mariadb',
+    ):
+        dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
+        processes = []
+
+        logger.info(f'Dumping MariaDB databases{dry_run_label}')
+
+        for database in databases:
+            dump_path = make_dump_path(borgmatic_runtime_directory)
+            username = borgmatic.hooks.credential.parse.resolve_credential(
+                database.get('username'), config
+            )
+            password = borgmatic.hooks.credential.parse.resolve_credential(
+                database.get('password'), config
+            )
+            environment = dict(os.environ)
+            dump_database_names = database_names_to_dump(
+                database, config, username, password, environment, dry_run
+            )
 
-        if database['name'] == 'all' and database.get('format'):
-            for dump_name in dump_database_names:
-                renamed_database = copy.copy(database)
-                renamed_database['name'] = dump_name
+            if not dump_database_names:
+                if dry_run:
+                    continue
+
+                raise ValueError('Cannot find any MariaDB databases to dump.')
+
+            if database['name'] == 'all' and database.get('format'):
+                for dump_name in dump_database_names:
+                    renamed_database = copy.copy(database)
+                    renamed_database['name'] = dump_name
+                    processes.append(
+                        execute_dump_command(
+                            renamed_database,
+                            config,
+                            username,
+                            password,
+                            dump_path,
+                            (dump_name,),
+                            environment,
+                            dry_run,
+                            dry_run_label,
+                        )
+                    )
+            else:
                 processes.append(
                     execute_dump_command(
-                        renamed_database,
+                        database,
                         config,
                         username,
                         password,
                         dump_path,
-                        (dump_name,),
+                        dump_database_names,
                         environment,
                         dry_run,
                         dry_run_label,
                     )
                 )
-        else:
-            processes.append(
-                execute_dump_command(
-                    database,
-                    config,
-                    username,
-                    password,
-                    dump_path,
-                    dump_database_names,
-                    environment,
-                    dry_run,
-                    dry_run_label,
-                )
-            )
 
-    if not dry_run:
-        patterns.append(
-            borgmatic.borg.pattern.Pattern(
-                os.path.join(borgmatic_runtime_directory, 'mariadb_databases'),
-                source=borgmatic.borg.pattern.Pattern_source.HOOK,
+        if not dry_run:
+            patterns.append(
+                borgmatic.borg.pattern.Pattern(
+                    os.path.join(borgmatic_runtime_directory, 'mariadb_databases'),
+                    source=borgmatic.borg.pattern.Pattern_source.HOOK,
+                )
             )
-        )
 
-    return [process for process in processes if process]
+        return [process for process in processes if process]
 
 
 def remove_data_source_dumps(

+ 45 - 36
borgmatic/hooks/data_source/mongodb.py

@@ -4,6 +4,7 @@ import shlex
 
 import borgmatic.borg.pattern
 import borgmatic.config.paths
+import borgmatic.hooks.command
 import borgmatic.hooks.credential.parse
 from borgmatic.execute import execute_command, execute_command_with_processes
 from borgmatic.hooks.data_source import dump
@@ -48,45 +49,53 @@ def dump_data_sources(
     Also append the the parent directory of the database dumps to the given patterns list, so the
     dumps actually get backed up.
     '''
-    dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
-
-    logger.info(f'Dumping MongoDB databases{dry_run_label}')
-
-    processes = []
-    for database in databases:
-        name = database['name']
-        dump_filename = dump.make_data_source_dump_filename(
-            make_dump_path(borgmatic_runtime_directory),
-            name,
-            database.get('hostname'),
-            database.get('port'),
-        )
-        dump_format = database.get('format', 'archive')
+    with borgmatic.hooks.command.Before_after_hooks(
+        command_hooks=config.get('commands'),
+        before_after='dump_data_sources',
+        umask=config.get('umask'),
+        dry_run=dry_run,
+        hook_name='mongodb',
+    ):
+        dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
+
+        logger.info(f'Dumping MongoDB databases{dry_run_label}')
+
+        processes = []
+
+        for database in databases:
+            name = database['name']
+            dump_filename = dump.make_data_source_dump_filename(
+                make_dump_path(borgmatic_runtime_directory),
+                name,
+                database.get('hostname'),
+                database.get('port'),
+            )
+            dump_format = database.get('format', 'archive')
 
-        logger.debug(
-            f'Dumping MongoDB database {name} to {dump_filename}{dry_run_label}',
-        )
-        if dry_run:
-            continue
-
-        command = build_dump_command(database, config, dump_filename, dump_format)
-
-        if dump_format == 'directory':
-            dump.create_parent_directory_for_dump(dump_filename)
-            execute_command(command, shell=True)
-        else:
-            dump.create_named_pipe_for_dump(dump_filename)
-            processes.append(execute_command(command, shell=True, run_to_completion=False))
-
-    if not dry_run:
-        patterns.append(
-            borgmatic.borg.pattern.Pattern(
-                os.path.join(borgmatic_runtime_directory, 'mongodb_databases'),
-                source=borgmatic.borg.pattern.Pattern_source.HOOK,
+            logger.debug(
+                f'Dumping MongoDB database {name} to {dump_filename}{dry_run_label}',
+            )
+            if dry_run:
+                continue
+
+            command = build_dump_command(database, config, dump_filename, dump_format)
+
+            if dump_format == 'directory':
+                dump.create_parent_directory_for_dump(dump_filename)
+                execute_command(command, shell=True)
+            else:
+                dump.create_named_pipe_for_dump(dump_filename)
+                processes.append(execute_command(command, shell=True, run_to_completion=False))
+
+        if not dry_run:
+            patterns.append(
+                borgmatic.borg.pattern.Pattern(
+                    os.path.join(borgmatic_runtime_directory, 'mongodb_databases'),
+                    source=borgmatic.borg.pattern.Pattern_source.HOOK,
+                )
             )
-        )
 
-    return processes
+        return processes
 
 
 def make_password_config_file(password):

+ 58 - 50
borgmatic/hooks/data_source/mysql.py

@@ -5,6 +5,7 @@ import shlex
 
 import borgmatic.borg.pattern
 import borgmatic.config.paths
+import borgmatic.hooks.command
 import borgmatic.hooks.credential.parse
 import borgmatic.hooks.data_source.mariadb
 from borgmatic.execute import (
@@ -169,71 +170,78 @@ def dump_data_sources(
     Also append the the parent directory of the database dumps to the given patterns list, so the
     dumps actually get backed up.
     '''
-    dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
-    processes = []
-
-    logger.info(f'Dumping MySQL databases{dry_run_label}')
-
-    for database in databases:
-        dump_path = make_dump_path(borgmatic_runtime_directory)
-        username = borgmatic.hooks.credential.parse.resolve_credential(
-            database.get('username'), config
-        )
-        password = borgmatic.hooks.credential.parse.resolve_credential(
-            database.get('password'), config
-        )
-        environment = dict(os.environ)
-        dump_database_names = database_names_to_dump(
-            database, config, username, password, environment, dry_run
-        )
-
-        if not dump_database_names:
-            if dry_run:
-                continue
-
-            raise ValueError('Cannot find any MySQL databases to dump.')
+    with borgmatic.hooks.command.Before_after_hooks(
+        command_hooks=config.get('commands'),
+        before_after='dump_data_sources',
+        umask=config.get('umask'),
+        dry_run=dry_run,
+        hook_name='mysql',
+    ):
+        dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
+        processes = []
+
+        logger.info(f'Dumping MySQL databases{dry_run_label}')
+
+        for database in databases:
+            dump_path = make_dump_path(borgmatic_runtime_directory)
+            username = borgmatic.hooks.credential.parse.resolve_credential(
+                database.get('username'), config
+            )
+            password = borgmatic.hooks.credential.parse.resolve_credential(
+                database.get('password'), config
+            )
+            environment = dict(os.environ)
+            dump_database_names = database_names_to_dump(
+                database, config, username, password, environment, dry_run
+            )
 
-        if database['name'] == 'all' and database.get('format'):
-            for dump_name in dump_database_names:
-                renamed_database = copy.copy(database)
-                renamed_database['name'] = dump_name
+            if not dump_database_names:
+                if dry_run:
+                    continue
+
+                raise ValueError('Cannot find any MySQL databases to dump.')
+
+            if database['name'] == 'all' and database.get('format'):
+                for dump_name in dump_database_names:
+                    renamed_database = copy.copy(database)
+                    renamed_database['name'] = dump_name
+                    processes.append(
+                        execute_dump_command(
+                            renamed_database,
+                            config,
+                            username,
+                            password,
+                            dump_path,
+                            (dump_name,),
+                            environment,
+                            dry_run,
+                            dry_run_label,
+                        )
+                    )
+            else:
                 processes.append(
                     execute_dump_command(
-                        renamed_database,
+                        database,
                         config,
                         username,
                         password,
                         dump_path,
-                        (dump_name,),
+                        dump_database_names,
                         environment,
                         dry_run,
                         dry_run_label,
                     )
                 )
-        else:
-            processes.append(
-                execute_dump_command(
-                    database,
-                    config,
-                    username,
-                    password,
-                    dump_path,
-                    dump_database_names,
-                    environment,
-                    dry_run,
-                    dry_run_label,
-                )
-            )
 
-    if not dry_run:
-        patterns.append(
-            borgmatic.borg.pattern.Pattern(
-                os.path.join(borgmatic_runtime_directory, 'mysql_databases'),
-                source=borgmatic.borg.pattern.Pattern_source.HOOK,
+        if not dry_run:
+            patterns.append(
+                borgmatic.borg.pattern.Pattern(
+                    os.path.join(borgmatic_runtime_directory, 'mysql_databases'),
+                    source=borgmatic.borg.pattern.Pattern_source.HOOK,
+                )
             )
-        )
 
-    return [process for process in processes if process]
+        return [process for process in processes if process]
 
 
 def remove_data_source_dumps(

+ 108 - 92
borgmatic/hooks/data_source/postgresql.py

@@ -7,6 +7,7 @@ import shlex
 
 import borgmatic.borg.pattern
 import borgmatic.config.paths
+import borgmatic.hooks.command
 import borgmatic.hooks.credential.parse
 from borgmatic.execute import (
     execute_command,
@@ -141,112 +142,127 @@ def dump_data_sources(
 
     Raise ValueError if the databases to dump cannot be determined.
     '''
-    dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
-    processes = []
-
-    logger.info(f'Dumping PostgreSQL databases{dry_run_label}')
-
-    for database in databases:
-        environment = make_environment(database, config)
-        dump_path = make_dump_path(borgmatic_runtime_directory)
-        dump_database_names = database_names_to_dump(database, config, environment, dry_run)
-
-        if not dump_database_names:
-            if dry_run:
-                continue
-
-            raise ValueError('Cannot find any PostgreSQL databases to dump.')
-
-        for database_name in dump_database_names:
-            dump_format = database.get('format', None if database_name == 'all' else 'custom')
-            compression = database.get('compression')
-            default_dump_command = 'pg_dumpall' if database_name == 'all' else 'pg_dump'
-            dump_command = tuple(
-                shlex.quote(part)
-                for part in shlex.split(database.get('pg_dump_command') or default_dump_command)
-            )
-            dump_filename = dump.make_data_source_dump_filename(
-                dump_path,
-                database_name,
-                database.get('hostname'),
-                database.get('port'),
-            )
-            if os.path.exists(dump_filename):
-                logger.warning(
-                    f'Skipping duplicate dump of PostgreSQL database "{database_name}" to {dump_filename}'
+    with borgmatic.hooks.command.Before_after_hooks(
+        command_hooks=config.get('commands'),
+        before_after='dump_data_sources',
+        umask=config.get('umask'),
+        dry_run=dry_run,
+        hook_name='postgresql',
+    ):
+        dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
+        processes = []
+
+        logger.info(f'Dumping PostgreSQL databases{dry_run_label}')
+
+        for database in databases:
+            environment = make_environment(database, config)
+            dump_path = make_dump_path(borgmatic_runtime_directory)
+            dump_database_names = database_names_to_dump(database, config, environment, dry_run)
+
+            if not dump_database_names:
+                if dry_run:
+                    continue
+
+                raise ValueError('Cannot find any PostgreSQL databases to dump.')
+
+            for database_name in dump_database_names:
+                dump_format = database.get('format', None if database_name == 'all' else 'custom')
+                compression = database.get('compression')
+                default_dump_command = 'pg_dumpall' if database_name == 'all' else 'pg_dump'
+                dump_command = tuple(
+                    shlex.quote(part)
+                    for part in shlex.split(database.get('pg_dump_command') or default_dump_command)
                 )
-                continue
-
-            command = (
-                dump_command
-                + (
-                    '--no-password',
-                    '--clean',
-                    '--if-exists',
+                dump_filename = dump.make_data_source_dump_filename(
+                    dump_path,
+                    database_name,
+                    database.get('hostname'),
+                    database.get('port'),
                 )
-                + (('--host', shlex.quote(database['hostname'])) if 'hostname' in database else ())
-                + (('--port', shlex.quote(str(database['port']))) if 'port' in database else ())
-                + (
-                    (
-                        '--username',
-                        shlex.quote(
-                            borgmatic.hooks.credential.parse.resolve_credential(
-                                database['username'], config
-                            )
-                        ),
+                if os.path.exists(dump_filename):
+                    logger.warning(
+                        f'Skipping duplicate dump of PostgreSQL database "{database_name}" to {dump_filename}'
                     )
-                    if 'username' in database
-                    else ()
-                )
-                + (('--no-owner',) if database.get('no_owner', False) else ())
-                + (('--format', shlex.quote(dump_format)) if dump_format else ())
-                + (('--compress', shlex.quote(str(compression))) if compression is not None else ())
-                + (('--file', shlex.quote(dump_filename)) if dump_format == 'directory' else ())
-                + (
-                    tuple(shlex.quote(option) for option in database['options'].split(' '))
-                    if 'options' in database
-                    else ()
+                    continue
+
+                command = (
+                    dump_command
+                    + (
+                        '--no-password',
+                        '--clean',
+                        '--if-exists',
+                    )
+                    + (
+                        ('--host', shlex.quote(database['hostname']))
+                        if 'hostname' in database
+                        else ()
+                    )
+                    + (('--port', shlex.quote(str(database['port']))) if 'port' in database else ())
+                    + (
+                        (
+                            '--username',
+                            shlex.quote(
+                                borgmatic.hooks.credential.parse.resolve_credential(
+                                    database['username'], config
+                                )
+                            ),
+                        )
+                        if 'username' in database
+                        else ()
+                    )
+                    + (('--no-owner',) if database.get('no_owner', False) else ())
+                    + (('--format', shlex.quote(dump_format)) if dump_format else ())
+                    + (
+                        ('--compress', shlex.quote(str(compression)))
+                        if compression is not None
+                        else ()
+                    )
+                    + (('--file', shlex.quote(dump_filename)) if dump_format == 'directory' else ())
+                    + (
+                        tuple(shlex.quote(option) for option in database['options'].split(' '))
+                        if 'options' in database
+                        else ()
+                    )
+                    + (() if database_name == 'all' else (shlex.quote(database_name),))
+                    # Use shell redirection rather than the --file flag to sidestep synchronization issues
+                    # when pg_dump/pg_dumpall tries to write to a named pipe. But for the directory dump
+                    # format in a particular, a named destination is required, and redirection doesn't work.
+                    + (('>', shlex.quote(dump_filename)) if dump_format != 'directory' else ())
                 )
-                + (() if database_name == 'all' else (shlex.quote(database_name),))
-                # Use shell redirection rather than the --file flag to sidestep synchronization issues
-                # when pg_dump/pg_dumpall tries to write to a named pipe. But for the directory dump
-                # format in a particular, a named destination is required, and redirection doesn't work.
-                + (('>', shlex.quote(dump_filename)) if dump_format != 'directory' else ())
-            )
 
-            logger.debug(
-                f'Dumping PostgreSQL database "{database_name}" to {dump_filename}{dry_run_label}'
-            )
-            if dry_run:
-                continue
-
-            if dump_format == 'directory':
-                dump.create_parent_directory_for_dump(dump_filename)
-                execute_command(
-                    command,
-                    shell=True,
-                    environment=environment,
+                logger.debug(
+                    f'Dumping PostgreSQL database "{database_name}" to {dump_filename}{dry_run_label}'
                 )
-            else:
-                dump.create_named_pipe_for_dump(dump_filename)
-                processes.append(
+                if dry_run:
+                    continue
+
+                if dump_format == 'directory':
+                    dump.create_parent_directory_for_dump(dump_filename)
                     execute_command(
                         command,
                         shell=True,
                         environment=environment,
-                        run_to_completion=False,
                     )
-                )
+                else:
+                    dump.create_named_pipe_for_dump(dump_filename)
+                    processes.append(
+                        execute_command(
+                            command,
+                            shell=True,
+                            environment=environment,
+                            run_to_completion=False,
+                        )
+                    )
 
-    if not dry_run:
-        patterns.append(
-            borgmatic.borg.pattern.Pattern(
-                os.path.join(borgmatic_runtime_directory, 'postgresql_databases'),
-                source=borgmatic.borg.pattern.Pattern_source.HOOK,
+        if not dry_run:
+            patterns.append(
+                borgmatic.borg.pattern.Pattern(
+                    os.path.join(borgmatic_runtime_directory, 'postgresql_databases'),
+                    source=borgmatic.borg.pattern.Pattern_source.HOOK,
+                )
             )
-        )
 
-    return processes
+        return processes
 
 
 def remove_data_source_dumps(

+ 53 - 45
borgmatic/hooks/data_source/sqlite.py

@@ -4,6 +4,7 @@ import shlex
 
 import borgmatic.borg.pattern
 import borgmatic.config.paths
+import borgmatic.hooks.command
 from borgmatic.execute import execute_command, execute_command_with_processes
 from borgmatic.hooks.data_source import dump
 
@@ -47,55 +48,62 @@ def dump_data_sources(
     Also append the the parent directory of the database dumps to the given patterns list, so the
     dumps actually get backed up.
     '''
-    dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
-    processes = []
-
-    logger.info(f'Dumping SQLite databases{dry_run_label}')
-
-    for database in databases:
-        database_path = database['path']
-
-        if database['name'] == 'all':
-            logger.warning('The "all" database name has no meaning for SQLite databases')
-        if not os.path.exists(database_path):
-            logger.warning(
-                f'No SQLite database at {database_path}; an empty database will be created and dumped'
+    with borgmatic.hooks.command.Before_after_hooks(
+        command_hooks=config.get('commands'),
+        before_after='dump_data_sources',
+        umask=config.get('umask'),
+        dry_run=dry_run,
+        hook_name='sqlite',
+    ):
+        dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
+        processes = []
+
+        logger.info(f'Dumping SQLite databases{dry_run_label}')
+
+        for database in databases:
+            database_path = database['path']
+
+            if database['name'] == 'all':
+                logger.warning('The "all" database name has no meaning for SQLite databases')
+            if not os.path.exists(database_path):
+                logger.warning(
+                    f'No SQLite database at {database_path}; an empty database will be created and dumped'
+                )
+
+            dump_path = make_dump_path(borgmatic_runtime_directory)
+            dump_filename = dump.make_data_source_dump_filename(dump_path, database['name'])
+
+            if os.path.exists(dump_filename):
+                logger.warning(
+                    f'Skipping duplicate dump of SQLite database at {database_path} to {dump_filename}'
+                )
+                continue
+
+            command = (
+                'sqlite3',
+                shlex.quote(database_path),
+                '.dump',
+                '>',
+                shlex.quote(dump_filename),
             )
-
-        dump_path = make_dump_path(borgmatic_runtime_directory)
-        dump_filename = dump.make_data_source_dump_filename(dump_path, database['name'])
-
-        if os.path.exists(dump_filename):
-            logger.warning(
-                f'Skipping duplicate dump of SQLite database at {database_path} to {dump_filename}'
+            logger.debug(
+                f'Dumping SQLite database at {database_path} to {dump_filename}{dry_run_label}'
             )
-            continue
-
-        command = (
-            'sqlite3',
-            shlex.quote(database_path),
-            '.dump',
-            '>',
-            shlex.quote(dump_filename),
-        )
-        logger.debug(
-            f'Dumping SQLite database at {database_path} to {dump_filename}{dry_run_label}'
-        )
-        if dry_run:
-            continue
-
-        dump.create_named_pipe_for_dump(dump_filename)
-        processes.append(execute_command(command, shell=True, run_to_completion=False))
-
-    if not dry_run:
-        patterns.append(
-            borgmatic.borg.pattern.Pattern(
-                os.path.join(borgmatic_runtime_directory, 'sqlite_databases'),
-                source=borgmatic.borg.pattern.Pattern_source.HOOK,
+            if dry_run:
+                continue
+
+            dump.create_named_pipe_for_dump(dump_filename)
+            processes.append(execute_command(command, shell=True, run_to_completion=False))
+
+        if not dry_run:
+            patterns.append(
+                borgmatic.borg.pattern.Pattern(
+                    os.path.join(borgmatic_runtime_directory, 'sqlite_databases'),
+                    source=borgmatic.borg.pattern.Pattern_source.HOOK,
+                )
             )
-        )
 
-    return processes
+        return processes
 
 
 def remove_data_source_dumps(

+ 59 - 51
borgmatic/hooks/data_source/zfs.py

@@ -9,6 +9,7 @@ import subprocess
 import borgmatic.borg.pattern
 import borgmatic.config.paths
 import borgmatic.execute
+import borgmatic.hooks.command
 import borgmatic.hooks.data_source.snapshot
 
 logger = logging.getLogger(__name__)
@@ -243,64 +244,71 @@ def dump_data_sources(
 
     If this is a dry run, then don't actually snapshot anything.
     '''
-    dry_run_label = ' (dry run; not actually snapshotting anything)' if dry_run else ''
-    logger.info(f'Snapshotting ZFS datasets{dry_run_label}')
-
-    # List ZFS datasets to get their mount points, but only consider those patterns that came from
-    # actual user configuration (as opposed to, say, other hooks).
-    zfs_command = hook_config.get('zfs_command', 'zfs')
-    requested_datasets = get_datasets_to_backup(zfs_command, patterns)
-
-    # Snapshot each dataset, rewriting patterns to use the snapshot paths.
-    snapshot_name = f'{BORGMATIC_SNAPSHOT_PREFIX}{os.getpid()}'
-    normalized_runtime_directory = os.path.normpath(borgmatic_runtime_directory)
-
-    if not requested_datasets:
-        logger.warning(f'No ZFS datasets found to snapshot{dry_run_label}')
-
-    for dataset in requested_datasets:
-        full_snapshot_name = f'{dataset.name}@{snapshot_name}'
-        logger.debug(
-            f'Creating ZFS snapshot {full_snapshot_name} of {dataset.mount_point}{dry_run_label}'
-        )
-
-        if not dry_run:
-            snapshot_dataset(zfs_command, full_snapshot_name)
-
-        # Mount the snapshot into a particular named temporary directory so that the snapshot ends
-        # up in the Borg archive at the "original" dataset mount point path.
-        snapshot_mount_path = os.path.join(
-            normalized_runtime_directory,
-            'zfs_snapshots',
-            hashlib.shake_256(dataset.mount_point.encode('utf-8')).hexdigest(
-                MOUNT_POINT_HASH_LENGTH
-            ),
-            dataset.mount_point.lstrip(os.path.sep),
-        )
+    with borgmatic.hooks.command.Before_after_hooks(
+        command_hooks=config.get('commands'),
+        before_after='dump_data_sources',
+        umask=config.get('umask'),
+        dry_run=dry_run,
+        hook_name='zfs',
+    ):
+        dry_run_label = ' (dry run; not actually snapshotting anything)' if dry_run else ''
+        logger.info(f'Snapshotting ZFS datasets{dry_run_label}')
+
+        # List ZFS datasets to get their mount points, but only consider those patterns that came from
+        # actual user configuration (as opposed to, say, other hooks).
+        zfs_command = hook_config.get('zfs_command', 'zfs')
+        requested_datasets = get_datasets_to_backup(zfs_command, patterns)
+
+        # Snapshot each dataset, rewriting patterns to use the snapshot paths.
+        snapshot_name = f'{BORGMATIC_SNAPSHOT_PREFIX}{os.getpid()}'
+        normalized_runtime_directory = os.path.normpath(borgmatic_runtime_directory)
+
+        if not requested_datasets:
+            logger.warning(f'No ZFS datasets found to snapshot{dry_run_label}')
+
+        for dataset in requested_datasets:
+            full_snapshot_name = f'{dataset.name}@{snapshot_name}'
+            logger.debug(
+                f'Creating ZFS snapshot {full_snapshot_name} of {dataset.mount_point}{dry_run_label}'
+            )
 
-        logger.debug(
-            f'Mounting ZFS snapshot {full_snapshot_name} at {snapshot_mount_path}{dry_run_label}'
-        )
+            if not dry_run:
+                snapshot_dataset(zfs_command, full_snapshot_name)
+
+            # Mount the snapshot into a particular named temporary directory so that the snapshot ends
+            # up in the Borg archive at the "original" dataset mount point path.
+            snapshot_mount_path = os.path.join(
+                normalized_runtime_directory,
+                'zfs_snapshots',
+                hashlib.shake_256(dataset.mount_point.encode('utf-8')).hexdigest(
+                    MOUNT_POINT_HASH_LENGTH
+                ),
+                dataset.mount_point.lstrip(os.path.sep),
+            )
 
-        if dry_run:
-            continue
+            logger.debug(
+                f'Mounting ZFS snapshot {full_snapshot_name} at {snapshot_mount_path}{dry_run_label}'
+            )
 
-        mount_snapshot(
-            hook_config.get('mount_command', 'mount'), full_snapshot_name, snapshot_mount_path
-        )
+            if dry_run:
+                continue
 
-        for pattern in dataset.contained_patterns:
-            snapshot_pattern = make_borg_snapshot_pattern(
-                pattern, dataset, normalized_runtime_directory
+            mount_snapshot(
+                hook_config.get('mount_command', 'mount'), full_snapshot_name, snapshot_mount_path
             )
 
-            # Attempt to update the pattern in place, since pattern order matters to Borg.
-            try:
-                patterns[patterns.index(pattern)] = snapshot_pattern
-            except ValueError:
-                patterns.append(snapshot_pattern)
+            for pattern in dataset.contained_patterns:
+                snapshot_pattern = make_borg_snapshot_pattern(
+                    pattern, dataset, normalized_runtime_directory
+                )
+
+                # Attempt to update the pattern in place, since pattern order matters to Borg.
+                try:
+                    patterns[patterns.index(pattern)] = snapshot_pattern
+                except ValueError:
+                    patterns.append(snapshot_pattern)
 
-    return []
+        return []
 
 
 def unmount_snapshot(umount_command, snapshot_mount_path):  # pragma: no cover

+ 1 - 0
borgmatic/hooks/dispatch.py

@@ -3,6 +3,7 @@ import importlib
 import logging
 import pkgutil
 
+import borgmatic.hooks.command
 import borgmatic.hooks.credential
 import borgmatic.hooks.data_source
 import borgmatic.hooks.monitoring

+ 1 - 0
docs/_includes/index.css

@@ -165,6 +165,7 @@ ul {
 }
 li {
 	padding: .25em 0;
+        line-height: 1.5;
 }
 li ul {
         list-style-type: disc;

+ 203 - 48
docs/how-to/add-preparation-and-cleanup-steps-to-backups.md

@@ -8,16 +8,118 @@ eleventyNavigation:
 ## Preparation and cleanup hooks
 
 If you find yourself performing preparation tasks before your backup runs or
-cleanup work afterwards, borgmatic command hooks may be of interest. These are
-custom shell commands you can configure borgmatic to execute at various points
-as it runs.
+doing cleanup work afterwards, borgmatic command hooks may be of interest. These
+are custom shell commands you can configure borgmatic to execute at various
+points as it runs.
 
-But if you're looking to backup a database, it's probably easier to use the
+(But if you're looking to backup a database, it's probably easier to use the
 [database backup
 feature](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)
-instead.
+instead.)
 
-You can specify `before_backup` hooks to perform preparation steps before
+<span class="minilink minilink-addedin">New in version 2.0.0</span> Command
+hooks are now configured via a list of `commands:` in your borgmatic
+configuration file. For example:
+
+```yaml
+commands:
+    - before: action
+      when: [create]
+      run:
+          - echo "Before create!"
+    - after: action
+      when:
+          - create
+          - prune
+      run:
+          - echo "After create or prune!"
+    - after: error
+      run:
+          - echo "Something went wrong!"
+```
+
+If you're coming from an older version of borgmatic, there is tooling to help
+you [upgrade your
+configuration](https://torsion.org/borgmatic/docs/how-to/upgrade/#upgrading-your-configuration)
+to this new command hook format.
+
+Note that if a `run:` command contains a special YAML character such as a colon,
+you may need to quote the entire string (or use a [multiline
+string](https://yaml-multiline.info/)) to avoid an error:
+
+```yaml
+commands:
+    - before: action
+      when: [create]
+      run:
+          - "echo Backup: start"
+```
+
+Each command in the `commands:` list has the following options:
+
+ * `before` or `after`: Name for the point in borgmatic's execution that the commands should be run before or after, one of:
+    * `action` runs before each action for each repository. This replaces the deprecated `before_create`, `after_prune`, etc.
+    * `repository` runs before or after all actions for each repository. This replaces the deprecated `before_actions` and `after_actions`.
+    * `configuration` runs before or after all actions and repositories in the current configuration file.
+    * `everything` runs before or after all configuration files. This replaces the deprecated `before_everything` and `after_everything`.
+    * `error` runs after an error occurs—and it's only available for `after`. This replaces the deprecated `on_error` hook.
+ * `when`: Only trigger the hook when borgmatic is run with particular actions (`create`, `prune`, etc.) listed here. Defaults to running for all actions.
+ * `run`: List of one or more shell commands or scripts to run when this command hook is triggered.
+
+borgmatic does not run `error` hooks if an error occurs within an `everything`
+hook.
+
+There's also another command hook that works a little differently:
+
+```yaml
+commands:
+    - before: dump_data_sources
+      hooks: [postgresql]
+      run:
+          - echo "Right before the PostgreSQL database dump!"
+```
+
+This command hook has the following options:
+
+ * `before` or `after`: `dump_data_sources`
+ * `hooks`: Names of other hooks that this command hook applies to, e.g. `postgresql`, `mariadb`, `zfs`, `btrfs`, etc. Defaults to all hooks of the relevant type.
+ * `run`: One or more shell commands or scripts to run when this command hook is triggered.
+
+
+### Order of execution
+
+Here's a way of visualizing how all of these command hooks slot into borgmatic's
+execution.
+
+Let's say you've got a borgmatic configuration file with a configured
+repository. And suppose you configure several command hooks and then run
+borgmatic for the `create` and `prune` actions. Here's the order of execution:
+
+ * Run `before: everything` hooks (from all configuration files).
+    * Run `before: configuration` hooks (from the first configuration file).
+        * Run `before: repository` hooks (for the first repository).
+            * Run `before: action` hooks for `create`.
+                * Run `before: dump_data_sources` hooks (e.g. for the PostgreSQL hook).
+                * Actually dump data sources (e.g. PostgreSQL databases).
+                * Run `after: dump_data_sources` hooks (e.g. for the PostgreSQL hook).
+            * Actually run the `create` action (e.g. `borg create`).
+            * Run `after: action` hooks for `create`.
+            * Run `before: action` hooks for `prune`.
+            * Actually run the `prune` action (e.g. `borg prune`).
+            * Run `after: action` hooks for `prune`.
+        * Run `after: repository` hooks (for the first repository).
+    * Run `after: configuration` hooks (from the first configuration file).
+ * Run `after: everything` hooks (from all configuration files).
+
+This same order of execution extends to multiple repositories and/or
+configuration files.
+
+
+### Deprecated command hooks
+
+<span class="minilink minilink-addedin">Prior to version 2.0.0</span> The
+command hooks worked a little differently. In these older versions of borgmatic,
+you can specify `before_backup` hooks to perform preparation steps before
 running backups and specify `after_backup` hooks to perform cleanup steps
 afterwards. Here's an example:
 
@@ -58,49 +160,13 @@ but not if an error occurs in a previous hook or in the backups themselves.
 (Prior to borgmatic 1.6.0, these hooks instead ran once per configuration file
 rather than once per repository.)
 
-
-## Variable interpolation
-
-The before and after action hooks support interpolating particular runtime
-variables into the hook command. Here's an example that assumes you provide a
-separate shell script:
-
-```yaml
-after_prune:
-    - record-prune.sh "{configuration_filename}" "{repository}"
-```
-
-<span class="minilink minilink-addedin">Prior to version 1.8.0</span> Put
-this option in the `hooks:` section of your configuration.
-
-In this example, when the hook is triggered, borgmatic interpolates runtime
-values into the hook command: the borgmatic configuration filename and the
-paths of the current Borg repository. Here's the full set of supported
-variables you can use here:
-
- * `configuration_filename`: borgmatic configuration filename in which the
-   hook was defined
- * `log_file`
-   <span class="minilink minilink-addedin">New in version 1.7.12</span>:
-   path of the borgmatic log file, only set when the `--log-file` flag is used
- * `repository`: path of the current repository as configured in the current
-   borgmatic configuration file
- * `repository_label` <span class="minilink minilink-addedin">New in version
-   1.8.12</span>: label of the current repository as configured in the current
-   borgmatic configuration file
-
-Note that you can also interpolate in [arbitrary environment
-variables](https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/).
-
-
-## Global hooks
-
 You can also use `before_everything` and `after_everything` hooks to perform
 global setup or cleanup:
 
 ```yaml
 before_everything:
     - set-up-stuff-globally
+
 after_everything:
     - clean-up-stuff-globally
 ```
@@ -118,13 +184,102 @@ but only if there is a `create` action. It runs even if an error occurs during
 a backup or a backup hook, but not if an error occurs during a
 `before_everything` hook.
 
+`on_error` hooks run when an error occurs, but only if there is a `create`,
+`prune`, `compact`, or `check` action. For instance, borgmatic can run
+configurable shell commands to fire off custom error notifications or take other
+actions, so you can get alerted as soon as something goes wrong. Here's a
+not-so-useful example:
 
-## Error hooks
+```yaml
+on_error:
+    - echo "Error while creating a backup or running a backup hook."
+```
 
-borgmatic also runs `on_error` hooks if an error occurs, either when creating
-a backup or running a backup hook. See the [monitoring and alerting
-documentation](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
-for more information.
+<span class="minilink minilink-addedin">Prior to version 1.8.0</span> Put
+this option in the `hooks:` section of your configuration.
+
+The `on_error` hook supports interpolating particular runtime variables into
+the hook command. Here's an example that assumes you provide a separate shell
+script to handle the alerting:
+
+```yaml
+on_error:
+    - send-text-message.sh
+```
+
+borgmatic does not run `on_error` hooks if an error occurs within a
+`before_everything` or `after_everything` hook.
+
+
+## Variable interpolation
+
+The command action hooks support interpolating particular runtime variables into
+the commands that are run. Here are a couple of examples that assume you provide
+separate shell scripts:
+
+```yaml
+commands:
+    - after: action
+      when: [prune]
+      run:
+          - record-prune.sh {configuration_filename} {repository}
+    - after: error
+      when: [create]
+      run:
+          - send-text-message.sh {configuration_filename} {repository}
+```
+
+In this example, when the hook is triggered, borgmatic interpolates runtime
+values into each hook command: the borgmatic configuration filename and the
+paths of the current Borg repository.
+
+Here's the full set of supported variables you can use here:
+
+ * `configuration_filename`: borgmatic configuration filename in which the
+   hook was defined
+ * `log_file`
+   <span class="minilink minilink-addedin">New in version 1.7.12</span>:
+   path of the borgmatic log file, only set when the `--log-file` flag is used
+ * `repository`: path of the current repository as configured in the current
+   borgmatic configuration file, if applicable to the current hook
+ * `repository_label` <span class="minilink minilink-addedin">New in version
+   1.8.12</span>: label of the current repository as configured in the current
+   borgmatic configuration file, if applicable to the current hook
+ * `error`: the error message itself, only applies to `error` hooks
+ * `output`: output of the command that failed, only applies to `error` hooks
+   (may be blank if an error occurred without running a command)
+
+Not all command hooks support all variables. For instance, the `everything` and
+`configuration` hooks don't support repository variables because those hooks
+don't run in the context of a single repository. But the deprecated command
+hooks (`before_backup`, `on_error`, etc.) do generally support variable
+interpolation.
+
+borgmatic automatically escapes these interpolated values to prevent shell
+injection attacks. One implication is that you shouldn't wrap the interpolated
+values in your own quotes, as that will interfere with the quoting performed by
+borgmatic and result in your command receiving incorrect arguments. For
+instance, this won't work:
+
+```yaml
+commands:
+    - after: error
+      run:
+          # Don't do this! It won't work, as the {error} value is already quoted.
+          - send-text-message.sh "Uh oh: {error}"
+```
+
+Do this instead:
+
+```yaml
+commands:
+    - after: error
+      run:
+          - send-text-message.sh {error}
+```
+
+Note that you can also interpolate [arbitrary environment
+variables](https://torsion.org/borgmatic/docs/how-to/provide-your-passwords/).
 
 
 ## Hook output

+ 47 - 46
docs/how-to/backup-to-a-removable-drive-or-an-intermittent-server.md

@@ -29,17 +29,14 @@ concept of "soft failure" come in.
 
 This feature leverages [borgmatic command
 hooks](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/),
-so first familiarize yourself with them. The idea is that you write a simple
-test in the form of a borgmatic hook to see if backups should proceed or not.
+so familiarize yourself with them first. The idea is that you write a simple
+test in the form of a borgmatic command hook to see if backups should proceed or
+not.
 
 The way the test works is that if any of your hook commands return a special
 exit status of 75, that indicates to borgmatic that it's a temporary failure,
 and borgmatic should skip all subsequent actions for the current repository.
 
-<span class="minilink minilink-addedin">Prior to version 1.9.0</span> Soft
-failures skipped subsequent actions for *all* repositories in the
-configuration file, rather than just for the current repository.
-
 If you return any status besides 75, then it's a standard success or error.
 (Zero is success; anything else other than 75 is an error).
 
@@ -62,33 +59,37 @@ these options in the `location:` section of your configuration.
 <span class="minilink minilink-addedin">Prior to version 1.7.10</span> Omit
 the `path:` portion of the `repositories` list.
 
-Then, write a `before_backup` hook in that same configuration file that uses
-the external `findmnt` utility to see whether the drive is mounted before
-proceeding.
+Then, make a command hook in that same configuration file that uses the external
+`findmnt` utility to see whether the drive is mounted before proceeding.
+
+```yaml
+commands:
+    - before: repository
+      run:
+          - findmnt /mnt/removable > /dev/null || exit 75
+```
+
+<span class="minilink minilink-addedin">Prior to version 2.0.0</span> Use the
+deprecated `before_actions` hook instead:
 
 ```yaml
-before_backup:
+before_actions:
     - findmnt /mnt/removable > /dev/null || exit 75
 ```
 
 <span class="minilink minilink-addedin">Prior to version 1.8.0</span> Put this
 option in the `hooks:` section of your configuration.
 
+<span class="minilink minilink-addedin">Prior to version 1.7.0</span> Use
+`before_create` or similar instead of `before_actions`, which was introduced in
+borgmatic 1.7.0.
+
 What this does is check if the `findmnt` command errors when probing for a
 particular mount point. If it does error, then it returns exit code 75 to
 borgmatic. borgmatic logs the soft failure, skips all further actions for the
 current repository, and proceeds onward to any other repositories and/or
 configuration files you may have.
 
-If you'd prefer not to use a separate configuration file, and you'd rather
-have multiple repositories in a single configuration file, you can make your
-`before_backup` soft failure test [vary by
-repository](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/#variable-interpolation).
-That might require calling out to a separate script though.
-
-Note that `before_backup` only runs on the `create` action. See below about
-optionally using `before_actions` instead.
-
 You can imagine a similar check for the sometimes-online server case:
 
 ```yaml
@@ -98,50 +99,50 @@ source_directories:
 repositories:
     - path: ssh://me@buddys-server.org/./backup.borg
 
-before_backup:
-    - ping -q -c 1 buddys-server.org > /dev/null || exit 75
+commands:
+    - before: repository
+      run:
+          - ping -q -c 1 buddys-server.org > /dev/null || exit 75
 ```
 
 Or to only run backups if the battery level is high enough:
 
 ```yaml
-before_backup:
-    - is_battery_percent_at_least.sh 25
+commands:
+    - before: repository
+      run:
+          - is_battery_percent_at_least.sh 25
 ```
 
-(Writing the battery script is left as an exercise to the reader.)
-
-<span class="minilink minilink-addedin">New in version 1.7.0</span> The
-`before_actions` and `after_actions` hooks run before/after all the actions
-(like `create`, `prune`, etc.) for each repository. So if you'd like your soft
-failure command hook to run regardless of action, consider using
-`before_actions` instead of `before_backup`.
+Writing the battery script is left as an exercise to the reader.
 
 
 ## Caveats and details
 
 There are some caveats you should be aware of with this feature.
 
- * You'll generally want to put a soft failure command in the `before_backup`
+ * You'll generally want to put a soft failure command in a `before` command
    hook, so as to gate whether the backup action occurs. While a soft failure is
-   also supported in the `after_backup` hook, returning a soft failure there
+   also supported in an `after` command hook, returning a soft failure there
    won't prevent any actions from occurring, because they've already occurred!
-   Similarly, you can return a soft failure from an `on_error` hook, but at
+   Similarly, you can return a soft failure from an `error` command hook, but at
    that point it's too late to prevent the error.
  * Returning a soft failure does prevent further commands in the same hook from
-   executing. So, like a standard error, it is an "early out". Unlike a standard
+   executing. So, like a standard error, it is an "early out." Unlike a standard
    error, borgmatic does not display it in angry red text or consider it a
    failure.
- * Any given soft failure only applies to the a single borgmatic repository
-   (as of borgmatic 1.9.0). So if you have other repositories you don't want
-   soft-failed, then make your soft fail test [vary by
-   repository](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/#variable-interpolation)—or
-   put anything that you don't want soft-failed (like always-online cloud
-   backups) in separate configuration files from your soft-failing
-   repositories.
+ * <span class="minilink minilink-addedin">New in version 1.9.0</span> Soft
+   failures in `action` or `before_*` command hooks only skip the current
+   repository rather than all repositories in a configuration file.
+ * If you're writing a soft failure script that you want to vary based on the
+   current repository, for instance so you can have multiple repositories in a
+   single configuration file, have a look at [command hook variable
+   interpolation](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/#variable-interpolation).
+   And there's always still the option of putting anything that you don't want
+   soft-failed (like always-online cloud backups) in separate configuration
+   files from your soft-failing repositories.
  * The soft failure doesn't have to test anything related to a repository. You
-   can even perform a test to make sure that individual source directories are
-   mounted and available. Use your imagination!
- * The soft failure feature also works for before/after hooks for other
-   actions as well. But it is not implemented for `before_everything` or
-   `after_everything`.
+   can even perform a test that individual source directories are mounted and
+   available. Use your imagination!
+ * Soft failures are not currently implemented for `everything`,
+   `before_everything`, or `after_everything` command hooks.

+ 49 - 134
docs/how-to/monitor-your-backups.md

@@ -14,140 +14,55 @@ and alerting comes in.
 
 There are several different ways you can monitor your backups and find out
 whether they're succeeding. Which of these you choose to do is up to you and
-your particular infrastructure.
-
-### Job runner alerts
-
-The easiest place to start is with failure alerts from the [scheduled job
-runner](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#autopilot)
-(cron, systemd, etc.) that's running borgmatic. But note that if the job
-doesn't even get scheduled (e.g. due to the job runner not running), you
-probably won't get an alert at all! Still, this is a decent first line of
-defense, especially when combined with some of the other approaches below.
-
-### Commands run on error
-
-The `on_error` hook allows you to run an arbitrary command or script when
-borgmatic itself encounters an error running your backups. So for instance,
-you can run a script to send yourself a text message alert. But note that if
-borgmatic doesn't actually run, this alert won't fire.  See [error
-hooks](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#error-hooks)
-below for how to configure this.
-
-### Third-party monitoring services
-
-borgmatic integrates with these monitoring services and libraries, pinging
-them as backups happen:
-
- * [Apprise](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#apprise-hook)
- * [Cronhub](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook)
- * [Cronitor](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook)
- * [Grafana Loki](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#loki-hook)
- * [Healthchecks](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook)
- * [ntfy](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#ntfy-hook)
- * [PagerDuty](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pagerduty-hook)
- * [Pushover](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pushover-hook)
- * [Sentry](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#sentry-hook)
- * [Uptime Kuma](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#uptime-kuma-hook)
- * [Zabbix](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#zabbix-hook)
-
-The idea is that you'll receive an alert when something goes wrong or when the
-service doesn't hear from borgmatic for a configured interval (if supported).
-See the documentation links above for configuration information.
-
-While these services and libraries offer different features, you probably only
-need to use one of them at most.
-
-
-### Third-party monitoring software
-
-You can use traditional monitoring software to consume borgmatic JSON output
-and track when the last successful backup occurred. See [scripting
-borgmatic](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#scripting-borgmatic)
-below for how to configure this.
-
-### Borg hosting providers
-
-Most [Borg hosting
-providers](https://torsion.org/borgmatic/#hosting-providers) include
-monitoring and alerting as part of their offering. This gives you a dashboard
-to check on all of your backups, and can alert you if the service doesn't hear
-from borgmatic for a configured interval.
-
-### Consistency checks
-
-While not strictly part of monitoring, if you want confidence that your
-backups are not only running but are restorable as well, you can configure
-particular [consistency
-checks](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/#consistency-check-configuration)
-or even script full [extract
-tests](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/).
-
-
-## Error hooks
-
-When an error occurs during a `create`, `prune`, `compact`, or `check` action,
-borgmatic can run configurable shell commands to fire off custom error
-notifications or take other actions, so you can get alerted as soon as
-something goes wrong. Here's a not-so-useful example:
-
-```yaml
-on_error:
-    - echo "Error while creating a backup or running a backup hook."
-```
-
-<span class="minilink minilink-addedin">Prior to version 1.8.0</span> Put
-this option in the `hooks:` section of your configuration.
-
-The `on_error` hook supports interpolating particular runtime variables into
-the hook command. Here's an example that assumes you provide a separate shell
-script to handle the alerting:
-
-```yaml
-on_error:
-    - send-text-message.sh {configuration_filename} {repository}
-```
-
-In this example, when the error occurs, borgmatic interpolates runtime values
-into the hook command: the borgmatic configuration filename and the path of
-the repository. Here's the full set of supported variables you can use here:
-
- * `configuration_filename`: borgmatic configuration filename in which the
-   error occurred
- * `repository`: path of the repository in which the error occurred (may be
-   blank if the error occurs in a hook)
- * `error`: the error message itself
- * `output`: output of the command that failed (may be blank if an error
-   occurred without running a command)
-
-Note that borgmatic runs the `on_error` hooks only for `create`, `prune`,
-`compact`, or `check` actions/hooks in which an error occurs and not other
-actions. borgmatic does not run `on_error` hooks if an error occurs within a
-`before_everything` or `after_everything` hook. For more about hooks, see the
-[borgmatic hooks
-documentation](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/),
-especially the security information.
-
-<span class="minilink minilink-addedin">New in version 1.8.7</span> borgmatic
-automatically escapes these interpolated values to prevent shell injection
-attacks. One implication of this change is that you shouldn't wrap the
-interpolated values in your own quotes, as that will interfere with the
-quoting performed by borgmatic and result in your command receiving incorrect
-arguments. For instance, this won't work:
-
-
-```yaml
-on_error:
-    # Don't do this! It won't work, as the {error} value is already quoted.
-    - send-text-message.sh "Uh oh: {error}"
-```
-
-Do this instead:
-
-```yaml
-on_error:
-    - send-text-message.sh {error}
-```
+your particular infrastructure:
+
+ * **Job runner alerts**: The easiest place to start is with failure alerts from
+   the [scheduled job
+   runner](https://torsion.org/borgmatic/docs/how-to/set-up-backups/#autopilot)
+   (cron, systemd, etc.) that's running borgmatic. But note that if the job
+   doesn't even get scheduled (e.g. due to the job runner not running), you
+   probably won't get an alert at all! Still, this is a decent first line of
+   defense, especially when combined with some of the other approaches below.
+ * **Third-party monitoring services:** borgmatic integrates with these monitoring
+   services and libraries, pinging them as backups happen. The idea is that
+   you'll receive an alert when something goes wrong or when the service doesn't
+   hear from borgmatic for a configured interval (if supported). While these
+   services and libraries offer different features, you probably only need to
+   use one of them at most. See these documentation links for configuration
+   information:
+     * [Apprise](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#apprise-hook)
+     * [Cronhub](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronhub-hook)
+     * [Cronitor](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook)
+     * [Grafana Loki](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#loki-hook)
+     * [Healthchecks](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook)
+     * [ntfy](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#ntfy-hook)
+     * [PagerDuty](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pagerduty-hook)
+     * [Pushover](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#pushover-hook)
+     * [Sentry](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#sentry-hook)
+     * [Uptime Kuma](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#uptime-kuma-hook)
+     * [Zabbix](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#zabbix-hook)
+ * **Third-party monitoring software:** You can use traditional monitoring
+   software to consume borgmatic JSON output and track when the last successful
+   backup occurred. See [scripting
+   borgmatic](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#scripting-borgmatic)
+   below for how to configure this.
+ * **Borg hosting providers:** Some [Borg hosting
+   providers](https://torsion.org/borgmatic/#hosting-providers) include
+   monitoring and alerting as part of their offering. This gives you a dashboard
+   to check on all of your backups, and can alert you if the service doesn't
+   hear from borgmatic for a configured interval.
+ * **Consistency checks:** While not strictly part of monitoring, if you want
+   confidence that your backups are not only running but are restorable as well,
+   you can configure particular [consistency
+   checks](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/#consistency-check-configuration)
+   or even script full [extract
+   tests](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/).
+ * **Commands run on error:** borgmatic's command hooks support running
+   arbitrary commands or scripts when borgmatic itself encounters an error
+   running your backups. So for instance, you can run a script to send yourself
+   a text message alert. But note that if borgmatic doesn't actually run, this
+   alert won't fire. See the [documentation on command hooks](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
+   for details.
 
 
 ## Healthchecks hook

+ 1 - 1
pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "borgmatic"
-version = "1.9.14"
+version = "2.0.0dev0"
 authors = [
   { name="Dan Helfman", email="witten@torsion.org" },
 ]

+ 140 - 31
tests/integration/config/test_generate.py

@@ -16,6 +16,116 @@ def test_insert_newline_before_comment_does_not_raise():
     module.insert_newline_before_comment(config, field_name)
 
 
+def test_schema_to_sample_configuration_comments_out_non_default_options():
+    schema = {
+        'type': 'object',
+        'properties': dict(
+            [
+                ('field1', {'example': 'Example 1'}),
+                ('field2', {'example': 'Example 2'}),
+                ('source_directories', {'example': 'Example 3'}),
+            ]
+        ),
+    }
+
+    config = module.schema_to_sample_configuration(schema)
+
+    assert config == dict(
+        [
+            ('field1', 'Example 1'),
+            ('field2', 'Example 2'),
+            ('source_directories', 'Example 3'),
+        ]
+    )
+    assert 'COMMENT_OUT' in config.ca.items['field1'][1][-1]._value
+    assert 'COMMENT_OUT' in config.ca.items['field2'][1][-1]._value
+    assert 'source_directories' not in config.ca.items
+
+
+def test_schema_to_sample_configuration_comments_out_non_source_config_options():
+    schema = {
+        'type': 'object',
+        'properties': dict(
+            [
+                ('field1', {'example': 'Example 1'}),
+                ('field2', {'example': 'Example 2'}),
+                ('field3', {'example': 'Example 3'}),
+            ]
+        ),
+    }
+    source_config = {'field3': 'value'}
+
+    config = module.schema_to_sample_configuration(schema, source_config)
+
+    assert config == dict(
+        [
+            ('field1', 'Example 1'),
+            ('field2', 'Example 2'),
+            ('field3', 'Example 3'),
+        ]
+    )
+    assert 'COMMENT_OUT' in config.ca.items['field1'][1][-1]._value
+    assert 'COMMENT_OUT' in config.ca.items['field2'][1][-1]._value
+    assert 'field3' not in config.ca.items
+
+
+def test_schema_to_sample_configuration_comments_out_non_default_options_in_sequence_of_maps():
+    schema = {
+        'type': 'array',
+        'items': {
+            'type': 'object',
+            'properties': dict(
+                [
+                    ('field1', {'example': 'Example 1'}),
+                    ('field2', {'example': 'Example 2'}),
+                    ('source_directories', {'example': 'Example 3'}),
+                ]
+            ),
+        },
+    }
+
+    config = module.schema_to_sample_configuration(schema)
+
+    assert config == [
+        dict(
+            [('field1', 'Example 1'), ('field2', 'Example 2'), ('source_directories', 'Example 3')]
+        )
+    ]
+
+    # The first field in a sequence does not get commented.
+    assert 'field1' not in config[0].ca.items
+    assert 'COMMENT_OUT' in config[0].ca.items['field2'][1][-1]._value
+    assert 'source_directories' not in config[0].ca.items
+
+
+def test_schema_to_sample_configuration_comments_out_non_source_config_options_in_sequence_of_maps():
+    schema = {
+        'type': 'array',
+        'items': {
+            'type': 'object',
+            'properties': dict(
+                [
+                    ('field1', {'example': 'Example 1'}),
+                    ('field2', {'example': 'Example 2'}),
+                    ('field3', {'example': 'Example 3'}),
+                ]
+            ),
+        },
+    }
+    source_config = [{'field3': 'value'}]
+
+    config = module.schema_to_sample_configuration(schema, source_config)
+
+    assert config == [
+        dict([('field1', 'Example 1'), ('field2', 'Example 2'), ('field3', 'Example 3')])
+    ]
+
+    # The first field in a sequence does not get commented.
+    assert 'field1' not in config[0].ca.items
+    assert 'COMMENT_OUT' in config[0].ca.items['field2'][1][-1]._value
+    assert 'field3' not in config[0].ca.items
+
+
 def test_comment_out_line_skips_blank_line():
     line = '    \n'
 
@@ -152,7 +262,7 @@ def test_add_comments_to_configuration_sequence_of_maps_without_description_does
     module.add_comments_to_configuration_sequence(config, schema)
 
 
-def test_add_comments_to_configuration_object_does_not_raise():
+def test_add_comments_to_configuration_comments_out_non_default_options():
     # Ensure that it can deal with fields both in the schema and missing from the schema.
     config = module.ruamel.yaml.comments.CommentedMap([('foo', 33), ('bar', 44), ('baz', 55)])
     schema = {
@@ -162,44 +272,43 @@ def test_add_comments_to_configuration_object_does_not_raise():
 
     module.add_comments_to_configuration_object(config, schema)
 
+    assert 'COMMENT_OUT' in config.ca.items['foo'][1][-1]._value
+    assert 'COMMENT_OUT' in config.ca.items['bar'][1][-1]._value
+    assert 'baz' not in config.ca.items
 
-def test_add_comments_to_configuration_object_with_skip_first_does_not_raise():
-    config = module.ruamel.yaml.comments.CommentedMap([('foo', 33)])
-    schema = {'type': 'object', 'properties': {'foo': {'description': 'Foo'}}}
-
-    module.add_comments_to_configuration_object(config, schema, skip_first=True)
-
-
-def test_remove_commented_out_sentinel_keeps_other_comments():
-    field_name = 'foo'
-    config = module.ruamel.yaml.comments.CommentedMap([(field_name, 33)])
-    config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.\nCOMMENT_OUT')
-
-    module.remove_commented_out_sentinel(config, field_name)
-
-    comments = config.ca.items[field_name][module.RUAMEL_YAML_COMMENTS_INDEX]
-    assert len(comments) == 1
-    assert comments[0].value == '# Actual comment.\n'
 
+def test_add_comments_to_configuration_comments_out_non_source_config_options():
+    # Ensure that it can deal with fields both in the schema and missing from the schema.
+    config = module.ruamel.yaml.comments.CommentedMap(
+        [('repositories', 33), ('bar', 44), ('baz', 55)]
+    )
+    schema = {
+        'type': 'object',
+        'properties': {
+            'repositories': {'description': 'repositories'},
+            'bar': {'description': 'Bar'},
+        },
+    }
 
-def test_remove_commented_out_sentinel_without_sentinel_keeps_other_comments():
-    field_name = 'foo'
-    config = module.ruamel.yaml.comments.CommentedMap([(field_name, 33)])
-    config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.')
+    module.add_comments_to_configuration_object(config, schema)
 
-    module.remove_commented_out_sentinel(config, field_name)
+    assert 'repositories' in config.ca.items
+    assert 'COMMENT_OUT' in config.ca.items['bar'][1][-1]._value
+    assert 'baz' not in config.ca.items
 
-    comments = config.ca.items[field_name][module.RUAMEL_YAML_COMMENTS_INDEX]
-    assert len(comments) == 1
-    assert comments[0].value == '# Actual comment.\n'
 
+def test_add_comments_to_configuration_object_with_skip_first_does_not_comment_out_first_option():
+    config = module.ruamel.yaml.comments.CommentedMap([('foo', 33), ('bar', 44), ('baz', 55)])
+    schema = {
+        'type': 'object',
+        'properties': {'foo': {'description': 'Foo'}, 'bar': {'description': 'Bar'}},
+    }
 
-def test_remove_commented_out_sentinel_on_unknown_field_does_not_raise():
-    field_name = 'foo'
-    config = module.ruamel.yaml.comments.CommentedMap([(field_name, 33)])
-    config.yaml_set_comment_before_after_key(key=field_name, before='Actual comment.')
+    module.add_comments_to_configuration_object(config, schema, skip_first=True)
 
-    module.remove_commented_out_sentinel(config, 'unknown')
+    assert 'foo' not in config.ca.items
+    assert 'COMMENT_OUT' in config.ca.items['bar'][1][-1]._value
+    assert 'baz' not in config.ca.items
 
 
 def test_generate_sample_configuration_does_not_raise():

+ 0 - 10
tests/unit/actions/test_check.py

@@ -1405,7 +1405,6 @@ def test_run_check_checks_archives_for_configured_repository():
     flexmock(module).should_receive('make_check_time_path')
     flexmock(module).should_receive('write_check_time')
     flexmock(module.borgmatic.borg.extract).should_receive('extract_last_archive_dry_run').never()
-    flexmock(module.borgmatic.hooks.command).should_receive('execute_hook').times(2)
     check_arguments = flexmock(
         repository=None,
         progress=flexmock(),
@@ -1419,7 +1418,6 @@ def test_run_check_checks_archives_for_configured_repository():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={'repositories': ['repo']},
-        hook_context={},
         local_borg_version=None,
         check_arguments=check_arguments,
         global_arguments=global_arguments,
@@ -1441,7 +1439,6 @@ def test_run_check_runs_configured_extract_check():
     flexmock(module.borgmatic.borg.extract).should_receive('extract_last_archive_dry_run').once()
     flexmock(module).should_receive('make_check_time_path')
     flexmock(module).should_receive('write_check_time')
-    flexmock(module.borgmatic.hooks.command).should_receive('execute_hook').times(2)
     check_arguments = flexmock(
         repository=None,
         progress=flexmock(),
@@ -1455,7 +1452,6 @@ def test_run_check_runs_configured_extract_check():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={'repositories': ['repo']},
-        hook_context={},
         local_borg_version=None,
         check_arguments=check_arguments,
         global_arguments=global_arguments,
@@ -1480,7 +1476,6 @@ def test_run_check_runs_configured_spot_check():
     flexmock(module.borgmatic.actions.check).should_receive('spot_check').once()
     flexmock(module).should_receive('make_check_time_path')
     flexmock(module).should_receive('write_check_time')
-    flexmock(module.borgmatic.hooks.command).should_receive('execute_hook').times(2)
     check_arguments = flexmock(
         repository=None,
         progress=flexmock(),
@@ -1494,7 +1489,6 @@ def test_run_check_runs_configured_spot_check():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={'repositories': ['repo']},
-        hook_context={},
         local_borg_version=None,
         check_arguments=check_arguments,
         global_arguments=global_arguments,
@@ -1516,7 +1510,6 @@ def test_run_check_without_checks_runs_nothing_except_hooks():
     flexmock(module).should_receive('make_check_time_path')
     flexmock(module).should_receive('write_check_time').never()
     flexmock(module.borgmatic.borg.extract).should_receive('extract_last_archive_dry_run').never()
-    flexmock(module.borgmatic.hooks.command).should_receive('execute_hook').times(2)
     check_arguments = flexmock(
         repository=None,
         progress=flexmock(),
@@ -1530,7 +1523,6 @@ def test_run_check_without_checks_runs_nothing_except_hooks():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={'repositories': ['repo']},
-        hook_context={},
         local_borg_version=None,
         check_arguments=check_arguments,
         global_arguments=global_arguments,
@@ -1569,7 +1561,6 @@ def test_run_check_checks_archives_in_selected_repository():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={'repositories': ['repo']},
-        hook_context={},
         local_borg_version=None,
         check_arguments=check_arguments,
         global_arguments=global_arguments,
@@ -1597,7 +1588,6 @@ def test_run_check_bails_if_repository_does_not_match():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={'repositories': ['repo']},
-        hook_context={},
         local_borg_version=None,
         check_arguments=check_arguments,
         global_arguments=global_arguments,

+ 0 - 4
tests/unit/actions/test_compact.py

@@ -8,7 +8,6 @@ def test_compact_actions_calls_hooks_for_configured_repository():
     flexmock(module.borgmatic.borg.feature).should_receive('available').and_return(True)
     flexmock(module.borgmatic.config.validate).should_receive('repositories_match').never()
     flexmock(module.borgmatic.borg.compact).should_receive('compact_segments').once()
-    flexmock(module.borgmatic.hooks.command).should_receive('execute_hook').times(2)
     compact_arguments = flexmock(
         repository=None, progress=flexmock(), cleanup_commits=flexmock(), threshold=flexmock()
     )
@@ -18,7 +17,6 @@ def test_compact_actions_calls_hooks_for_configured_repository():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={},
-        hook_context={},
         local_borg_version=None,
         compact_arguments=compact_arguments,
         global_arguments=global_arguments,
@@ -44,7 +42,6 @@ def test_compact_runs_with_selected_repository():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={},
-        hook_context={},
         local_borg_version=None,
         compact_arguments=compact_arguments,
         global_arguments=global_arguments,
@@ -70,7 +67,6 @@ def test_compact_bails_if_repository_does_not_match():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={},
-        hook_context={},
         local_borg_version=None,
         compact_arguments=compact_arguments,
         global_arguments=global_arguments,

+ 0 - 7
tests/unit/actions/test_create.py

@@ -433,7 +433,6 @@ def test_run_create_executes_and_calls_hooks_for_configured_repository():
         flexmock()
     )
     flexmock(module.borgmatic.borg.create).should_receive('create_archive').once()
-    flexmock(module.borgmatic.hooks.command).should_receive('execute_hook').times(2)
     flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hooks').and_return({})
     flexmock(module.borgmatic.hooks.dispatch).should_receive(
         'call_hooks_even_if_unconfigured'
@@ -456,7 +455,6 @@ def test_run_create_executes_and_calls_hooks_for_configured_repository():
             repository={'path': 'repo'},
             config={},
             config_paths=['/tmp/test.yaml'],
-            hook_context={},
             local_borg_version=None,
             create_arguments=create_arguments,
             global_arguments=global_arguments,
@@ -476,7 +474,6 @@ def test_run_create_runs_with_selected_repository():
         flexmock()
     )
     flexmock(module.borgmatic.borg.create).should_receive('create_archive').once()
-    flexmock(module.borgmatic.hooks.command).should_receive('execute_hook').times(2)
     flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hooks').and_return({})
     flexmock(module.borgmatic.hooks.dispatch).should_receive(
         'call_hooks_even_if_unconfigured'
@@ -499,7 +496,6 @@ def test_run_create_runs_with_selected_repository():
             repository={'path': 'repo'},
             config={},
             config_paths=['/tmp/test.yaml'],
-            hook_context={},
             local_borg_version=None,
             create_arguments=create_arguments,
             global_arguments=global_arguments,
@@ -532,7 +528,6 @@ def test_run_create_bails_if_repository_does_not_match():
             repository='repo',
             config={},
             config_paths=['/tmp/test.yaml'],
-            hook_context={},
             local_borg_version=None,
             create_arguments=create_arguments,
             global_arguments=global_arguments,
@@ -556,7 +551,6 @@ def test_run_create_produces_json():
     )
     parsed_json = flexmock()
     flexmock(module.borgmatic.actions.json).should_receive('parse_json').and_return(parsed_json)
-    flexmock(module.borgmatic.hooks.command).should_receive('execute_hook').times(2)
     flexmock(module.borgmatic.hooks.dispatch).should_receive('call_hooks').and_return({})
     flexmock(module.borgmatic.hooks.dispatch).should_receive(
         'call_hooks_even_if_unconfigured'
@@ -579,7 +573,6 @@ def test_run_create_produces_json():
             repository={'path': 'repo'},
             config={},
             config_paths=['/tmp/test.yaml'],
-            hook_context={},
             local_borg_version=None,
             create_arguments=create_arguments,
             global_arguments=global_arguments,

+ 0 - 2
tests/unit/actions/test_extract.py

@@ -7,7 +7,6 @@ def test_run_extract_calls_hooks():
     flexmock(module.logger).answer = lambda message: None
     flexmock(module.borgmatic.config.validate).should_receive('repositories_match').and_return(True)
     flexmock(module.borgmatic.borg.extract).should_receive('extract_archive')
-    flexmock(module.borgmatic.hooks.command).should_receive('execute_hook').times(2)
     extract_arguments = flexmock(
         paths=flexmock(),
         progress=flexmock(),
@@ -22,7 +21,6 @@ def test_run_extract_calls_hooks():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={'repositories': ['repo']},
-        hook_context={},
         local_borg_version=None,
         extract_arguments=extract_arguments,
         global_arguments=global_arguments,

+ 0 - 4
tests/unit/actions/test_prune.py

@@ -7,7 +7,6 @@ def test_run_prune_calls_hooks_for_configured_repository():
     flexmock(module.logger).answer = lambda message: None
     flexmock(module.borgmatic.config.validate).should_receive('repositories_match').never()
     flexmock(module.borgmatic.borg.prune).should_receive('prune_archives').once()
-    flexmock(module.borgmatic.hooks.command).should_receive('execute_hook').times(2)
     prune_arguments = flexmock(repository=None, stats=flexmock(), list_archives=flexmock())
     global_arguments = flexmock(monitoring_verbosity=1, dry_run=False)
 
@@ -15,7 +14,6 @@ def test_run_prune_calls_hooks_for_configured_repository():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={},
-        hook_context={},
         local_borg_version=None,
         prune_arguments=prune_arguments,
         global_arguments=global_arguments,
@@ -38,7 +36,6 @@ def test_run_prune_runs_with_selected_repository():
         config_filename='test.yaml',
         repository={'path': 'repo'},
         config={},
-        hook_context={},
         local_borg_version=None,
         prune_arguments=prune_arguments,
         global_arguments=global_arguments,
@@ -61,7 +58,6 @@ def test_run_prune_bails_if_repository_does_not_match():
         config_filename='test.yaml',
         repository='repo',
         config={},
-        hook_context={},
         local_borg_version=None,
         prune_arguments=prune_arguments,
         global_arguments=global_arguments,

Разница между файлами не показана из-за своего большого размера
+ 267 - 119
tests/unit/commands/test_borgmatic.py


+ 46 - 17
tests/unit/config/test_generate.py

@@ -1,5 +1,3 @@
-from collections import OrderedDict
-
 import pytest
 from flexmock import flexmock
 
@@ -9,7 +7,7 @@ from borgmatic.config import generate as module
 def test_get_properties_with_simple_object():
     schema = {
         'type': 'object',
-        'properties': OrderedDict(
+        'properties': dict(
             [
                 ('field1', {'example': 'Example'}),
             ]
@@ -19,12 +17,12 @@ def test_get_properties_with_simple_object():
     assert module.get_properties(schema) == schema['properties']
 
 
-def test_get_properties_merges_one_of_list_properties():
+def test_get_properties_merges_oneof_list_properties():
     schema = {
         'type': 'object',
         'oneOf': [
             {
-                'properties': OrderedDict(
+                'properties': dict(
                     [
                         ('field1', {'example': 'Example 1'}),
                         ('field2', {'example': 'Example 2'}),
@@ -32,7 +30,7 @@ def test_get_properties_merges_one_of_list_properties():
                 ),
             },
             {
-                'properties': OrderedDict(
+                'properties': dict(
                     [
                         ('field2', {'example': 'Example 2'}),
                         ('field3', {'example': 'Example 3'}),
@@ -47,10 +45,45 @@ def test_get_properties_merges_one_of_list_properties():
     )
 
 
+def test_get_properties_interleaves_oneof_list_properties():
+    schema = {
+        'type': 'object',
+        'oneOf': [
+            {
+                'properties': dict(
+                    [
+                        ('field1', {'example': 'Example 1'}),
+                        ('field2', {'example': 'Example 2'}),
+                        ('field3', {'example': 'Example 3'}),
+                    ]
+                ),
+            },
+            {
+                'properties': dict(
+                    [
+                        ('field4', {'example': 'Example 4'}),
+                        ('field5', {'example': 'Example 5'}),
+                    ]
+                ),
+            },
+        ],
+    }
+
+    assert module.get_properties(schema) == dict(
+        [
+            ('field1', {'example': 'Example 1'}),
+            ('field4', {'example': 'Example 4'}),
+            ('field2', {'example': 'Example 2'}),
+            ('field5', {'example': 'Example 5'}),
+            ('field3', {'example': 'Example 3'}),
+        ]
+    )
+
+
 def test_schema_to_sample_configuration_generates_config_map_with_examples():
     schema = {
         'type': 'object',
-        'properties': OrderedDict(
+        'properties': dict(
             [
                 ('field1', {'example': 'Example 1'}),
                 ('field2', {'example': 'Example 2'}),
@@ -59,12 +92,12 @@ def test_schema_to_sample_configuration_generates_config_map_with_examples():
         ),
     }
     flexmock(module).should_receive('get_properties').and_return(schema['properties'])
-    flexmock(module.ruamel.yaml.comments).should_receive('CommentedMap').replace_with(OrderedDict)
+    flexmock(module.ruamel.yaml.comments).should_receive('CommentedMap').replace_with(dict)
     flexmock(module).should_receive('add_comments_to_configuration_object')
 
     config = module.schema_to_sample_configuration(schema)
 
-    assert config == OrderedDict(
+    assert config == dict(
         [
             ('field1', 'Example 1'),
             ('field2', 'Example 2'),
@@ -88,7 +121,7 @@ def test_schema_to_sample_configuration_generates_config_sequence_of_maps_with_e
         'type': 'array',
         'items': {
             'type': 'object',
-            'properties': OrderedDict(
+            'properties': dict(
                 [('field1', {'example': 'Example 1'}), ('field2', {'example': 'Example 2'})]
             ),
         },
@@ -100,7 +133,7 @@ def test_schema_to_sample_configuration_generates_config_sequence_of_maps_with_e
 
     config = module.schema_to_sample_configuration(schema)
 
-    assert config == [OrderedDict([('field1', 'Example 1'), ('field2', 'Example 2')])]
+    assert config == [dict([('field1', 'Example 1'), ('field2', 'Example 2')])]
 
 
 def test_schema_to_sample_configuration_generates_config_sequence_of_maps_with_multiple_types():
@@ -108,7 +141,7 @@ def test_schema_to_sample_configuration_generates_config_sequence_of_maps_with_m
         'type': 'array',
         'items': {
             'type': ['object', 'null'],
-            'properties': OrderedDict(
+            'properties': dict(
                 [('field1', {'example': 'Example 1'}), ('field2', {'example': 'Example 2'})]
             ),
         },
@@ -120,7 +153,7 @@ def test_schema_to_sample_configuration_generates_config_sequence_of_maps_with_m
 
     config = module.schema_to_sample_configuration(schema)
 
-    assert config == [OrderedDict([('field1', 'Example 1'), ('field2', 'Example 2')])]
+    assert config == [dict([('field1', 'Example 1'), ('field2', 'Example 2')])]
 
 
 def test_schema_to_sample_configuration_with_unsupported_schema_raises():
@@ -133,7 +166,6 @@ def test_schema_to_sample_configuration_with_unsupported_schema_raises():
 def test_merge_source_configuration_into_destination_inserts_map_fields():
     destination_config = {'foo': 'dest1', 'bar': 'dest2'}
     source_config = {'foo': 'source1', 'baz': 'source2'}
-    flexmock(module).should_receive('remove_commented_out_sentinel')
     flexmock(module).should_receive('ruamel.yaml.comments.CommentedSeq').replace_with(list)
 
     module.merge_source_configuration_into_destination(destination_config, source_config)
@@ -144,7 +176,6 @@ def test_merge_source_configuration_into_destination_inserts_map_fields():
 def test_merge_source_configuration_into_destination_inserts_nested_map_fields():
     destination_config = {'foo': {'first': 'dest1', 'second': 'dest2'}, 'bar': 'dest3'}
     source_config = {'foo': {'first': 'source1'}}
-    flexmock(module).should_receive('remove_commented_out_sentinel')
     flexmock(module).should_receive('ruamel.yaml.comments.CommentedSeq').replace_with(list)
 
     module.merge_source_configuration_into_destination(destination_config, source_config)
@@ -155,7 +186,6 @@ def test_merge_source_configuration_into_destination_inserts_nested_map_fields()
 def test_merge_source_configuration_into_destination_inserts_sequence_fields():
     destination_config = {'foo': ['dest1', 'dest2'], 'bar': ['dest3'], 'baz': ['dest4']}
     source_config = {'foo': ['source1'], 'bar': ['source2', 'source3']}
-    flexmock(module).should_receive('remove_commented_out_sentinel')
     flexmock(module).should_receive('ruamel.yaml.comments.CommentedSeq').replace_with(list)
 
     module.merge_source_configuration_into_destination(destination_config, source_config)
@@ -170,7 +200,6 @@ def test_merge_source_configuration_into_destination_inserts_sequence_fields():
 def test_merge_source_configuration_into_destination_inserts_sequence_of_maps():
     destination_config = {'foo': [{'first': 'dest1', 'second': 'dest2'}], 'bar': 'dest3'}
     source_config = {'foo': [{'first': 'source1'}, {'other': 'source2'}]}
-    flexmock(module).should_receive('remove_commented_out_sentinel')
     flexmock(module).should_receive('ruamel.yaml.comments.CommentedSeq').replace_with(list)
 
     module.merge_source_configuration_into_destination(destination_config, source_config)

+ 110 - 0
tests/unit/config/test_normalize.py

@@ -123,6 +123,114 @@ def test_normalize_sections_with_only_scalar_raises():
         module.normalize_sections('test.yaml', config)
 
 
+@pytest.mark.parametrize(
+    'config,expected_config,produces_logs',
+    (
+        (
+            {'before_actions': ['foo', 'bar'], 'after_actions': ['baz']},
+            {
+                'commands': [
+                    {'before': 'repository', 'run': ['foo', 'bar']},
+                    {'after': 'repository', 'run': ['baz']},
+                ]
+            },
+            True,
+        ),
+        (
+            {'before_backup': ['foo', 'bar'], 'after_backup': ['baz']},
+            {
+                'commands': [
+                    {'before': 'action', 'when': ['create'], 'run': ['foo', 'bar']},
+                    {'after': 'action', 'when': ['create'], 'run': ['baz']},
+                ]
+            },
+            True,
+        ),
+        (
+            {'before_prune': ['foo', 'bar'], 'after_prune': ['baz']},
+            {
+                'commands': [
+                    {'before': 'action', 'when': ['prune'], 'run': ['foo', 'bar']},
+                    {'after': 'action', 'when': ['prune'], 'run': ['baz']},
+                ]
+            },
+            True,
+        ),
+        (
+            {'before_compact': ['foo', 'bar'], 'after_compact': ['baz']},
+            {
+                'commands': [
+                    {'before': 'action', 'when': ['compact'], 'run': ['foo', 'bar']},
+                    {'after': 'action', 'when': ['compact'], 'run': ['baz']},
+                ]
+            },
+            True,
+        ),
+        (
+            {'before_check': ['foo', 'bar'], 'after_check': ['baz']},
+            {
+                'commands': [
+                    {'before': 'action', 'when': ['check'], 'run': ['foo', 'bar']},
+                    {'after': 'action', 'when': ['check'], 'run': ['baz']},
+                ]
+            },
+            True,
+        ),
+        (
+            {'before_extract': ['foo', 'bar'], 'after_extract': ['baz']},
+            {
+                'commands': [
+                    {'before': 'action', 'when': ['extract'], 'run': ['foo', 'bar']},
+                    {'after': 'action', 'when': ['extract'], 'run': ['baz']},
+                ]
+            },
+            True,
+        ),
+        (
+            {'on_error': ['foo', 'bar']},
+            {
+                'commands': [
+                    {
+                        'after': 'error',
+                        'when': ['create', 'prune', 'compact', 'check'],
+                        'run': ['foo', 'bar'],
+                    },
+                ]
+            },
+            True,
+        ),
+        (
+            {'before_everything': ['foo', 'bar'], 'after_everything': ['baz']},
+            {
+                'commands': [
+                    {'before': 'everything', 'when': ['create'], 'run': ['foo', 'bar']},
+                    {'after': 'everything', 'when': ['create'], 'run': ['baz']},
+                ]
+            },
+            True,
+        ),
+        (
+            {'other': 'options', 'unrelated_to': 'commands'},
+            {'other': 'options', 'unrelated_to': 'commands'},
+            False,
+        ),
+    ),
+)
+def test_normalize_commands_moves_individual_command_hooks_to_unified_commands(
+    config, expected_config, produces_logs
+):
+    flexmock(module).should_receive('make_command_hook_deprecation_log').and_return(flexmock())
+
+    logs = module.normalize_commands('test.yaml', config)
+
+    assert config == expected_config
+
+    if produces_logs:
+        assert logs
+    else:
+        assert logs == []
+
+
 @pytest.mark.parametrize(
     'config,expected_config,produces_logs',
     (
@@ -262,6 +370,7 @@ def test_normalize_applies_hard_coded_normalization_to_config(
     config, expected_config, produces_logs
 ):
     flexmock(module).should_receive('normalize_sections').and_return([])
+    flexmock(module).should_receive('normalize_commands').and_return([])
 
     logs = module.normalize('test.yaml', config)
     expected_config.setdefault('bootstrap', {})
@@ -276,6 +385,7 @@ def test_normalize_applies_hard_coded_normalization_to_config(
 
 def test_normalize_config_with_borgmatic_source_directory_warns():
     flexmock(module).should_receive('normalize_sections').and_return([])
+    flexmock(module).should_receive('normalize_commands').and_return([])
 
     logs = module.normalize('test.yaml', {'borgmatic_source_directory': '~/.borgmatic'})
 

+ 9 - 0
tests/unit/config/test_validate.py

@@ -202,6 +202,15 @@ def test_guard_configuration_contains_repository_does_not_raise_when_repository_
     )
 
 
+def test_guard_configuration_contains_repository_does_not_raise_when_repository_is_none():
+    flexmock(module).should_receive('repositories_match').never()
+
+    module.guard_configuration_contains_repository(
+        repository=None,
+        configurations={'config.yaml': {'repositories': [{'path': 'foo/bar', 'label': 'repo'}]}},
+    )
+
+
 def test_guard_configuration_contains_repository_errors_when_repository_does_not_match():
     flexmock(module).should_receive('repositories_match').and_return(False)
 

+ 7 - 0
tests/unit/hooks/data_source/test_bootstrap.py

@@ -6,6 +6,9 @@ from borgmatic.hooks.data_source import bootstrap as module
 
 
 def test_dump_data_sources_creates_manifest_file():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     flexmock(module.os).should_receive('makedirs')
 
     flexmock(module.importlib.metadata).should_receive('version').and_return('1.0.0')
@@ -32,6 +35,7 @@ def test_dump_data_sources_creates_manifest_file():
 
 
 def test_dump_data_sources_with_store_config_files_false_does_not_create_manifest_file():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').never()
     flexmock(module.os).should_receive('makedirs').never()
     flexmock(module.json).should_receive('dump').never()
     hook_config = {'store_config_files': False}
@@ -47,6 +51,9 @@ def test_dump_data_sources_with_store_config_files_false_does_not_create_manifes
 
 
 def test_dump_data_sources_with_dry_run_does_not_create_manifest_file():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     flexmock(module.os).should_receive('makedirs').never()
     flexmock(module.json).should_receive('dump').never()
 

+ 18 - 0
tests/unit/hooks/data_source/test_btrfs.py

@@ -269,6 +269,9 @@ def test_make_borg_snapshot_pattern_includes_slashdot_hack_and_stripped_pattern_
 
 
 def test_dump_data_sources_snapshots_each_subvolume_and_updates_patterns():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     patterns = [Pattern('/foo'), Pattern('/mnt/subvol1')]
     config = {'btrfs': {}}
     flexmock(module).should_receive('get_subvolumes').and_return(
@@ -347,6 +350,9 @@ def test_dump_data_sources_snapshots_each_subvolume_and_updates_patterns():
 
 
 def test_dump_data_sources_uses_custom_btrfs_command_in_commands():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     patterns = [Pattern('/foo'), Pattern('/mnt/subvol1')]
     config = {'btrfs': {'btrfs_command': '/usr/local/bin/btrfs'}}
     flexmock(module).should_receive('get_subvolumes').and_return(
@@ -400,6 +406,9 @@ def test_dump_data_sources_uses_custom_btrfs_command_in_commands():
 
 
 def test_dump_data_sources_uses_custom_findmnt_command_in_commands():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     patterns = [Pattern('/foo'), Pattern('/mnt/subvol1')]
     config = {'btrfs': {'findmnt_command': '/usr/local/bin/findmnt'}}
     flexmock(module).should_receive('get_subvolumes').with_args(
@@ -455,6 +464,9 @@ def test_dump_data_sources_uses_custom_findmnt_command_in_commands():
 
 
 def test_dump_data_sources_with_dry_run_skips_snapshot_and_patterns_update():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     patterns = [Pattern('/foo'), Pattern('/mnt/subvol1')]
     config = {'btrfs': {}}
     flexmock(module).should_receive('get_subvolumes').and_return(
@@ -483,6 +495,9 @@ def test_dump_data_sources_with_dry_run_skips_snapshot_and_patterns_update():
 
 
 def test_dump_data_sources_without_matching_subvolumes_skips_snapshot_and_patterns_update():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     patterns = [Pattern('/foo'), Pattern('/mnt/subvol1')]
     config = {'btrfs': {}}
     flexmock(module).should_receive('get_subvolumes').and_return(())
@@ -507,6 +522,9 @@ def test_dump_data_sources_without_matching_subvolumes_skips_snapshot_and_patter
 
 
 def test_dump_data_sources_snapshots_adds_to_existing_exclude_patterns():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     patterns = [Pattern('/foo'), Pattern('/mnt/subvol1')]
     config = {'btrfs': {}, 'exclude_patterns': ['/bar']}
     flexmock(module).should_receive('get_subvolumes').and_return(

+ 21 - 0
tests/unit/hooks/data_source/test_lvm.py

@@ -282,6 +282,9 @@ def test_make_borg_snapshot_pattern_includes_slashdot_hack_and_stripped_pattern_
 
 
 def test_dump_data_sources_snapshots_and_mounts_and_updates_patterns():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     config = {'lvm': {}}
     patterns = [Pattern('/mnt/lvolume1/subdir'), Pattern('/mnt/lvolume2')]
     logical_volumes = (
@@ -351,6 +354,9 @@ def test_dump_data_sources_snapshots_and_mounts_and_updates_patterns():
 
 
 def test_dump_data_sources_with_no_logical_volumes_skips_snapshots():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     config = {'lvm': {}}
     patterns = [Pattern('/mnt/lvolume1/subdir'), Pattern('/mnt/lvolume2')]
     flexmock(module).should_receive('get_logical_volumes').and_return(())
@@ -373,6 +379,9 @@ def test_dump_data_sources_with_no_logical_volumes_skips_snapshots():
 
 
 def test_dump_data_sources_uses_snapshot_size_for_snapshot():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     config = {'lvm': {'snapshot_size': '1000PB'}}
     patterns = [Pattern('/mnt/lvolume1/subdir'), Pattern('/mnt/lvolume2')]
     logical_volumes = (
@@ -448,6 +457,9 @@ def test_dump_data_sources_uses_snapshot_size_for_snapshot():
 
 
 def test_dump_data_sources_uses_custom_commands():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     config = {
         'lvm': {
             'lsblk_command': '/usr/local/bin/lsblk',
@@ -534,6 +546,9 @@ def test_dump_data_sources_uses_custom_commands():
 
 
 def test_dump_data_sources_with_dry_run_skips_snapshots_and_does_not_touch_patterns():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     config = {'lvm': {}}
     patterns = [Pattern('/mnt/lvolume1/subdir'), Pattern('/mnt/lvolume2')]
     flexmock(module).should_receive('get_logical_volumes').and_return(
@@ -585,6 +600,9 @@ def test_dump_data_sources_with_dry_run_skips_snapshots_and_does_not_touch_patte
 
 
 def test_dump_data_sources_ignores_mismatch_between_given_patterns_and_contained_patterns():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     config = {'lvm': {}}
     patterns = [Pattern('/hmm')]
     logical_volumes = (
@@ -655,6 +673,9 @@ def test_dump_data_sources_ignores_mismatch_between_given_patterns_and_contained
 
 
 def test_dump_data_sources_with_missing_snapshot_errors():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     config = {'lvm': {}}
     patterns = [Pattern('/mnt/lvolume1/subdir'), Pattern('/mnt/lvolume2')]
     flexmock(module).should_receive('get_logical_volumes').and_return(

+ 18 - 0
tests/unit/hooks/data_source/test_mariadb.py

@@ -237,6 +237,9 @@ def test_use_streaming_false_for_no_databases():
 
 
 def test_dump_data_sources_dumps_each_database():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo'}, {'name': 'bar'}]
     processes = [flexmock(), flexmock()]
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -278,6 +281,9 @@ def test_dump_data_sources_dumps_each_database():
 
 
 def test_dump_data_sources_dumps_with_password():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     database = {'name': 'foo', 'username': 'root', 'password': 'trustsome1'}
     process = flexmock()
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -312,6 +318,9 @@ def test_dump_data_sources_dumps_with_password():
 
 
 def test_dump_data_sources_dumps_all_databases_at_once():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'all'}]
     process = flexmock()
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -343,6 +352,9 @@ def test_dump_data_sources_dumps_all_databases_at_once():
 
 
 def test_dump_data_sources_dumps_all_databases_separately_when_format_configured():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'all', 'format': 'sql'}]
     processes = [flexmock(), flexmock()]
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -850,6 +862,9 @@ def test_execute_dump_command_with_dry_run_skips_mariadb_dump():
 
 
 def test_dump_data_sources_errors_for_missing_all_databases():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'all'}]
     flexmock(module).should_receive('make_dump_path').and_return('')
     flexmock(module.os).should_receive('environ').and_return({'USER': 'root'})
@@ -873,6 +888,9 @@ def test_dump_data_sources_errors_for_missing_all_databases():
 
 
 def test_dump_data_sources_does_not_error_for_missing_all_databases_with_dry_run():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'all'}]
     flexmock(module).should_receive('make_dump_path').and_return('')
     flexmock(module.os).should_receive('environ').and_return({'USER': 'root'})

+ 21 - 0
tests/unit/hooks/data_source/test_mongodb.py

@@ -24,6 +24,9 @@ def test_use_streaming_false_for_no_databases():
 
 
 def test_dump_data_sources_runs_mongodump_for_each_database():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo'}, {'name': 'bar'}]
     processes = [flexmock(), flexmock()]
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -53,6 +56,9 @@ def test_dump_data_sources_runs_mongodump_for_each_database():
 
 
 def test_dump_data_sources_with_dry_run_skips_mongodump():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo'}, {'name': 'bar'}]
     flexmock(module).should_receive('make_dump_path').and_return('')
     flexmock(module.dump).should_receive('make_data_source_dump_filename').and_return(
@@ -75,6 +81,9 @@ def test_dump_data_sources_with_dry_run_skips_mongodump():
 
 
 def test_dump_data_sources_runs_mongodump_with_hostname_and_port():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo', 'hostname': 'database.example.org', 'port': 5433}]
     process = flexmock()
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -111,6 +120,9 @@ def test_dump_data_sources_runs_mongodump_with_hostname_and_port():
 
 
 def test_dump_data_sources_runs_mongodump_with_username_and_password():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [
         {
             'name': 'foo',
@@ -162,6 +174,9 @@ def test_dump_data_sources_runs_mongodump_with_username_and_password():
 
 
 def test_dump_data_sources_runs_mongodump_with_directory_format():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo', 'format': 'directory'}]
     flexmock(module).should_receive('make_dump_path').and_return('')
     flexmock(module.dump).should_receive('make_data_source_dump_filename').and_return(
@@ -189,6 +204,9 @@ def test_dump_data_sources_runs_mongodump_with_directory_format():
 
 
 def test_dump_data_sources_runs_mongodump_with_options():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo', 'options': '--stuff=such'}]
     process = flexmock()
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -222,6 +240,9 @@ def test_dump_data_sources_runs_mongodump_with_options():
 
 
 def test_dump_data_sources_runs_mongodumpall_for_all_databases():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'all'}]
     process = flexmock()
     flexmock(module).should_receive('make_dump_path').and_return('')

+ 18 - 0
tests/unit/hooks/data_source/test_mysql.py

@@ -134,6 +134,9 @@ def test_use_streaming_false_for_no_databases():
 
 
 def test_dump_data_sources_dumps_each_database():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo'}, {'name': 'bar'}]
     processes = [flexmock(), flexmock()]
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -172,6 +175,9 @@ def test_dump_data_sources_dumps_each_database():
 
 
 def test_dump_data_sources_dumps_with_password():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     database = {'name': 'foo', 'username': 'root', 'password': 'trustsome1'}
     process = flexmock()
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -206,6 +212,9 @@ def test_dump_data_sources_dumps_with_password():
 
 
 def test_dump_data_sources_dumps_all_databases_at_once():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'all'}]
     process = flexmock()
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -237,6 +246,9 @@ def test_dump_data_sources_dumps_all_databases_at_once():
 
 
 def test_dump_data_sources_dumps_all_databases_separately_when_format_configured():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'all', 'format': 'sql'}]
     processes = [flexmock(), flexmock()]
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -762,6 +774,9 @@ def test_execute_dump_command_with_dry_run_skips_mysqldump():
 
 
 def test_dump_data_sources_errors_for_missing_all_databases():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'all'}]
     flexmock(module).should_receive('make_dump_path').and_return('')
     flexmock(module.os).should_receive('environ').and_return({'USER': 'root'})
@@ -785,6 +800,9 @@ def test_dump_data_sources_errors_for_missing_all_databases():
 
 
 def test_dump_data_sources_does_not_error_for_missing_all_databases_with_dry_run():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'all'}]
     flexmock(module).should_receive('make_dump_path').and_return('')
     flexmock(module.os).should_receive('environ').and_return({'USER': 'root'})

+ 42 - 0
tests/unit/hooks/data_source/test_postgresql.py

@@ -236,6 +236,9 @@ def test_use_streaming_false_for_no_databases():
 
 
 def test_dump_data_sources_runs_pg_dump_for_each_database():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo'}, {'name': 'bar'}]
     processes = [flexmock(), flexmock()]
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})
@@ -284,6 +287,9 @@ def test_dump_data_sources_runs_pg_dump_for_each_database():
 
 
 def test_dump_data_sources_raises_when_no_database_names_to_dump():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo'}, {'name': 'bar'}]
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -301,6 +307,9 @@ def test_dump_data_sources_raises_when_no_database_names_to_dump():
 
 
 def test_dump_data_sources_does_not_raise_when_no_database_names_to_dump():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo'}, {'name': 'bar'}]
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -317,6 +326,9 @@ def test_dump_data_sources_does_not_raise_when_no_database_names_to_dump():
 
 
 def test_dump_data_sources_with_duplicate_dump_skips_pg_dump():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo'}, {'name': 'bar'}]
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -344,6 +356,9 @@ def test_dump_data_sources_with_duplicate_dump_skips_pg_dump():
 
 
 def test_dump_data_sources_with_dry_run_skips_pg_dump():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo'}, {'name': 'bar'}]
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -374,6 +389,9 @@ def test_dump_data_sources_with_dry_run_skips_pg_dump():
 
 
 def test_dump_data_sources_runs_pg_dump_with_hostname_and_port():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo', 'hostname': 'database.example.org', 'port': 5433}]
     process = flexmock()
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})
@@ -420,6 +438,9 @@ def test_dump_data_sources_runs_pg_dump_with_hostname_and_port():
 
 
 def test_dump_data_sources_runs_pg_dump_with_username_and_password():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo', 'username': 'postgres', 'password': 'trustsome1'}]
     process = flexmock()
     flexmock(module).should_receive('make_environment').and_return(
@@ -466,6 +487,9 @@ def test_dump_data_sources_runs_pg_dump_with_username_and_password():
 
 
 def test_dump_data_sources_with_username_injection_attack_gets_escaped():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo', 'username': 'postgres; naughty-command', 'password': 'trustsome1'}]
     process = flexmock()
     flexmock(module).should_receive('make_environment').and_return(
@@ -512,6 +536,9 @@ def test_dump_data_sources_with_username_injection_attack_gets_escaped():
 
 
 def test_dump_data_sources_runs_pg_dump_with_directory_format():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo', 'format': 'directory'}]
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})
     flexmock(module).should_receive('make_dump_path').and_return('')
@@ -556,6 +583,9 @@ def test_dump_data_sources_runs_pg_dump_with_directory_format():
 
 
 def test_dump_data_sources_runs_pg_dump_with_string_compression():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo', 'compression': 'winrar'}]
     processes = [flexmock()]
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})
@@ -603,6 +633,9 @@ def test_dump_data_sources_runs_pg_dump_with_string_compression():
 
 
 def test_dump_data_sources_runs_pg_dump_with_integer_compression():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo', 'compression': 0}]
     processes = [flexmock()]
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})
@@ -650,6 +683,9 @@ def test_dump_data_sources_runs_pg_dump_with_integer_compression():
 
 
 def test_dump_data_sources_runs_pg_dump_with_options():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo', 'options': '--stuff=such'}]
     process = flexmock()
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})
@@ -693,6 +729,9 @@ def test_dump_data_sources_runs_pg_dump_with_options():
 
 
 def test_dump_data_sources_runs_pg_dumpall_for_all_databases():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'all'}]
     process = flexmock()
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})
@@ -725,6 +764,9 @@ def test_dump_data_sources_runs_pg_dumpall_for_all_databases():
 
 
 def test_dump_data_sources_runs_non_default_pg_dump():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'name': 'foo', 'pg_dump_command': 'special_pg_dump --compress *'}]
     process = flexmock()
     flexmock(module).should_receive('make_environment').and_return({'PGSSLMODE': 'disable'})

+ 18 - 0
tests/unit/hooks/data_source/test_sqlite.py

@@ -17,6 +17,9 @@ def test_use_streaming_false_for_no_databases():
 
 
 def test_dump_data_sources_logs_and_skips_if_dump_already_exists():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'path': '/path/to/database', 'name': 'database'}]
 
     flexmock(module).should_receive('make_dump_path').and_return('/run/borgmatic')
@@ -41,6 +44,9 @@ def test_dump_data_sources_logs_and_skips_if_dump_already_exists():
 
 
 def test_dump_data_sources_dumps_each_database():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [
         {'path': '/path/to/database1', 'name': 'database1'},
         {'path': '/path/to/database2', 'name': 'database2'},
@@ -71,6 +77,9 @@ def test_dump_data_sources_dumps_each_database():
 
 
 def test_dump_data_sources_with_path_injection_attack_gets_escaped():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [
         {'path': '/path/to/database1; naughty-command', 'name': 'database1'},
     ]
@@ -108,6 +117,9 @@ def test_dump_data_sources_with_path_injection_attack_gets_escaped():
 
 
 def test_dump_data_sources_with_non_existent_path_warns_and_dumps_database():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [
         {'path': '/path/to/database1', 'name': 'database1'},
     ]
@@ -136,6 +148,9 @@ def test_dump_data_sources_with_non_existent_path_warns_and_dumps_database():
 
 
 def test_dump_data_sources_with_name_all_warns_and_dumps_all_databases():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [
         {'path': '/path/to/database1', 'name': 'all'},
     ]
@@ -166,6 +181,9 @@ def test_dump_data_sources_with_name_all_warns_and_dumps_all_databases():
 
 
 def test_dump_data_sources_does_not_dump_if_dry_run():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     databases = [{'path': '/path/to/database', 'name': 'database'}]
 
     flexmock(module).should_receive('make_dump_path').and_return('/run/borgmatic')

+ 15 - 0
tests/unit/hooks/data_source/test_zfs.py

@@ -296,6 +296,9 @@ def test_make_borg_snapshot_pattern_includes_slashdot_hack_and_stripped_pattern_
 
 
 def test_dump_data_sources_snapshots_and_mounts_and_updates_patterns():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     dataset = flexmock(
         name='dataset',
         mount_point='/mnt/dataset',
@@ -338,6 +341,9 @@ def test_dump_data_sources_snapshots_and_mounts_and_updates_patterns():
 
 
 def test_dump_data_sources_with_no_datasets_skips_snapshots():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     flexmock(module).should_receive('get_datasets_to_backup').and_return(())
     flexmock(module.os).should_receive('getpid').and_return(1234)
     flexmock(module).should_receive('snapshot_dataset').never()
@@ -360,6 +366,9 @@ def test_dump_data_sources_with_no_datasets_skips_snapshots():
 
 
 def test_dump_data_sources_uses_custom_commands():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     dataset = flexmock(
         name='dataset',
         mount_point='/mnt/dataset',
@@ -409,6 +418,9 @@ def test_dump_data_sources_uses_custom_commands():
 
 
 def test_dump_data_sources_with_dry_run_skips_commands_and_does_not_touch_patterns():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     flexmock(module).should_receive('get_datasets_to_backup').and_return(
         (flexmock(name='dataset', mount_point='/mnt/dataset'),)
     )
@@ -433,6 +445,9 @@ def test_dump_data_sources_with_dry_run_skips_commands_and_does_not_touch_patter
 
 
 def test_dump_data_sources_ignores_mismatch_between_given_patterns_and_contained_patterns():
+    flexmock(module.borgmatic.hooks.command).should_receive('Before_after_hooks').and_return(
+        flexmock()
+    )
     dataset = flexmock(
         name='dataset',
         mount_point='/mnt/dataset',

+ 482 - 39
tests/unit/hooks/test_command.py

@@ -1,6 +1,7 @@
 import logging
 import subprocess
 
+import pytest
 from flexmock import flexmock
 
 from borgmatic.hooks import command as module
@@ -48,43 +49,274 @@ def test_make_environment_with_pyinstaller_and_LD_LIBRARY_PATH_ORIG_copies_it_in
     ) == {'LD_LIBRARY_PATH_ORIG': '/lib/lib/lib', 'LD_LIBRARY_PATH': '/lib/lib/lib'}
 
 
-def test_execute_hook_invokes_each_command():
+@pytest.mark.parametrize(
+    'hooks,filters,expected_hooks',
+    (
+        (
+            (
+                {
+                    'before': 'action',
+                    'run': ['foo'],
+                },
+                {
+                    'after': 'action',
+                    'run': ['bar'],
+                },
+                {
+                    'before': 'repository',
+                    'run': ['baz'],
+                },
+            ),
+            {},
+            (
+                {
+                    'before': 'action',
+                    'run': ['foo'],
+                },
+                {
+                    'after': 'action',
+                    'run': ['bar'],
+                },
+                {
+                    'before': 'repository',
+                    'run': ['baz'],
+                },
+            ),
+        ),
+        (
+            (
+                {
+                    'before': 'action',
+                    'run': ['foo'],
+                },
+                {
+                    'after': 'action',
+                    'run': ['bar'],
+                },
+                {
+                    'before': 'repository',
+                    'run': ['baz'],
+                },
+            ),
+            {
+                'before': 'action',
+            },
+            (
+                {
+                    'before': 'action',
+                    'run': ['foo'],
+                },
+            ),
+        ),
+        (
+            (
+                {
+                    'after': 'action',
+                    'run': ['foo'],
+                },
+                {
+                    'before': 'action',
+                    'run': ['bar'],
+                },
+                {
+                    'after': 'repository',
+                    'run': ['baz'],
+                },
+            ),
+            {
+                'after': 'action',
+            },
+            (
+                {
+                    'after': 'action',
+                    'run': ['foo'],
+                },
+            ),
+        ),
+        (
+            (
+                {
+                    'before': 'dump_data_sources',
+                    'hooks': ['postgresql'],
+                    'run': ['foo'],
+                },
+                {
+                    'before': 'dump_data_sources',
+                    'hooks': ['lvm'],
+                    'run': ['bar'],
+                },
+                {
+                    'after': 'dump_data_sources',
+                    'hooks': ['lvm'],
+                    'run': ['baz'],
+                },
+            ),
+            {
+                'before': 'dump_data_sources',
+                'hook_name': 'lvm',
+            },
+            (
+                {
+                    'before': 'dump_data_sources',
+                    'hooks': ['lvm'],
+                    'run': ['bar'],
+                },
+            ),
+        ),
+        (
+            (
+                {
+                    'before': 'dump_data_sources',
+                    'run': ['foo'],
+                },
+                {
+                    'before': 'dump_data_sources',
+                    'run': ['bar'],
+                },
+                {
+                    'after': 'dump_data_sources',
+                    'run': ['baz'],
+                },
+            ),
+            {
+                'before': 'dump_data_sources',
+                'hook_name': 'lvm',
+            },
+            (
+                {
+                    'before': 'dump_data_sources',
+                    'run': ['foo'],
+                },
+                {
+                    'before': 'dump_data_sources',
+                    'run': ['bar'],
+                },
+            ),
+        ),
+        (
+            (
+                {
+                    'before': 'dump_data_sources',
+                    'hooks': ['postgresql', 'zfs', 'lvm'],
+                    'run': ['foo'],
+                },
+            ),
+            {
+                'before': 'dump_data_sources',
+                'hook_name': 'lvm',
+            },
+            (
+                {
+                    'before': 'dump_data_sources',
+                    'hooks': ['postgresql', 'zfs', 'lvm'],
+                    'run': ['foo'],
+                },
+            ),
+        ),
+        (
+            (
+                {
+                    'before': 'action',
+                    'when': ['create'],
+                    'run': ['foo'],
+                },
+                {
+                    'before': 'action',
+                    'when': ['prune'],
+                    'run': ['bar'],
+                },
+                {
+                    'before': 'action',
+                    'when': ['compact'],
+                    'run': ['baz'],
+                },
+            ),
+            {
+                'before': 'action',
+                'action_names': ['create', 'compact', 'extract'],
+            },
+            (
+                {
+                    'before': 'action',
+                    'when': ['create'],
+                    'run': ['foo'],
+                },
+                {
+                    'before': 'action',
+                    'when': ['compact'],
+                    'run': ['baz'],
+                },
+            ),
+        ),
+        (
+            (
+                {
+                    'before': 'action',
+                    'run': ['foo'],
+                },
+                {
+                    'before': 'action',
+                    'run': ['bar'],
+                },
+                {
+                    'before': 'action',
+                    'run': ['baz'],
+                },
+            ),
+            {
+                'before': 'action',
+                'action_names': ['create', 'compact', 'extract'],
+            },
+            (
+                {
+                    'before': 'action',
+                    'run': ['foo'],
+                },
+                {
+                    'before': 'action',
+                    'run': ['bar'],
+                },
+                {
+                    'before': 'action',
+                    'run': ['baz'],
+                },
+            ),
+        ),
+    ),
+)
+def test_filter_hooks(hooks, filters, expected_hooks):
+    assert module.filter_hooks(hooks, **filters) == expected_hooks
+
+
+LOGGING_ANSWER = flexmock()
+
+
+def test_execute_hooks_invokes_each_hook_and_command():
+    flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
+    flexmock(module.logging).ANSWER = LOGGING_ANSWER
     flexmock(module).should_receive('interpolate_context').replace_with(
         lambda hook_description, command, context: command
     )
     flexmock(module).should_receive('make_environment').and_return({})
-    flexmock(module.borgmatic.execute).should_receive('execute_command').with_args(
-        [':'],
-        output_log_level=logging.WARNING,
-        shell=True,
-        environment={},
-    ).once()
 
-    module.execute_hook([':'], None, 'config.yaml', 'pre-backup', dry_run=False)
-
-
-def test_execute_hook_with_multiple_commands_invokes_each_command():
-    flexmock(module).should_receive('interpolate_context').replace_with(
-        lambda hook_description, command, context: command
+    for command in ('foo', 'bar', 'baz'):
+        flexmock(module.borgmatic.execute).should_receive('execute_command').with_args(
+            [command],
+            output_log_level=LOGGING_ANSWER,
+            shell=True,
+            environment={},
+        ).once()
+
+    module.execute_hooks(
+        [{'before': 'create', 'run': ['foo']}, {'before': 'create', 'run': ['bar', 'baz']}],
+        umask=None,
+        dry_run=False,
     )
-    flexmock(module).should_receive('make_environment').and_return({})
-    flexmock(module.borgmatic.execute).should_receive('execute_command').with_args(
-        [':'],
-        output_log_level=logging.WARNING,
-        shell=True,
-        environment={},
-    ).once()
-    flexmock(module.borgmatic.execute).should_receive('execute_command').with_args(
-        ['true'],
-        output_log_level=logging.WARNING,
-        shell=True,
-        environment={},
-    ).once()
-
-    module.execute_hook([':', 'true'], None, 'config.yaml', 'pre-backup', dry_run=False)
 
 
-def test_execute_hook_with_umask_sets_that_umask():
+def test_execute_hooks_with_umask_sets_that_umask():
+    flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
+    flexmock(module.logging).ANSWER = LOGGING_ANSWER
     flexmock(module).should_receive('interpolate_context').replace_with(
         lambda hook_description, command, context: command
     )
@@ -92,42 +324,253 @@ def test_execute_hook_with_umask_sets_that_umask():
     flexmock(module.os).should_receive('umask').with_args(0o22).once()
     flexmock(module).should_receive('make_environment').and_return({})
     flexmock(module.borgmatic.execute).should_receive('execute_command').with_args(
-        [':'],
-        output_log_level=logging.WARNING,
+        ['foo'],
+        output_log_level=LOGGING_ANSWER,
         shell=True,
         environment={},
     )
 
-    module.execute_hook([':'], 77, 'config.yaml', 'pre-backup', dry_run=False)
+    module.execute_hooks([{'before': 'create', 'run': ['foo']}], umask=77, dry_run=False)
 
 
-def test_execute_hook_with_dry_run_skips_commands():
+def test_execute_hooks_with_dry_run_skips_commands():
+    flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
+    flexmock(module.logging).ANSWER = LOGGING_ANSWER
     flexmock(module).should_receive('interpolate_context').replace_with(
         lambda hook_description, command, context: command
     )
     flexmock(module).should_receive('make_environment').and_return({})
     flexmock(module.borgmatic.execute).should_receive('execute_command').never()
 
-    module.execute_hook([':', 'true'], None, 'config.yaml', 'pre-backup', dry_run=True)
+    module.execute_hooks([{'before': 'create', 'run': ['foo']}], umask=None, dry_run=True)
 
 
-def test_execute_hook_with_empty_commands_does_not_raise():
-    module.execute_hook([], None, 'config.yaml', 'post-backup', dry_run=False)
+def test_execute_hooks_with_empty_commands_does_not_raise():
+    module.execute_hooks([], umask=None, dry_run=False)
 
 
-def test_execute_hook_on_error_logs_as_error():
+def test_execute_hooks_with_error_logs_as_error():
+    flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
+    flexmock(module.logging).ANSWER = LOGGING_ANSWER
     flexmock(module).should_receive('interpolate_context').replace_with(
         lambda hook_description, command, context: command
     )
     flexmock(module).should_receive('make_environment').and_return({})
     flexmock(module.borgmatic.execute).should_receive('execute_command').with_args(
-        [':'],
+        ['foo'],
         output_log_level=logging.ERROR,
         shell=True,
         environment={},
     ).once()
 
-    module.execute_hook([':'], None, 'config.yaml', 'on-error', dry_run=False)
+    module.execute_hooks([{'after': 'error', 'run': ['foo']}], umask=None, dry_run=False)
+
+
+def test_execute_hooks_without_before_or_after_raises():
+    flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
+    flexmock(module.logging).ANSWER = LOGGING_ANSWER
+    flexmock(module).should_receive('interpolate_context').never()
+    flexmock(module).should_receive('make_environment').never()
+    flexmock(module.borgmatic.execute).should_receive('execute_command').never()
+
+    with pytest.raises(ValueError):
+        module.execute_hooks(
+            [
+                {'erstwhile': 'create', 'run': ['foo']},
+                {'erstwhile': 'create', 'run': ['bar', 'baz']},
+            ],
+            umask=None,
+            dry_run=False,
+        )
+
+
+def test_execute_hooks_without_commands_to_run_does_not_raise():
+    flexmock(module.borgmatic.logger).should_receive('add_custom_log_levels')
+    flexmock(module.logging).ANSWER = LOGGING_ANSWER
+    flexmock(module).should_receive('interpolate_context').replace_with(
+        lambda hook_description, command, context: command
+    )
+    flexmock(module).should_receive('make_environment').and_return({})
+
+    for command in ('foo', 'bar'):
+        flexmock(module.borgmatic.execute).should_receive('execute_command').with_args(
+            [command],
+            output_log_level=LOGGING_ANSWER,
+            shell=True,
+            environment={},
+        ).once()
+
+    module.execute_hooks(
+        [{'before': 'create', 'run': []}, {'before': 'create', 'run': ['foo', 'bar']}],
+        umask=None,
+        dry_run=False,
+    )
+
+
+def test_before_after_hooks_calls_command_hooks():
+    commands = [
+        {'before': 'repository', 'run': ['foo', 'bar']},
+        {'after': 'repository', 'run': ['baz']},
+    ]
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        before='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        after='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('execute_hooks').twice()
+
+    with module.Before_after_hooks(
+        command_hooks=commands,
+        before_after='action',
+        umask=1234,
+        dry_run=False,
+        hook_name='myhook',
+        action_names=['create'],
+        context1='stuff',
+        context2='such',
+    ):
+        pass
+
+
+def test_before_after_hooks_with_before_error_raises_and_skips_after_hook():
+    commands = [
+        {'before': 'repository', 'run': ['foo', 'bar']},
+        {'after': 'repository', 'run': ['baz']},
+    ]
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        before='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        after='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).never()
+    flexmock(module).should_receive('execute_hooks').and_raise(OSError)
+    flexmock(module).should_receive('considered_soft_failure').and_return(False)
+
+    with pytest.raises(ValueError):
+        with module.Before_after_hooks(
+            command_hooks=commands,
+            before_after='action',
+            umask=1234,
+            dry_run=False,
+            hook_name='myhook',
+            action_names=['create'],
+            context1='stuff',
+            context2='such',
+        ):
+            assert False  # This should never get called.
+
+
+def test_before_after_hooks_with_before_soft_failure_does_not_raise():
+    commands = [
+        {'before': 'repository', 'run': ['foo', 'bar']},
+        {'after': 'repository', 'run': ['baz']},
+    ]
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        before='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        after='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('execute_hooks').and_raise(OSError)
+    flexmock(module).should_receive('considered_soft_failure').and_return(True)
+
+    with module.Before_after_hooks(
+        command_hooks=commands,
+        before_after='action',
+        umask=1234,
+        dry_run=False,
+        hook_name='myhook',
+        action_names=['create'],
+        context1='stuff',
+        context2='such',
+    ):
+        pass
+
+
+def test_before_after_hooks_with_after_error_raises():
+    commands = [
+        {'before': 'repository', 'run': ['foo', 'bar']},
+        {'after': 'repository', 'run': ['baz']},
+    ]
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        before='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        after='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('execute_hooks').and_return(None).and_raise(OSError)
+    flexmock(module).should_receive('considered_soft_failure').and_return(False)
+
+    with pytest.raises(ValueError):
+        with module.Before_after_hooks(
+            command_hooks=commands,
+            before_after='action',
+            umask=1234,
+            dry_run=False,
+            hook_name='myhook',
+            action_names=['create'],
+            context1='stuff',
+            context2='such',
+        ):
+            pass
+
+
+def test_before_after_hooks_with_after_soft_failure_does_not_raise():
+    commands = [
+        {'before': 'repository', 'run': ['foo', 'bar']},
+        {'after': 'repository', 'run': ['baz']},
+    ]
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        before='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        after='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('execute_hooks').and_return(None).and_raise(OSError)
+    flexmock(module).should_receive('considered_soft_failure').and_return(True)
+
+    with module.Before_after_hooks(
+        command_hooks=commands,
+        before_after='action',
+        umask=1234,
+        dry_run=False,
+        hook_name='myhook',
+        action_names=['create'],
+        context1='stuff',
+        context2='such',
+    ):
+        pass
 
 
 def test_considered_soft_failure_treats_soft_fail_exit_code_as_soft_fail():

Некоторые файлы не были показаны из-за большого количества измененных файлов