Selaa lähdekoodia

Correctly handle errors in command hooks (#1019).

Dan Helfman 3 kuukautta sitten
vanhempi
sitoutus
7965eb9de3

+ 55 - 44
borgmatic/commands/borgmatic.py

@@ -207,9 +207,8 @@ def run_configuration(config_filename, config, config_paths, arguments):
                     global_arguments.dry_run,
                 )
         except (OSError, CalledProcessError) as error:
-            if not command.considered_soft_failure(error):
-                encountered_error = error
-                yield from log_error_records('Error pinging monitor', error)
+            encountered_error = error
+            yield from log_error_records('Error pinging monitor', error)
 
         if not encountered_error:
             try:
@@ -231,27 +230,35 @@ def run_configuration(config_filename, config, config_paths, arguments):
                         global_arguments.dry_run,
                     )
             except (OSError, CalledProcessError) as error:
-                if command.considered_soft_failure(error):
-                    return
-
                 encountered_error = error
                 yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
+            else:
+                return
 
-        if encountered_error:
-            try:
-                command.execute_hooks(
-                    command.filter_hooks(
-                        config.get('commands'), after='error', action_names=arguments.keys()
-                    ),
-                    config.get('umask'),
-                    global_arguments.dry_run,
-                    configuration_filename=config_filename,
-                    log_file=arguments['global'].log_file,
-                    repository=error_repository.get('path', '') if error_repository else '',
-                    repository_label=error_repository.get('label', '') if error_repository else '',
-                    error=encountered_error,
-                    output=getattr(encountered_error, 'output', ''),
-                )
+        try:
+            command.execute_hooks(
+                command.filter_hooks(
+                    config.get('commands'), after='error', action_names=arguments.keys()
+                ),
+                config.get('umask'),
+                global_arguments.dry_run,
+                configuration_filename=config_filename,
+                log_file=arguments['global'].log_file,
+                repository=error_repository.get('path', '') if error_repository else '',
+                repository_label=error_repository.get('label', '') if error_repository else '',
+                error=encountered_error,
+                output=getattr(encountered_error, 'output', ''),
+            )
+        except (OSError, CalledProcessError) as error:
+            if command.considered_soft_failure(error):
+                return
+
+            yield from log_error_records(
+                f'{config_filename}: Error running after error hook', error
+            )
+
+        try:
+            if monitoring_hooks_are_activated:
                 dispatch.call_hooks(
                     'ping_monitor',
                     config,
@@ -268,13 +275,8 @@ def run_configuration(config_filename, config, config_paths, arguments):
                     monitoring_log_level,
                     global_arguments.dry_run,
                 )
-            except (OSError, CalledProcessError) as error:
-                if command.considered_soft_failure(error):
-                    return
-
-                yield from log_error_records(
-                    f'{config_filename}: Error running after error hook', error
-                )
+        except (OSError, CalledProcessError) as error:
+            yield from log_error_records(f'{config_filename}: Error pinging monitor', error)
 
 
 def run_actions(
@@ -843,24 +845,33 @@ def collect_configuration_run_summary_logs(configs, config_paths, arguments):
 
     for config_filename, config in configs.items():
         with Log_prefix(config_filename):
-            results = list(run_configuration(config_filename, config, config_paths, arguments))
-            error_logs = tuple(
-                result for result in results if isinstance(result, logging.LogRecord)
-            )
-
-            if error_logs:
-                yield from log_error_records('An error occurred')
-                yield from error_logs
+            try:
+                results = list(run_configuration(config_filename, config, config_paths, arguments))
+            except (OSError, CalledProcessError, ValueError) as error:
+                yield from log_error_records(
+                    'Error running configuration file',
+                    error,
+                    levelno=logging.CRITICAL,
+                    log_command_error_output=True,
+                )
             else:
-                yield logging.makeLogRecord(
-                    dict(
-                        levelno=logging.INFO,
-                        levelname='INFO',
-                        msg='Successfully ran configuration file',
-                    )
+                error_logs = tuple(
+                    result for result in results if isinstance(result, logging.LogRecord)
                 )
-                if results:
-                    json_results.extend(results)
+
+                if error_logs:
+                    yield from log_error_records('An error occurred')
+                    yield from error_logs
+                else:
+                    yield logging.makeLogRecord(
+                        dict(
+                            levelno=logging.INFO,
+                            levelname='INFO',
+                            msg='Successfully ran configuration file',
+                        )
+                    )
+                    if results:
+                        json_results.extend(results)
 
     if 'umount' in arguments:
         logger.info(f"Unmounting mount point {arguments['umount'].mount_point}")

+ 37 - 24
borgmatic/hooks/command.py

@@ -2,6 +2,7 @@ import logging
 import os
 import re
 import shlex
+import subprocess
 import sys
 
 import borgmatic.execute
@@ -53,7 +54,7 @@ def filter_hooks(command_hooks, before=None, after=None, hook_name=None, action_
     '''
     return tuple(
         hook_config
-        for hook_config in command_hooks
+        for hook_config in command_hooks or ()
         for config_hook_names in (hook_config.get('hooks'),)
         for config_action_names in (hook_config.get('when'),)
         if before is None or hook_config.get('before') == before
@@ -97,7 +98,7 @@ def execute_hooks(command_hooks, umask, dry_run, **context):
         commands = [interpolate_context(description, command, context) for command in commands]
 
         if len(commands) == 1:
-            logger.info(f'Running command for {description} hook{dry_run_label}')
+            logger.info(f'Running {description} command hook{dry_run_label}')
         else:
             logger.info(
                 f'Running {len(commands)} commands for {description} hook{dry_run_label}',
@@ -176,33 +177,45 @@ class Before_after_hooks:
         '''
         Run the configured "before" command hooks that match the initialized data points.
         '''
-        execute_hooks(
-            borgmatic.hooks.command.filter_hooks(
-                self.command_hooks,
-                before=self.before_after,
-                hook_name=self.hook_name,
-                action_names=self.action_names,
-            ),
-            self.umask,
-            self.dry_run,
-            **self.context,
-        )
+        try:
+            execute_hooks(
+                borgmatic.hooks.command.filter_hooks(
+                    self.command_hooks,
+                    before=self.before_after,
+                    hook_name=self.hook_name,
+                    action_names=self.action_names,
+                ),
+                self.umask,
+                self.dry_run,
+                **self.context,
+            )
+        except (OSError, subprocess.CalledProcessError) as error:
+            if considered_soft_failure(error):
+                return
+
+            raise ValueError(f'Error running before {self.before_after} hook: {error}')
 
     def __exit__(self, exception, value, traceback):
         '''
         Run the configured "after" command hooks that match the initialized data points.
         '''
-        execute_hooks(
-            borgmatic.hooks.command.filter_hooks(
-                self.command_hooks,
-                after=self.before_after,
-                hook_name=self.hook_name,
-                action_names=self.action_names,
-            ),
-            self.umask,
-            self.dry_run,
-            **self.context,
-        )
+        try:
+            execute_hooks(
+                borgmatic.hooks.command.filter_hooks(
+                    self.command_hooks,
+                    after=self.before_after,
+                    hook_name=self.hook_name,
+                    action_names=self.action_names,
+                ),
+                self.umask,
+                self.dry_run,
+                **self.context,
+            )
+        except (OSError, subprocess.CalledProcessError) as error:
+            if considered_soft_failure(error):
+                return
+
+            raise ValueError(f'Error running after {self.before_after} hook: {error}')
 
 
 def considered_soft_failure(error):

+ 48 - 33
tests/unit/commands/test_borgmatic.py

@@ -189,30 +189,6 @@ def test_run_configuration_logs_monitor_log_error():
     assert results == expected_results
 
 
-def test_run_configuration_still_pings_monitor_for_monitor_log_soft_failure():
-    flexmock(module).should_receive('verbosity_to_log_level').and_return(logging.INFO)
-    flexmock(module).should_receive('get_skip_actions').and_return([])
-    flexmock(module.command).should_receive('Before_after_hooks').and_return(flexmock())
-    flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
-    error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again')
-    flexmock(module.dispatch).should_receive('call_hooks').and_return(None).and_return(
-        None
-    ).and_raise(error).and_return(None).and_return(None).times(5)
-    flexmock(module).should_receive('log_error_records').never()
-    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
-    flexmock(module).should_receive('run_actions').and_return([])
-    flexmock(module.command).should_receive('considered_soft_failure').and_return(True)
-    config = {'repositories': [{'path': 'foo'}]}
-    arguments = {
-        'global': flexmock(monitoring_verbosity=1, dry_run=False, log_file=flexmock()),
-        'create': flexmock(),
-    }
-
-    results = list(module.run_configuration('test.yaml', config, ['/tmp/test.yaml'], arguments))
-
-    assert results == []
-
-
 def test_run_configuration_logs_monitor_finish_error():
     flexmock(module).should_receive('verbosity_to_log_level').and_return(logging.INFO)
     flexmock(module).should_receive('get_skip_actions').and_return([])
@@ -238,19 +214,38 @@ def test_run_configuration_logs_monitor_finish_error():
     assert results == expected_results
 
 
-def test_run_configuration_bails_for_monitor_finish_soft_failure():
+def test_run_configuration_logs_monitor_fail_error():
     flexmock(module).should_receive('verbosity_to_log_level').and_return(logging.INFO)
     flexmock(module).should_receive('get_skip_actions').and_return([])
     flexmock(module.command).should_receive('Before_after_hooks').and_return(flexmock())
     flexmock(module.borg_version).should_receive('local_borg_version').and_return(flexmock())
-    error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again')
-    flexmock(module.dispatch).should_receive('call_hooks').and_return(None).and_return(
-        None
-    ).and_raise(None).and_raise(error)
-    flexmock(module).should_receive('log_error_records').never()
+    flexmock(module.dispatch).should_receive('call_hooks')
+
+    # Trigger an error in the monitor finish so that the monitor fail also gets triggered.
+    flexmock(module.dispatch).should_receive('call_hooks').with_args(
+        'ping_monitor',
+        object,
+        module.dispatch.Hook_type.MONITORING,
+        object,
+        module.monitor.State.FINISH,
+        object,
+        object,
+    ).and_raise(OSError)
+    flexmock(module.dispatch).should_receive('call_hooks').with_args(
+        'ping_monitor',
+        object,
+        module.dispatch.Hook_type.MONITORING,
+        object,
+        module.monitor.State.FAIL,
+        object,
+        object,
+    ).and_raise(OSError).once()
+    expected_results = [flexmock()]
+    flexmock(module).should_receive('log_error_records').and_return(expected_results)
     flexmock(module).should_receive('Log_prefix').and_return(flexmock())
     flexmock(module).should_receive('run_actions').and_return([])
-    flexmock(module.command).should_receive('considered_soft_failure').and_return(True)
+    flexmock(module.command).should_receive('filter_hooks')
+    flexmock(module.command).should_receive('execute_hooks')
     config = {'repositories': [{'path': 'foo'}]}
     arguments = {
         'global': flexmock(monitoring_verbosity=1, dry_run=False, log_file=flexmock()),
@@ -259,7 +254,7 @@ def test_run_configuration_bails_for_monitor_finish_soft_failure():
 
     results = list(module.run_configuration('test.yaml', config, ['/tmp/test.yaml'], arguments))
 
-    assert results == []
+    assert results == expected_results + expected_results
 
 
 def test_run_configuration_does_not_call_monitoring_hooks_if_monitoring_hooks_are_disabled():
@@ -1700,7 +1695,7 @@ def test_collect_configuration_run_summary_logs_info_for_success_with_list():
     assert {log.levelno for log in logs} == {logging.INFO}
 
 
-def test_collect_configuration_run_summary_logs_run_configuration_error():
+def test_collect_configuration_run_summary_logs_run_configuration_error_logs():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
     flexmock(module.command).should_receive('filter_hooks')
     flexmock(module.command).should_receive('execute_hooks')
@@ -1720,6 +1715,26 @@ def test_collect_configuration_run_summary_logs_run_configuration_error():
     assert {log.levelno for log in logs} == {logging.CRITICAL}
 
 
+def test_collect_configuration_run_summary_logs_run_configuration_exception():
+    flexmock(module.validate).should_receive('guard_configuration_contains_repository')
+    flexmock(module.command).should_receive('filter_hooks')
+    flexmock(module.command).should_receive('execute_hooks')
+    flexmock(module).should_receive('Log_prefix').and_return(flexmock())
+    flexmock(module).should_receive('run_configuration').and_raise(ValueError)
+    flexmock(module).should_receive('log_error_records').and_return(
+        [flexmock(levelno=logging.CRITICAL)]
+    )
+    arguments = {'global': flexmock(dry_run=False, log_file=flexmock())}
+
+    logs = tuple(
+        module.collect_configuration_run_summary_logs(
+            {'test.yaml': {}}, config_paths=['/tmp/test.yaml'], arguments=arguments
+        )
+    )
+
+    assert {log.levelno for log in logs} == {logging.CRITICAL}
+
+
 def test_collect_configuration_run_summary_logs_run_umount_error():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
     flexmock(module.command).should_receive('filter_hooks')

+ 134 - 0
tests/unit/hooks/test_command.py

@@ -439,6 +439,140 @@ def test_before_after_hooks_calls_command_hooks():
         pass
 
 
+def test_before_after_hooks_with_before_error_raises_and_skips_after_hook():
+    commands = [
+        {'before': 'repository', 'run': ['foo', 'bar']},
+        {'after': 'repository', 'run': ['baz']},
+    ]
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        before='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        after='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).never()
+    flexmock(module).should_receive('execute_hooks').and_raise(OSError)
+    flexmock(module).should_receive('considered_soft_failure').and_return(False)
+
+    with pytest.raises(ValueError):
+        with module.Before_after_hooks(
+            command_hooks=commands,
+            before_after='action',
+            umask=1234,
+            dry_run=False,
+            hook_name='myhook',
+            action_names=['create'],
+            context1='stuff',
+            context2='such',
+        ):
+            assert False  # This should never get called.
+
+
+def test_before_after_hooks_with_before_soft_failure_does_not_raise():
+    commands = [
+        {'before': 'repository', 'run': ['foo', 'bar']},
+        {'after': 'repository', 'run': ['baz']},
+    ]
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        before='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        after='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('execute_hooks').and_raise(OSError)
+    flexmock(module).should_receive('considered_soft_failure').and_return(True)
+
+    with module.Before_after_hooks(
+        command_hooks=commands,
+        before_after='action',
+        umask=1234,
+        dry_run=False,
+        hook_name='myhook',
+        action_names=['create'],
+        context1='stuff',
+        context2='such',
+    ):
+        pass
+
+
+def test_before_after_hooks_with_after_error_raises():
+    commands = [
+        {'before': 'repository', 'run': ['foo', 'bar']},
+        {'after': 'repository', 'run': ['baz']},
+    ]
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        before='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        after='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('execute_hooks').and_return(None).and_raise(OSError)
+    flexmock(module).should_receive('considered_soft_failure').and_return(False)
+
+    with pytest.raises(ValueError):
+        with module.Before_after_hooks(
+            command_hooks=commands,
+            before_after='action',
+            umask=1234,
+            dry_run=False,
+            hook_name='myhook',
+            action_names=['create'],
+            context1='stuff',
+            context2='such',
+        ):
+            pass
+
+
+def test_before_after_hooks_with_after_soft_failure_does_not_raise():
+    commands = [
+        {'before': 'repository', 'run': ['foo', 'bar']},
+        {'after': 'repository', 'run': ['baz']},
+    ]
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        before='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('filter_hooks').with_args(
+        commands,
+        after='action',
+        hook_name='myhook',
+        action_names=['create'],
+    ).and_return(flexmock()).once()
+    flexmock(module).should_receive('execute_hooks').and_return(None).and_raise(OSError)
+    flexmock(module).should_receive('considered_soft_failure').and_return(True)
+
+    with module.Before_after_hooks(
+        command_hooks=commands,
+        before_after='action',
+        umask=1234,
+        dry_run=False,
+        hook_name='myhook',
+        action_names=['create'],
+        context1='stuff',
+        context2='such',
+    ):
+        pass
+
+
 def test_considered_soft_failure_treats_soft_fail_exit_code_as_soft_fail():
     error = subprocess.CalledProcessError(module.SOFT_FAIL_EXIT_CODE, 'try again')