@@ -1,3 +1,4 @@
+import logging
 import subprocess
 
 from flexmock import flexmock
@@ -5,6 +6,90 @@ from flexmock import flexmock
 from borgmatic.commands import borgmatic as module
 
 
+def test_run_configuration_runs_actions_for_each_repository():
+    flexmock(module.borg_environment).should_receive('initialize')
+    expected_results = [flexmock(), flexmock()]
+    flexmock(module).should_receive('run_actions').and_return(expected_results[:1]).and_return(
+        expected_results[1:]
+    )
+    config = {'location': {'repositories': ['foo', 'bar']}}
+    arguments = {'global': flexmock()}
+
+    results = list(module.run_configuration('test.yaml', config, arguments))
+
+    assert results == expected_results
+
+
+def test_run_configuration_executes_hooks_for_create_action():
+    flexmock(module.borg_environment).should_receive('initialize')
+    flexmock(module.hook).should_receive('execute_hook').twice()
+    flexmock(module).should_receive('run_actions').and_return([])
+    config = {'location': {'repositories': ['foo']}}
+    arguments = {'global': flexmock(dry_run=False), 'create': flexmock()}
+
+    list(module.run_configuration('test.yaml', config, arguments))
+
+
+def test_run_configuration_logs_actions_error():
+    flexmock(module.borg_environment).should_receive('initialize')
+    flexmock(module.hook).should_receive('execute_hook')
+    expected_results = [flexmock()]
+    flexmock(module).should_receive('make_error_log_records').and_return(expected_results)
+    flexmock(module).should_receive('run_actions').and_raise(OSError)
+    config = {'location': {'repositories': ['foo']}}
+    arguments = {'global': flexmock(dry_run=False)}
+
+    results = list(module.run_configuration('test.yaml', config, arguments))
+
+    assert results == expected_results
+
+
+def test_run_configuration_logs_pre_hook_error():
+    flexmock(module.borg_environment).should_receive('initialize')
+    flexmock(module.hook).should_receive('execute_hook').and_raise(OSError).and_return(None)
+    expected_results = [flexmock()]
+    flexmock(module).should_receive('make_error_log_records').and_return(expected_results)
+    flexmock(module).should_receive('run_actions').never()
+    config = {'location': {'repositories': ['foo']}}
+    arguments = {'global': flexmock(dry_run=False), 'create': flexmock()}
+
+    results = list(module.run_configuration('test.yaml', config, arguments))
+
+    assert results == expected_results
+
+
+def test_run_configuration_logs_post_hook_error():
+    flexmock(module.borg_environment).should_receive('initialize')
+    flexmock(module.hook).should_receive('execute_hook').and_return(None).and_raise(
+        OSError
+    ).and_return(None)
+    expected_results = [flexmock()]
+    flexmock(module).should_receive('make_error_log_records').and_return(expected_results)
+    flexmock(module).should_receive('run_actions').and_return([])
+    config = {'location': {'repositories': ['foo']}}
+    arguments = {'global': flexmock(dry_run=False), 'create': flexmock()}
+
+    results = list(module.run_configuration('test.yaml', config, arguments))
+
+    assert results == expected_results
+
+
+def test_run_configuration_logs_on_error_hook_error():
+    flexmock(module.borg_environment).should_receive('initialize')
+    flexmock(module.hook).should_receive('execute_hook').and_raise(OSError)
+    expected_results = [flexmock(), flexmock()]
+    flexmock(module).should_receive('make_error_log_records').and_return(
+        expected_results[:1]
+    ).and_return(expected_results[1:])
+    flexmock(module).should_receive('run_actions').and_raise(OSError)
+    config = {'location': {'repositories': ['foo']}}
+    arguments = {'global': flexmock(dry_run=False)}
+
+    results = list(module.run_configuration('test.yaml', config, arguments))
+
+    assert results == expected_results
+
+
 def test_load_configurations_collects_parsed_configurations():
     configuration = flexmock()
     other_configuration = flexmock()
@@ -24,34 +109,40 @@ def test_load_configurations_logs_critical_for_parse_error():
     configs, logs = tuple(module.load_configurations(('test.yaml',)))
 
     assert configs == {}
-    assert {log.levelno for log in logs} == {module.logging.CRITICAL}
+    assert {log.levelno for log in logs} == {logging.CRITICAL}
+
+
+def test_make_error_log_records_generates_output_logs_for_message_only():
+    logs = tuple(module.make_error_log_records('Error'))
+
+    assert {log.levelno for log in logs} == {logging.CRITICAL}
 
 
 def test_make_error_log_records_generates_output_logs_for_called_process_error():
     logs = tuple(
         module.make_error_log_records(
-            subprocess.CalledProcessError(1, 'ls', 'error output'), 'Error'
+            'Error', subprocess.CalledProcessError(1, 'ls', 'error output')
         )
     )
 
-    assert {log.levelno for log in logs} == {module.logging.CRITICAL}
+    assert {log.levelno for log in logs} == {logging.CRITICAL}
     assert any(log for log in logs if 'error output' in str(log))
 
 
 def test_make_error_log_records_generates_logs_for_value_error():
-    logs = tuple(module.make_error_log_records(ValueError(), 'Error'))
+    logs = tuple(module.make_error_log_records('Error', ValueError()))
 
-    assert {log.levelno for log in logs} == {module.logging.CRITICAL}
+    assert {log.levelno for log in logs} == {logging.CRITICAL}
 
 
 def test_make_error_log_records_generates_logs_for_os_error():
-    logs = tuple(module.make_error_log_records(OSError(), 'Error'))
+    logs = tuple(module.make_error_log_records('Error', OSError()))
 
-    assert {log.levelno for log in logs} == {module.logging.CRITICAL}
+    assert {log.levelno for log in logs} == {logging.CRITICAL}
 
 
 def test_make_error_log_records_generates_nothing_for_other_error():
-    logs = tuple(module.make_error_log_records(KeyError(), 'Error'))
+    logs = tuple(module.make_error_log_records('Error', KeyError()))
 
     assert logs == ()
 
@@ -65,7 +156,7 @@ def test_collect_configuration_run_summary_logs_info_for_success():
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
     )
 
-    assert {log.levelno for log in logs} == {module.logging.INFO}
+    assert {log.levelno for log in logs} == {logging.INFO}
 
 
 def test_collect_configuration_run_summary_executes_hooks_for_create():
@@ -76,7 +167,7 @@ def test_collect_configuration_run_summary_executes_hooks_for_create():
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
     )
 
-    assert {log.levelno for log in logs} == {module.logging.INFO}
+    assert {log.levelno for log in logs} == {logging.INFO}
 
 
 def test_collect_configuration_run_summary_logs_info_for_success_with_extract():
@@ -88,56 +179,74 @@ def test_collect_configuration_run_summary_logs_info_for_success_with_extract():
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
     )
 
-    assert {log.levelno for log in logs} == {module.logging.INFO}
+    assert {log.levelno for log in logs} == {logging.INFO}
 
 
-def test_collect_configuration_run_summary_logs_critical_for_extract_with_repository_error():
+def test_collect_configuration_run_summary_logs_extract_with_repository_error():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise(
         ValueError
     )
+    expected_logs = (flexmock(),)
+    flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
     arguments = {'extract': flexmock(repository='repo')}
 
     logs = tuple(
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
     )
 
-    assert {log.levelno for log in logs} == {module.logging.CRITICAL}
+    assert logs == expected_logs
+
+
+def test_collect_configuration_run_summary_logs_missing_configs_error():
+    arguments = {'global': flexmock(config_paths=[])}
+    expected_logs = (flexmock(),)
+    flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
+
+    logs = tuple(module.collect_configuration_run_summary_logs({}, arguments=arguments))
 
+    assert logs == expected_logs
 
-def test_collect_configuration_run_summary_logs_critical_for_pre_hook_error():
+
+def test_collect_configuration_run_summary_logs_pre_hook_error():
     flexmock(module.hook).should_receive('execute_hook').and_raise(ValueError)
+    expected_logs = (flexmock(),)
+    flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
     arguments = {'create': flexmock(), 'global': flexmock(dry_run=False)}
 
     logs = tuple(
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
     )
 
-    assert {log.levelno for log in logs} == {module.logging.CRITICAL}
+    assert logs == expected_logs
 
 
-def test_collect_configuration_run_summary_logs_critical_for_post_hook_error():
+def test_collect_configuration_run_summary_logs_post_hook_error():
     flexmock(module.hook).should_receive('execute_hook').and_return(None).and_raise(ValueError)
     flexmock(module).should_receive('run_configuration').and_return([])
+    expected_logs = (flexmock(),)
+    flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
     arguments = {'create': flexmock(), 'global': flexmock(dry_run=False)}
 
     logs = tuple(
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
     )
 
-    assert {log.levelno for log in logs} == {module.logging.INFO, module.logging.CRITICAL}
+    assert expected_logs[0] in logs
 
 
-def test_collect_configuration_run_summary_logs_critical_for_list_with_archive_and_repository_error():
+def test_collect_configuration_run_summary_logs_for_list_with_archive_and_repository_error():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise(
         ValueError
     )
+    expected_logs = (flexmock(),)
+    flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
     arguments = {'list': flexmock(repository='repo', archive='test')}
 
     logs = tuple(
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
     )
 
-    assert {log.levelno for log in logs} == {module.logging.CRITICAL}
+    assert logs == expected_logs
 
 
 def test_collect_configuration_run_summary_logs_info_for_success_with_list():
@@ -148,25 +257,13 @@ def test_collect_configuration_run_summary_logs_info_for_success_with_list():
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
     )
 
-    assert {log.levelno for log in logs} == {module.logging.INFO}
+    assert {log.levelno for log in logs} == {logging.INFO}
 
 
-def test_collect_configuration_run_summary_logs_critical_for_run_value_error():
+def test_collect_configuration_run_summary_logs_run_configuration_error():
     flexmock(module.validate).should_receive('guard_configuration_contains_repository')
-    flexmock(module).should_receive('run_configuration').and_raise(ValueError)
-    arguments = {}
-
-    logs = tuple(
-        module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
-    )
-
-    assert {log.levelno for log in logs} == {module.logging.CRITICAL}
-
-
-def test_collect_configuration_run_summary_logs_critical_including_output_for_run_process_error():
-    flexmock(module.validate).should_receive('guard_configuration_contains_repository')
-    flexmock(module).should_receive('run_configuration').and_raise(
-        subprocess.CalledProcessError(1, 'command', 'error output')
+    flexmock(module).should_receive('run_configuration').and_return(
+        [logging.makeLogRecord(dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg='Error'))]
     )
     arguments = {}
 
@@ -174,8 +271,7 @@ def test_collect_configuration_run_summary_logs_critical_including_output_for_ru
         module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
     )
 
-    assert {log.levelno for log in logs} == {module.logging.CRITICAL}
-    assert any(log for log in logs if 'error output' in str(log))
+    assert {log.levelno for log in logs} == {logging.CRITICAL}
 
 
 def test_collect_configuration_run_summary_logs_outputs_merged_json_results():
@@ -190,12 +286,3 @@ def test_collect_configuration_run_summary_logs_outputs_merged_json_results():
             {'test.yaml': {}, 'test2.yaml': {}}, arguments=arguments
         )
     )
-
-
-def test_collect_configuration_run_summary_logs_critical_for_missing_configs():
-    flexmock(module).should_receive('run_configuration').and_return([])
-    arguments = {'global': flexmock(config_paths=[])}
-
-    logs = tuple(module.collect_configuration_run_summary_logs({}, arguments=arguments))
-
-    assert {log.levelno for log in logs} == {module.logging.CRITICAL}
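
For context, a minimal sketch (inferred only from the assertions in these tests, not borgmatic's actual implementation) of the make_error_log_records() behavior being exercised: the summary message now comes first, the error argument is optional, and each yielded record is a CRITICAL-level logging.LogRecord.

# Hypothetical reconstruction based solely on the test expectations above.
import logging
import subprocess


def make_error_log_records(message, error=None):
    def critical_record(msg):
        # logging.makeLogRecord() builds a LogRecord from a dict of attributes.
        return logging.makeLogRecord(
            dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=msg)
        )

    if error is None:
        yield critical_record(message)
    elif isinstance(error, subprocess.CalledProcessError):
        yield critical_record(message)
        yield critical_record(error.output)  # surface the captured command output
    elif isinstance(error, (ValueError, OSError)):
        yield critical_record(message)
    # Any other exception type yields nothing, per
    # test_make_error_log_records_generates_nothing_for_other_error.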