import collections
import copy
import json
import logging
import os
import sys
from subprocess import CalledProcessError

import colorama
import pkg_resources

from borgmatic.borg import check as borg_check
from borgmatic.borg import create as borg_create
from borgmatic.borg import environment as borg_environment
from borgmatic.borg import extract as borg_extract
from borgmatic.borg import info as borg_info
from borgmatic.borg import init as borg_init
from borgmatic.borg import list as borg_list
from borgmatic.borg import mount as borg_mount
from borgmatic.borg import prune as borg_prune
from borgmatic.borg import umount as borg_umount
from borgmatic.commands.arguments import parse_arguments
from borgmatic.config import checks, collect, convert, validate
from borgmatic.hooks import command, dispatch, dump, monitor
from borgmatic.logger import configure_logging, should_do_markup
from borgmatic.signals import configure_signals
from borgmatic.verbosity import verbosity_to_log_level

logger = logging.getLogger(__name__)

LEGACY_CONFIG_PATH = '/etc/borgmatic/config'


def run_configuration(config_filename, config, arguments):
    '''
    Given a config filename, the corresponding parsed config dict, and command-line arguments as a
    dict from subparser name to a namespace of parsed arguments, execute its defined pruning,
    backups, consistency checks, and/or other actions.

    Yield a combination of:

      * JSON output strings from successfully executing any actions that produce JSON
      * logging.LogRecord instances containing errors from any actions or backup hooks that fail
    '''
    (location, storage, retention, consistency, hooks) = (
        config.get(section_name, {})
        for section_name in ('location', 'storage', 'retention', 'consistency', 'hooks')
    )
    global_arguments = arguments['global']

    local_path = location.get('local_path', 'borg')
    remote_path = location.get('remote_path')
    borg_environment.initialize(storage)
    encountered_error = None
    error_repository = ''
    prune_create_or_check = {'prune', 'create', 'check'}.intersection(arguments)
    monitoring_log_level = verbosity_to_log_level(global_arguments.monitoring_verbosity)
    try:
        if prune_create_or_check:
            dispatch.call_hooks(
                'ping_monitor',
                hooks,
                config_filename,
                monitor.MONITOR_HOOK_NAMES,
                monitor.State.START,
                monitoring_log_level,
                global_arguments.dry_run,
            )
        if 'prune' in arguments:
            command.execute_hook(
                hooks.get('before_prune'),
                hooks.get('umask'),
                config_filename,
                'pre-prune',
                global_arguments.dry_run,
            )
        if 'create' in arguments:
            command.execute_hook(
                hooks.get('before_backup'),
                hooks.get('umask'),
                config_filename,
                'pre-backup',
                global_arguments.dry_run,
            )
            dispatch.call_hooks(
                'dump_databases',
                hooks,
                config_filename,
                dump.DATABASE_HOOK_NAMES,
                location,
                global_arguments.dry_run,
            )
        if 'check' in arguments:
            command.execute_hook(
                hooks.get('before_check'),
                hooks.get('umask'),
                config_filename,
                'pre-check',
                global_arguments.dry_run,
            )
    except (OSError, CalledProcessError) as error:
        if command.considered_soft_failure(config_filename, error):
            return

        encountered_error = error
        yield from make_error_log_records(
            '{}: Error running pre hook'.format(config_filename), error
        )

    if not encountered_error:
        for repository_path in location['repositories']:
            try:
                yield from run_actions(
                    arguments=arguments,
                    location=location,
                    storage=storage,
                    retention=retention,
                    consistency=consistency,
                    hooks=hooks,
                    local_path=local_path,
                    remote_path=remote_path,
                    repository_path=repository_path,
                )
            except (OSError, CalledProcessError, ValueError) as error:
                encountered_error = error
                error_repository = repository_path
                yield from make_error_log_records(
                    '{}: Error running actions for repository'.format(repository_path), error
                )

    if not encountered_error:
        try:
            if 'prune' in arguments:
                command.execute_hook(
                    hooks.get('after_prune'),
                    hooks.get('umask'),
                    config_filename,
                    'post-prune',
                    global_arguments.dry_run,
                )
            if 'create' in arguments:
                dispatch.call_hooks(
                    'remove_database_dumps',
                    hooks,
                    config_filename,
                    dump.DATABASE_HOOK_NAMES,
                    location,
                    global_arguments.dry_run,
                )
                command.execute_hook(
                    hooks.get('after_backup'),
                    hooks.get('umask'),
                    config_filename,
                    'post-backup',
                    global_arguments.dry_run,
                )
            if 'check' in arguments:
                command.execute_hook(
                    hooks.get('after_check'),
                    hooks.get('umask'),
                    config_filename,
                    'post-check',
                    global_arguments.dry_run,
                )
            if prune_create_or_check:
                dispatch.call_hooks(
                    'ping_monitor',
                    hooks,
                    config_filename,
                    monitor.MONITOR_HOOK_NAMES,
                    monitor.State.FINISH,
                    monitoring_log_level,
                    global_arguments.dry_run,
                )
        except (OSError, CalledProcessError) as error:
            if command.considered_soft_failure(config_filename, error):
                return

            encountered_error = error
            yield from make_error_log_records(
                '{}: Error running post hook'.format(config_filename), error
            )

    if encountered_error and prune_create_or_check:
        try:
            command.execute_hook(
                hooks.get('on_error'),
                hooks.get('umask'),
                config_filename,
                'on-error',
                global_arguments.dry_run,
                repository=error_repository,
                error=encountered_error,
                output=getattr(encountered_error, 'output', ''),
            )
            dispatch.call_hooks(
                'ping_monitor',
                hooks,
                config_filename,
                monitor.MONITOR_HOOK_NAMES,
                monitor.State.FAIL,
                monitoring_log_level,
                global_arguments.dry_run,
            )
        except (OSError, CalledProcessError) as error:
            if command.considered_soft_failure(config_filename, error):
                return

            yield from make_error_log_records(
                '{}: Error running on-error hook'.format(config_filename), error
            )
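

# A minimal consumption sketch (the filename and dicts are hypothetical; not part of the original
# module). run_configuration() is a generator, so a caller splits its JSON output from any error
# log records, much as collect_configuration_run_summary_logs() does below:
#
#     results = list(run_configuration('/etc/borgmatic/config.yaml', config, arguments))
#     error_logs = [result for result in results if isinstance(result, logging.LogRecord)]
#     json_results = [result for result in results if not isinstance(result, logging.LogRecord)]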


def run_actions(
    *,
    arguments,
    location,
    storage,
    retention,
    consistency,
    hooks,
    local_path,
    remote_path,
    repository_path,
):  # pragma: no cover
    '''
    Given command-line arguments as a dict from subparser name to a namespace of parsed arguments,
    several different configuration dicts, local and remote paths to Borg, and a repository name,
    run all actions from the command-line arguments on the given repository.

    Yield JSON output strings from executing any actions that produce JSON.

    Raise OSError or subprocess.CalledProcessError if an error occurs running a command for an
    action. Raise ValueError if the arguments or configuration passed to an action are invalid.
    '''
    repository = os.path.expanduser(repository_path)
    global_arguments = arguments['global']
    dry_run_label = ' (dry run; not making any changes)' if global_arguments.dry_run else ''
    if 'init' in arguments:
        logger.info('{}: Initializing repository'.format(repository))
        borg_init.initialize_repository(
            repository,
            storage,
            arguments['init'].encryption_mode,
            arguments['init'].append_only,
            arguments['init'].storage_quota,
            local_path=local_path,
            remote_path=remote_path,
        )
    if 'prune' in arguments:
        logger.info('{}: Pruning archives{}'.format(repository, dry_run_label))
        borg_prune.prune_archives(
            global_arguments.dry_run,
            repository,
            storage,
            retention,
            local_path=local_path,
            remote_path=remote_path,
            stats=arguments['prune'].stats,
            files=arguments['prune'].files,
        )
    if 'create' in arguments:
        logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
        json_output = borg_create.create_archive(
            global_arguments.dry_run,
            repository,
            location,
            storage,
            local_path=local_path,
            remote_path=remote_path,
            progress=arguments['create'].progress,
            stats=arguments['create'].stats,
            json=arguments['create'].json,
            files=arguments['create'].files,
        )
        if json_output:
            yield json.loads(json_output)
    if 'check' in arguments and checks.repository_enabled_for_checks(repository, consistency):
        logger.info('{}: Running consistency checks'.format(repository))
        borg_check.check_archives(
            repository,
            storage,
            consistency,
            local_path=local_path,
            remote_path=remote_path,
            progress=arguments['check'].progress,
            repair=arguments['check'].repair,
            only_checks=arguments['check'].only,
        )
    if 'extract' in arguments:
        if arguments['extract'].repository is None or validate.repositories_match(
            repository, arguments['extract'].repository
        ):
            logger.info(
                '{}: Extracting archive {}'.format(repository, arguments['extract'].archive)
            )
            borg_extract.extract_archive(
                global_arguments.dry_run,
                repository,
                borg_list.resolve_archive_name(
                    repository, arguments['extract'].archive, storage, local_path, remote_path
                ),
                arguments['extract'].paths,
                location,
                storage,
                local_path=local_path,
                remote_path=remote_path,
                destination_path=arguments['extract'].destination,
                progress=arguments['extract'].progress,
            )
    if 'mount' in arguments:
        if arguments['mount'].repository is None or validate.repositories_match(
            repository, arguments['mount'].repository
        ):
            if arguments['mount'].archive:
                logger.info(
                    '{}: Mounting archive {}'.format(repository, arguments['mount'].archive)
                )
            else:
                logger.info('{}: Mounting repository'.format(repository))

            borg_mount.mount_archive(
                repository,
                borg_list.resolve_archive_name(
                    repository, arguments['mount'].archive, storage, local_path, remote_path
                ),
                arguments['mount'].mount_point,
                arguments['mount'].paths,
                arguments['mount'].foreground,
                arguments['mount'].options,
                storage,
                local_path=local_path,
                remote_path=remote_path,
            )
    if 'restore' in arguments:
        if arguments['restore'].repository is None or validate.repositories_match(
            repository, arguments['restore'].repository
        ):
            logger.info(
                '{}: Restoring databases from archive {}'.format(
                    repository, arguments['restore'].archive
                )
            )

            restore_names = arguments['restore'].databases or []
            if 'all' in restore_names:
                restore_names = []

            # Extract dumps for the named databases from the archive.
            dump_patterns = dispatch.call_hooks(
                'make_database_dump_patterns',
                hooks,
                repository,
                dump.DATABASE_HOOK_NAMES,
                location,
                restore_names,
            )
            borg_extract.extract_archive(
                global_arguments.dry_run,
                repository,
                borg_list.resolve_archive_name(
                    repository, arguments['restore'].archive, storage, local_path, remote_path
                ),
                dump.convert_glob_patterns_to_borg_patterns(
                    dump.flatten_dump_patterns(dump_patterns, restore_names)
                ),
                location,
                storage,
                local_path=local_path,
                remote_path=remote_path,
                destination_path='/',
                progress=arguments['restore'].progress,
                # Don't error on glob patterns that don't match anything.
                error_on_warnings=False,
            )

            # Map the restore names or detected dumps to the corresponding database
            # configurations.
            restore_databases = dump.get_per_hook_database_configurations(
                hooks, restore_names, dump_patterns
            )

            # Finally, restore the databases and clean up the dumps.
            dispatch.call_hooks(
                'restore_database_dumps',
                restore_databases,
                repository,
                dump.DATABASE_HOOK_NAMES,
                location,
                global_arguments.dry_run,
            )
            dispatch.call_hooks(
                'remove_database_dumps',
                restore_databases,
                repository,
                dump.DATABASE_HOOK_NAMES,
                location,
                global_arguments.dry_run,
            )
    if 'list' in arguments:
        if arguments['list'].repository is None or validate.repositories_match(
            repository, arguments['list'].repository
        ):
            list_arguments = copy.copy(arguments['list'])
            if not list_arguments.json:
                logger.warning('{}: Listing archives'.format(repository))
            list_arguments.archive = borg_list.resolve_archive_name(
                repository, list_arguments.archive, storage, local_path, remote_path
            )
            json_output = borg_list.list_archives(
                repository,
                storage,
                list_arguments=list_arguments,
                local_path=local_path,
                remote_path=remote_path,
            )
            if json_output:
                yield json.loads(json_output)
    if 'info' in arguments:
        if arguments['info'].repository is None or validate.repositories_match(
            repository, arguments['info'].repository
        ):
            info_arguments = copy.copy(arguments['info'])
            if not info_arguments.json:
                logger.warning('{}: Displaying summary info for archives'.format(repository))
            info_arguments.archive = borg_list.resolve_archive_name(
                repository, info_arguments.archive, storage, local_path, remote_path
            )
            json_output = borg_info.display_archives_info(
                repository,
                storage,
                info_arguments=info_arguments,
                local_path=local_path,
                remote_path=remote_path,
            )
            if json_output:
                yield json.loads(json_output)
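

# A hedged usage sketch (the flags shown are illustrative): parse_arguments() from
# borgmatic.commands.arguments builds the dict that run_actions() and run_configuration() expect,
# mapping each subparser name, plus 'global', to an argparse.Namespace:
#
#     arguments = parse_arguments('--verbosity', '1', 'create', '--stats')
#     arguments['global'].dry_run    # global flags live under the 'global' key
#     'create' in arguments          # action names key the per-action namespaces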


def load_configurations(config_filenames, overrides=None):
    '''
    Given a sequence of configuration filenames, load and validate each configuration file. Return
    the results as a tuple of: a dict of configuration filename to corresponding parsed
    configuration, and a sequence of logging.LogRecord instances containing any parse errors.
    '''
    # Dict mapping from config filename to corresponding parsed config dict.
    configs = collections.OrderedDict()
    logs = []

    # Parse and load each configuration file.
    for config_filename in config_filenames:
        try:
            configs[config_filename] = validate.parse_configuration(
                config_filename, validate.schema_filename(), overrides
            )
        except (ValueError, OSError, validate.Validation_error) as error:
            logs.extend(
                [
                    logging.makeLogRecord(
                        dict(
                            levelno=logging.CRITICAL,
                            levelname='CRITICAL',
                            msg='{}: Error parsing configuration file'.format(config_filename),
                        )
                    ),
                    logging.makeLogRecord(
                        dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
                    ),
                ]
            )

    return (configs, logs)
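

# A minimal usage sketch (the path is hypothetical): load and validate a configuration file,
# keeping any parse errors as log records rather than letting them raise:
#
#     configs, parse_logs = load_configurations(('/etc/borgmatic/config.yaml',))
#     for record in parse_logs:
#         logger.handle(record)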


def log_record(suppress_log=False, **kwargs):
    '''
    Create a log record based on the given makeLogRecord() arguments, one of which must be
    named "levelno". Log the record (unless suppress_log is set) and return it.
    '''
    record = logging.makeLogRecord(kwargs)
    if suppress_log:
        return record

    logger.handle(record)
    return record
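

# For instance (the message text is illustrative), this logs a critical record and also returns
# the record object for the end-of-run summary:
#
#     record = log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg='Backup failed')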


def make_error_log_records(message, error=None):
    '''
    Given error message text and an optional exception object, yield a series of logging.LogRecord
    instances with error summary information. As a side effect, log each record.
    '''
    if not error:
        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
        return

    try:
        raise error
    except CalledProcessError as error:
        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
        if error.output:
            # Suppress these logs for now, and save the full error output for the log summary
            # at the end.
            yield log_record(
                levelno=logging.CRITICAL, levelname='CRITICAL', msg=error.output, suppress_log=True
            )
        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
    except (ValueError, OSError) as error:
        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
    except:  # noqa: E722
        # Raising above is only a means of determining the error type. Swallow the exception here
        # because we don't want it to propagate out of this function.
        pass
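

# A brief sketch of the re-raise trick above (the values are illustrative): re-raising the
# passed-in exception lets the except clauses dispatch on its type without isinstance() checks:
#
#     records = list(
#         make_error_log_records('config.yaml: Error', CalledProcessError(1, 'borg create'))
#     )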


def get_local_path(configs):
    '''
    Arbitrarily return the local path from the first configuration dict. Default to "borg" if not
    set.
    '''
    return next(iter(configs.values())).get('location', {}).get('local_path', 'borg')


def collect_configuration_run_summary_logs(configs, arguments):
    '''
    Given a dict of configuration filename to corresponding parsed configuration, and parsed
    command-line arguments as a dict from subparser name to a parsed namespace of arguments, run
    each configuration file and yield a series of logging.LogRecord instances containing summary
    information about each run.

    As a side effect of running through these configuration files, output their JSON results, if
    any, to stdout.
    '''
    # Run cross-file validation checks.
    if 'extract' in arguments:
        repository = arguments['extract'].repository
    elif 'list' in arguments and arguments['list'].archive:
        repository = arguments['list'].repository
    elif 'mount' in arguments:
        repository = arguments['mount'].repository
    else:
        repository = None

    if repository:
        try:
            validate.guard_configuration_contains_repository(repository, configs)
        except ValueError as error:
            yield from make_error_log_records(str(error))
            return

    if not configs:
        yield from make_error_log_records(
            '{}: No configuration files found'.format(' '.join(arguments['global'].config_paths))
        )
        return

    if 'create' in arguments:
        try:
            for config_filename, config in configs.items():
                hooks = config.get('hooks', {})
                command.execute_hook(
                    hooks.get('before_everything'),
                    hooks.get('umask'),
                    config_filename,
                    'pre-everything',
                    arguments['global'].dry_run,
                )
        except (CalledProcessError, ValueError, OSError) as error:
            yield from make_error_log_records('Error running pre-everything hook', error)
            return

    # Execute the actions corresponding to each configuration file.
    json_results = []
    for config_filename, config in configs.items():
        results = list(run_configuration(config_filename, config, arguments))
        error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))

        if error_logs:
            yield from make_error_log_records(
                '{}: Error running configuration file'.format(config_filename)
            )
            yield from error_logs
        else:
            yield logging.makeLogRecord(
                dict(
                    levelno=logging.INFO,
                    levelname='INFO',
                    msg='{}: Successfully ran configuration file'.format(config_filename),
                )
            )
            if results:
                json_results.extend(results)

    if 'umount' in arguments:
        logger.info('Unmounting mount point {}'.format(arguments['umount'].mount_point))
        try:
            borg_umount.unmount_archive(
                mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs)
            )
        except (CalledProcessError, OSError) as error:
            yield from make_error_log_records('Error unmounting mount point', error)

    if json_results:
        sys.stdout.write(json.dumps(json_results))

    if 'create' in arguments:
        try:
            for config_filename, config in configs.items():
                hooks = config.get('hooks', {})
                command.execute_hook(
                    hooks.get('after_everything'),
                    hooks.get('umask'),
                    config_filename,
                    'post-everything',
                    arguments['global'].dry_run,
                )
        except (CalledProcessError, ValueError, OSError) as error:
            yield from make_error_log_records('Error running post-everything hook', error)


def exit_with_help_link():  # pragma: no cover
    '''
    Display a link to get help and exit with an error code.
    '''
    logger.critical('')
    logger.critical('Need some help? https://torsion.org/borgmatic/#issues')
    sys.exit(1)


def main():  # pragma: no cover
    configure_signals()

    try:
        arguments = parse_arguments(*sys.argv[1:])
    except ValueError as error:
        configure_logging(logging.CRITICAL)
        logger.critical(error)
        exit_with_help_link()
    except SystemExit as error:
        if error.code == 0:
            raise error
        configure_logging(logging.CRITICAL)
        logger.critical('Error parsing arguments: {}'.format(' '.join(sys.argv)))
        exit_with_help_link()

    global_arguments = arguments['global']
    if global_arguments.version:
        print(pkg_resources.require('borgmatic')[0].version)
        sys.exit(0)

    config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths))
    configs, parse_logs = load_configurations(config_filenames, global_arguments.overrides)

    any_json_flags = any(
        getattr(sub_arguments, 'json', False) for sub_arguments in arguments.values()
    )
    colorama.init(
        autoreset=True,
        strip=not should_do_markup(global_arguments.no_color or any_json_flags, configs),
    )
    try:
        configure_logging(
            verbosity_to_log_level(global_arguments.verbosity),
            verbosity_to_log_level(global_arguments.syslog_verbosity),
            verbosity_to_log_level(global_arguments.log_file_verbosity),
            verbosity_to_log_level(global_arguments.monitoring_verbosity),
            global_arguments.log_file,
        )
    except (FileNotFoundError, PermissionError) as error:
        configure_logging(logging.CRITICAL)
        logger.critical('Error configuring logging: {}'.format(error))
        exit_with_help_link()

    logger.debug('Ensuring legacy configuration is upgraded')
    convert.guard_configuration_upgraded(LEGACY_CONFIG_PATH, config_filenames)

    summary_logs = parse_logs + list(collect_configuration_run_summary_logs(configs, arguments))
    summary_logs_max_level = max(log.levelno for log in summary_logs)

    for message in ('', 'summary:'):
        log_record(
            levelno=summary_logs_max_level,
            levelname=logging.getLevelName(summary_logs_max_level),
            msg=message,
        )

    for log in summary_logs:
        logger.handle(log)

    if summary_logs_max_level >= logging.CRITICAL:
        exit_with_help_link()