borgmatic.py 26 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692
  1. import collections
  2. import copy
  3. import json
  4. import logging
  5. import os
  6. import sys
  7. from subprocess import CalledProcessError
  8. import colorama
  9. import pkg_resources
  10. from borgmatic.borg import check as borg_check
  11. from borgmatic.borg import create as borg_create
  12. from borgmatic.borg import environment as borg_environment
  13. from borgmatic.borg import extract as borg_extract
  14. from borgmatic.borg import info as borg_info
  15. from borgmatic.borg import init as borg_init
  16. from borgmatic.borg import list as borg_list
  17. from borgmatic.borg import mount as borg_mount
  18. from borgmatic.borg import prune as borg_prune
  19. from borgmatic.borg import umount as borg_umount
  20. from borgmatic.commands.arguments import parse_arguments
  21. from borgmatic.config import checks, collect, convert, validate
  22. from borgmatic.hooks import command, dispatch, dump, monitor
  23. from borgmatic.logger import configure_logging, should_do_markup
  24. from borgmatic.signals import configure_signals
  25. from borgmatic.verbosity import verbosity_to_log_level
# Module-level logger, named after this module per the standard logging convention.
logger = logging.getLogger(__name__)

# Path of the legacy (pre-YAML) borgmatic configuration; main() passes this to
# convert.guard_configuration_upgraded() so an un-upgraded install can be detected.
LEGACY_CONFIG_PATH = '/etc/borgmatic/config'
  28. def run_configuration(config_filename, config, arguments):
  29. '''
  30. Given a config filename, the corresponding parsed config dict, and command-line arguments as a
  31. dict from subparser name to a namespace of parsed arguments, execute its defined pruning,
  32. backups, consistency checks, and/or other actions.
  33. Yield a combination of:
  34. * JSON output strings from successfully executing any actions that produce JSON
  35. * logging.LogRecord instances containing errors from any actions or backup hooks that fail
  36. '''
  37. (location, storage, retention, consistency, hooks) = (
  38. config.get(section_name, {})
  39. for section_name in ('location', 'storage', 'retention', 'consistency', 'hooks')
  40. )
  41. global_arguments = arguments['global']
  42. local_path = location.get('local_path', 'borg')
  43. remote_path = location.get('remote_path')
  44. borg_environment.initialize(storage)
  45. encountered_error = None
  46. error_repository = ''
  47. prune_create_or_check = {'prune', 'create', 'check'}.intersection(arguments)
  48. monitoring_log_level = verbosity_to_log_level(global_arguments.monitoring_verbosity)
  49. try:
  50. if prune_create_or_check:
  51. dispatch.call_hooks(
  52. 'ping_monitor',
  53. hooks,
  54. config_filename,
  55. monitor.MONITOR_HOOK_NAMES,
  56. monitor.State.START,
  57. monitoring_log_level,
  58. global_arguments.dry_run,
  59. )
  60. if 'prune' in arguments:
  61. command.execute_hook(
  62. hooks.get('before_prune'),
  63. hooks.get('umask'),
  64. config_filename,
  65. 'pre-prune',
  66. global_arguments.dry_run,
  67. )
  68. if 'create' in arguments:
  69. command.execute_hook(
  70. hooks.get('before_backup'),
  71. hooks.get('umask'),
  72. config_filename,
  73. 'pre-backup',
  74. global_arguments.dry_run,
  75. )
  76. dispatch.call_hooks(
  77. 'dump_databases',
  78. hooks,
  79. config_filename,
  80. dump.DATABASE_HOOK_NAMES,
  81. location,
  82. global_arguments.dry_run,
  83. )
  84. if 'check' in arguments:
  85. command.execute_hook(
  86. hooks.get('before_check'),
  87. hooks.get('umask'),
  88. config_filename,
  89. 'pre-check',
  90. global_arguments.dry_run,
  91. )
  92. except (OSError, CalledProcessError) as error:
  93. if command.considered_soft_failure(config_filename, error):
  94. return
  95. encountered_error = error
  96. yield from make_error_log_records(
  97. '{}: Error running pre hook'.format(config_filename), error
  98. )
  99. if not encountered_error:
  100. for repository_path in location['repositories']:
  101. try:
  102. yield from run_actions(
  103. arguments=arguments,
  104. location=location,
  105. storage=storage,
  106. retention=retention,
  107. consistency=consistency,
  108. hooks=hooks,
  109. local_path=local_path,
  110. remote_path=remote_path,
  111. repository_path=repository_path,
  112. )
  113. except (OSError, CalledProcessError, ValueError) as error:
  114. encountered_error = error
  115. error_repository = repository_path
  116. yield from make_error_log_records(
  117. '{}: Error running actions for repository'.format(repository_path), error
  118. )
  119. if not encountered_error:
  120. try:
  121. if 'prune' in arguments:
  122. command.execute_hook(
  123. hooks.get('after_prune'),
  124. hooks.get('umask'),
  125. config_filename,
  126. 'post-prune',
  127. global_arguments.dry_run,
  128. )
  129. if 'create' in arguments:
  130. dispatch.call_hooks(
  131. 'remove_database_dumps',
  132. hooks,
  133. config_filename,
  134. dump.DATABASE_HOOK_NAMES,
  135. location,
  136. global_arguments.dry_run,
  137. )
  138. command.execute_hook(
  139. hooks.get('after_backup'),
  140. hooks.get('umask'),
  141. config_filename,
  142. 'post-backup',
  143. global_arguments.dry_run,
  144. )
  145. if 'check' in arguments:
  146. command.execute_hook(
  147. hooks.get('after_check'),
  148. hooks.get('umask'),
  149. config_filename,
  150. 'post-check',
  151. global_arguments.dry_run,
  152. )
  153. if {'prune', 'create', 'check'}.intersection(arguments):
  154. dispatch.call_hooks(
  155. 'ping_monitor',
  156. hooks,
  157. config_filename,
  158. monitor.MONITOR_HOOK_NAMES,
  159. monitor.State.FINISH,
  160. monitoring_log_level,
  161. global_arguments.dry_run,
  162. )
  163. except (OSError, CalledProcessError) as error:
  164. if command.considered_soft_failure(config_filename, error):
  165. return
  166. encountered_error = error
  167. yield from make_error_log_records(
  168. '{}: Error running post hook'.format(config_filename), error
  169. )
  170. if encountered_error and prune_create_or_check:
  171. try:
  172. command.execute_hook(
  173. hooks.get('on_error'),
  174. hooks.get('umask'),
  175. config_filename,
  176. 'on-error',
  177. global_arguments.dry_run,
  178. repository=error_repository,
  179. error=encountered_error,
  180. output=getattr(encountered_error, 'output', ''),
  181. )
  182. dispatch.call_hooks(
  183. 'ping_monitor',
  184. hooks,
  185. config_filename,
  186. monitor.MONITOR_HOOK_NAMES,
  187. monitor.State.FAIL,
  188. monitoring_log_level,
  189. global_arguments.dry_run,
  190. )
  191. except (OSError, CalledProcessError) as error:
  192. if command.considered_soft_failure(config_filename, error):
  193. return
  194. yield from make_error_log_records(
  195. '{}: Error running on-error hook'.format(config_filename), error
  196. )
def run_actions(
    *,
    arguments,
    location,
    storage,
    retention,
    consistency,
    hooks,
    local_path,
    remote_path,
    repository_path
):  # pragma: no cover
    '''
    Given parsed command-line arguments as an argparse.ArgumentParser instance, several different
    configuration dicts, local and remote paths to Borg, and a repository name, run all actions
    from the command-line arguments on the given repository.

    Yield JSON output strings from executing any actions that produce JSON.

    Raise OSError or subprocess.CalledProcessError if an error occurs running a command for an
    action. Raise ValueError if the arguments or configuration passed to action are invalid.
    '''
    repository = os.path.expanduser(repository_path)
    global_arguments = arguments['global']
    dry_run_label = ' (dry run; not making any changes)' if global_arguments.dry_run else ''
    # "init" action: create the Borg repository itself.
    if 'init' in arguments:
        logger.info('{}: Initializing repository'.format(repository))
        borg_init.initialize_repository(
            repository,
            storage,
            arguments['init'].encryption_mode,
            arguments['init'].append_only,
            arguments['init'].storage_quota,
            local_path=local_path,
            remote_path=remote_path,
        )
    # "prune" action: trim old archives per the retention configuration.
    if 'prune' in arguments:
        logger.info('{}: Pruning archives{}'.format(repository, dry_run_label))
        borg_prune.prune_archives(
            global_arguments.dry_run,
            repository,
            storage,
            retention,
            local_path=local_path,
            remote_path=remote_path,
            stats=arguments['prune'].stats,
            files=arguments['prune'].files,
        )
    # "create" action: make a new backup archive, yielding its JSON result if requested.
    if 'create' in arguments:
        logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
        json_output = borg_create.create_archive(
            global_arguments.dry_run,
            repository,
            location,
            storage,
            local_path=local_path,
            remote_path=remote_path,
            progress=arguments['create'].progress,
            stats=arguments['create'].stats,
            json=arguments['create'].json,
            files=arguments['create'].files,
        )
        if json_output:
            yield json.loads(json_output)
    # "check" action: run consistency checks, but only if this repository isn't excluded
    # from checks by the consistency configuration.
    if 'check' in arguments and checks.repository_enabled_for_checks(repository, consistency):
        logger.info('{}: Running consistency checks'.format(repository))
        borg_check.check_archives(
            repository,
            storage,
            consistency,
            local_path=local_path,
            remote_path=remote_path,
            progress=arguments['check'].progress,
            repair=arguments['check'].repair,
            only_checks=arguments['check'].only,
        )
    # "extract" action: only applies when no repository was given on the command-line or the
    # given one matches this repository.
    if 'extract' in arguments:
        if arguments['extract'].repository is None or validate.repositories_match(
            repository, arguments['extract'].repository
        ):
            logger.info(
                '{}: Extracting archive {}'.format(repository, arguments['extract'].archive)
            )
            borg_extract.extract_archive(
                global_arguments.dry_run,
                repository,
                borg_list.resolve_archive_name(
                    repository, arguments['extract'].archive, storage, local_path, remote_path
                ),
                arguments['extract'].paths,
                location,
                storage,
                local_path=local_path,
                remote_path=remote_path,
                destination_path=arguments['extract'].destination,
                progress=arguments['extract'].progress,
            )
    # "mount" action: mount either a single archive or the whole repository (when no archive
    # is given), subject to the same repository-matching rule as "extract".
    if 'mount' in arguments:
        if arguments['mount'].repository is None or validate.repositories_match(
            repository, arguments['mount'].repository
        ):
            if arguments['mount'].archive:
                logger.info(
                    '{}: Mounting archive {}'.format(repository, arguments['mount'].archive)
                )
            else:
                logger.info('{}: Mounting repository'.format(repository))
            borg_mount.mount_archive(
                repository,
                borg_list.resolve_archive_name(
                    repository, arguments['mount'].archive, storage, local_path, remote_path
                ),
                arguments['mount'].mount_point,
                arguments['mount'].paths,
                arguments['mount'].foreground,
                arguments['mount'].options,
                storage,
                local_path=local_path,
                remote_path=remote_path,
            )
    # "restore" action: extract database dumps from an archive and feed them to the configured
    # database hooks, subject to the same repository-matching rule as "extract".
    if 'restore' in arguments:
        if arguments['restore'].repository is None or validate.repositories_match(
            repository, arguments['restore'].repository
        ):
            logger.info(
                '{}: Restoring databases from archive {}'.format(
                    repository, arguments['restore'].archive
                )
            )

            # An explicit "all" means the same as naming no databases: restore everything.
            restore_names = arguments['restore'].databases or []
            if 'all' in restore_names:
                restore_names = []

            # Extract dumps for the named databases from the archive.
            dump_patterns = dispatch.call_hooks(
                'make_database_dump_patterns',
                hooks,
                repository,
                dump.DATABASE_HOOK_NAMES,
                location,
                restore_names,
            )
            borg_extract.extract_archive(
                global_arguments.dry_run,
                repository,
                borg_list.resolve_archive_name(
                    repository, arguments['restore'].archive, storage, local_path, remote_path
                ),
                dump.convert_glob_patterns_to_borg_patterns(
                    dump.flatten_dump_patterns(dump_patterns, restore_names)
                ),
                location,
                storage,
                local_path=local_path,
                remote_path=remote_path,
                destination_path='/',
                progress=arguments['restore'].progress,
                # We don't want glob patterns that don't match to error.
                error_on_warnings=False,
            )

            # Map the restore names or detected dumps to the corresponding database configurations.
            restore_databases = dump.get_per_hook_database_configurations(
                hooks, restore_names, dump_patterns
            )

            # Finally, restore the databases and cleanup the dumps.
            dispatch.call_hooks(
                'restore_database_dumps',
                restore_databases,
                repository,
                dump.DATABASE_HOOK_NAMES,
                location,
                global_arguments.dry_run,
            )
            dispatch.call_hooks(
                'remove_database_dumps',
                restore_databases,
                repository,
                dump.DATABASE_HOOK_NAMES,
                location,
                global_arguments.dry_run,
            )
    # "list" action: list archives (or an archive's contents), yielding JSON if requested.
    # Note the listing message logs at WARNING so it shows at default verbosity.
    if 'list' in arguments:
        if arguments['list'].repository is None or validate.repositories_match(
            repository, arguments['list'].repository
        ):
            list_arguments = copy.copy(arguments['list'])
            if not list_arguments.json:
                logger.warning('{}: Listing archives'.format(repository))
            list_arguments.archive = borg_list.resolve_archive_name(
                repository, list_arguments.archive, storage, local_path, remote_path
            )
            json_output = borg_list.list_archives(
                repository,
                storage,
                list_arguments=list_arguments,
                local_path=local_path,
                remote_path=remote_path,
            )
            if json_output:
                yield json.loads(json_output)
    # "info" action: display archive summary information, yielding JSON if requested.
    if 'info' in arguments:
        if arguments['info'].repository is None or validate.repositories_match(
            repository, arguments['info'].repository
        ):
            info_arguments = copy.copy(arguments['info'])
            if not info_arguments.json:
                logger.warning('{}: Displaying summary info for archives'.format(repository))
            info_arguments.archive = borg_list.resolve_archive_name(
                repository, info_arguments.archive, storage, local_path, remote_path
            )
            json_output = borg_info.display_archives_info(
                repository,
                storage,
                info_arguments=info_arguments,
                local_path=local_path,
                remote_path=remote_path,
            )
            if json_output:
                yield json.loads(json_output)
  413. def load_configurations(config_filenames, overrides=None):
  414. '''
  415. Given a sequence of configuration filenames, load and validate each configuration file. Return
  416. the results as a tuple of: dict of configuration filename to corresponding parsed configuration,
  417. and sequence of logging.LogRecord instances containing any parse errors.
  418. '''
  419. # Dict mapping from config filename to corresponding parsed config dict.
  420. configs = collections.OrderedDict()
  421. logs = []
  422. # Parse and load each configuration file.
  423. for config_filename in config_filenames:
  424. try:
  425. configs[config_filename] = validate.parse_configuration(
  426. config_filename, validate.schema_filename(), overrides
  427. )
  428. except (ValueError, OSError, validate.Validation_error) as error:
  429. logs.extend(
  430. [
  431. logging.makeLogRecord(
  432. dict(
  433. levelno=logging.CRITICAL,
  434. levelname='CRITICAL',
  435. msg='{}: Error parsing configuration file'.format(config_filename),
  436. )
  437. ),
  438. logging.makeLogRecord(
  439. dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
  440. ),
  441. ]
  442. )
  443. return (configs, logs)
  444. def log_record(suppress_log=False, **kwargs):
  445. '''
  446. Create a log record based on the given makeLogRecord() arguments, one of which must be
  447. named "levelno". Log the record (unless suppress log is set) and return it.
  448. '''
  449. record = logging.makeLogRecord(kwargs)
  450. if suppress_log:
  451. return record
  452. logger.handle(record)
  453. return record
  454. def make_error_log_records(message, error=None):
  455. '''
  456. Given error message text and an optional exception object, yield a series of logging.LogRecord
  457. instances with error summary information. As a side effect, log each record.
  458. '''
  459. if not error:
  460. yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
  461. return
  462. try:
  463. raise error
  464. except CalledProcessError as error:
  465. yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
  466. if error.output:
  467. # Suppress these logs for now and save full error output for the log summary at the end.
  468. yield log_record(
  469. levelno=logging.CRITICAL, levelname='CRITICAL', msg=error.output, suppress_log=True
  470. )
  471. yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
  472. except (ValueError, OSError) as error:
  473. yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
  474. yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
  475. except: # noqa: E722
  476. # Raising above only as a means of determining the error type. Swallow the exception here
  477. # because we don't want the exception to propagate out of this function.
  478. pass
  479. def get_local_path(configs):
  480. '''
  481. Arbitrarily return the local path from the first configuration dict. Default to "borg" if not
  482. set.
  483. '''
  484. return next(iter(configs.values())).get('location', {}).get('local_path', 'borg')
def collect_configuration_run_summary_logs(configs, arguments):
    '''
    Given a dict of configuration filename to corresponding parsed configuration, and parsed
    command-line arguments as a dict from subparser name to a parsed namespace of arguments, run
    each configuration file and yield a series of logging.LogRecord instances containing summary
    information about each run.

    As a side effect of running through these configuration files, output their JSON results, if
    any, to stdout.
    '''
    # Run cross-file validation checks.
    # Work out which repository (if any) the requested action names, so we can verify it
    # actually appears in one of the loaded configurations before doing anything.
    if 'extract' in arguments:
        repository = arguments['extract'].repository
    elif 'list' in arguments and arguments['list'].archive:
        repository = arguments['list'].repository
    elif 'mount' in arguments:
        repository = arguments['mount'].repository
    else:
        repository = None

    if repository:
        try:
            validate.guard_configuration_contains_repository(repository, configs)
        except ValueError as error:
            yield from make_error_log_records(str(error))
            return

    # With no configurations at all, there's nothing to run: emit an error and bail.
    if not configs:
        yield from make_error_log_records(
            '{}: No configuration files found'.format(' '.join(arguments['global'].config_paths))
        )
        return

    # For the "create" action, run each configuration's before_everything hook up front.
    # Any hook failure aborts the whole run.
    if 'create' in arguments:
        try:
            for config_filename, config in configs.items():
                hooks = config.get('hooks', {})
                command.execute_hook(
                    hooks.get('before_everything'),
                    hooks.get('umask'),
                    config_filename,
                    'pre-everything',
                    arguments['global'].dry_run,
                )
        except (CalledProcessError, ValueError, OSError) as error:
            yield from make_error_log_records('Error running pre-everything hook', error)
            return

    # Execute the actions corresponding to each configuration file.
    json_results = []
    for config_filename, config in configs.items():
        # run_configuration() yields a mix of JSON results and error LogRecords; the presence
        # of any LogRecord marks this configuration's run as failed.
        results = list(run_configuration(config_filename, config, arguments))
        error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))

        if error_logs:
            yield from make_error_log_records(
                '{}: Error running configuration file'.format(config_filename)
            )
            yield from error_logs
        else:
            yield logging.makeLogRecord(
                dict(
                    levelno=logging.INFO,
                    levelname='INFO',
                    msg='{}: Successfully ran configuration file'.format(config_filename),
                )
            )
            if results:
                json_results.extend(results)

    # The "umount" action is repository-independent, so handle it here rather than per-config.
    if 'umount' in arguments:
        logger.info('Unmounting mount point {}'.format(arguments['umount'].mount_point))
        try:
            borg_umount.unmount_archive(
                mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs)
            )
        except (CalledProcessError, OSError) as error:
            yield from make_error_log_records('Error unmounting mount point', error)

    # Emit all collected JSON results to stdout as a single JSON document.
    if json_results:
        sys.stdout.write(json.dumps(json_results))

    # Mirror the before_everything handling above with after_everything hooks.
    if 'create' in arguments:
        try:
            for config_filename, config in configs.items():
                hooks = config.get('hooks', {})
                command.execute_hook(
                    hooks.get('after_everything'),
                    hooks.get('umask'),
                    config_filename,
                    'post-everything',
                    arguments['global'].dry_run,
                )
        except (CalledProcessError, ValueError, OSError) as error:
            yield from make_error_log_records('Error running post-everything hook', error)
  571. def exit_with_help_link(): # pragma: no cover
  572. '''
  573. Display a link to get help and exit with an error code.
  574. '''
  575. logger.critical('')
  576. logger.critical('Need some help? https://torsion.org/borgmatic/#issues')
  577. sys.exit(1)
def main():  # pragma: no cover
    '''
    Command-line entry point: parse arguments, load and validate configuration files, set up
    logging and color output, run the requested actions across all configurations, and emit a
    summary. Exits non-zero (with a help link) on argument, logging-setup, or run errors.
    '''
    configure_signals()

    try:
        arguments = parse_arguments(*sys.argv[1:])
    except ValueError as error:
        configure_logging(logging.CRITICAL)
        logger.critical(error)
        exit_with_help_link()
    except SystemExit as error:
        # argparse exits with code 0 for --help and the like; let that through unchanged.
        if error.code == 0:
            raise error
        configure_logging(logging.CRITICAL)
        logger.critical('Error parsing arguments: {}'.format(' '.join(sys.argv)))
        exit_with_help_link()

    global_arguments = arguments['global']
    if global_arguments.version:
        print(pkg_resources.require('borgmatic')[0].version)
        sys.exit(0)

    config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths))
    configs, parse_logs = load_configurations(config_filenames, global_arguments.overrides)

    # Strip color markup when any action requested JSON output, so stdout stays parseable.
    any_json_flags = any(
        getattr(sub_arguments, 'json', False) for sub_arguments in arguments.values()
    )
    colorama.init(
        autoreset=True,
        strip=not should_do_markup(global_arguments.no_color or any_json_flags, configs),
    )
    try:
        configure_logging(
            verbosity_to_log_level(global_arguments.verbosity),
            verbosity_to_log_level(global_arguments.syslog_verbosity),
            verbosity_to_log_level(global_arguments.log_file_verbosity),
            verbosity_to_log_level(global_arguments.monitoring_verbosity),
            global_arguments.log_file,
        )
    except (FileNotFoundError, PermissionError) as error:
        # Most likely the log file path is unwritable; fall back to minimal logging.
        configure_logging(logging.CRITICAL)
        logger.critical('Error configuring logging: {}'.format(error))
        exit_with_help_link()

    logger.debug('Ensuring legacy configuration is upgraded')
    convert.guard_configuration_upgraded(LEGACY_CONFIG_PATH, config_filenames)

    # Run everything, combining configuration-parse errors with per-run summary records.
    summary_logs = parse_logs + list(collect_configuration_run_summary_logs(configs, arguments))
    summary_logs_max_level = max(log.levelno for log in summary_logs)

    # Emit the summary header at the highest severity seen, so it shows whenever the
    # records below it would.
    for message in ('', 'summary:'):
        log_record(
            levelno=summary_logs_max_level,
            levelname=logging.getLevelName(summary_logs_max_level),
            msg=message,
        )

    for log in summary_logs:
        logger.handle(log)

    if summary_logs_max_level >= logging.CRITICAL:
        exit_with_help_link()