
blacken the code

Thomas Waldmann · 9 months ago · commit 1231c961fb

+ 5 - 2
src/borg/archiver/_common.py

@@ -31,7 +31,9 @@ from ..logger import create_logger
 logger = create_logger(__name__)


-def get_repository(location, *, create, exclusive, lock_wait, lock, append_only, make_parent_dirs, storage_quota, args, v1_or_v2):
+def get_repository(
+    location, *, create, exclusive, lock_wait, lock, append_only, make_parent_dirs, storage_quota, args, v1_or_v2
+):
     if location.proto in ("ssh", "socket"):
         RemoteRepoCls = RemoteRepository if v1_or_v2 else RemoteRepository3
         repository = RemoteRepoCls(
@@ -209,7 +211,8 @@ def with_other_repository(manifest=False, cache=False, compatibility=None):
                 acceptable_versions = (1, 2) if v1_or_v2 else (3,)
                 if repository.version not in acceptable_versions:
                     raise Error(
-                        f"This borg version only accepts version {' or '.join(acceptable_versions)} repos for --other-repo.")
+                        f"This borg version only accepts version {' or '.join(acceptable_versions)} repos for --other-repo."
+                    )
                 kwargs["other_repository"] = repository
                 if manifest or cache:
                     manifest_ = Manifest.load(
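A note on the mechanics, for readers who don't use black: when a def line exceeds the configured line length (borg appears to set line-length = 120 under [tool.black] in pyproject.toml), black first tries to fit all parameters on a single indented line between the parentheses, exactly as above. A minimal sketch with hypothetical names:

    # hypothetical function; the name and parameters are illustrative, not from borg
    def fetch(
        url, *, timeout, retries, verify_tls, user_agent, proxy, max_redirects, chunk_size
    ):
        ...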

+ 9 - 3
src/borg/archiver/compact_cmd.py

@@ -11,6 +11,7 @@ from ..remote3 import RemoteRepository3
 from ..repository3 import Repository3

 from ..logger import create_logger
+
 logger = create_logger()


@@ -32,7 +33,13 @@ class ArchiveGarbageCollector:
         logger.info("Getting object IDs present in the repository...")
         logger.info("Getting object IDs present in the repository...")
         self.repository_chunks = self.get_repository_chunks()
         self.repository_chunks = self.get_repository_chunks()
         logger.info("Computing object IDs used by archives...")
         logger.info("Computing object IDs used by archives...")
-        self.used_chunks, self.wanted_chunks, self.total_files, self.total_size, self.archives_count = self.analyze_archives()
+        (
+            self.used_chunks,
+            self.wanted_chunks,
+            self.total_files,
+            self.total_size,
+            self.archives_count,
+        ) = self.analyze_archives()
         self.report_and_delete()
         logger.info("Finished compaction / garbage collection...")

@@ -109,8 +116,7 @@ class ArchiveGarbageCollector:
         if unused:
             logger.info(f"Deleting {len(unused)} unused objects...")
             pi = ProgressIndicatorPercent(
-                total=len(unused), msg="Deleting unused objects %3.1f%%", step=0.1,
-                msgid="compact.report_and_delete"
+                total=len(unused), msg="Deleting unused objects %3.1f%%", step=0.1, msgid="compact.report_and_delete"
             )
             for i, id in enumerate(unused):
                 pi.show(i)
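The multi-target assignment in __init__ exceeded the limit, so black wraps the assignment targets in parentheses, one per line, with a trailing comma added. A sketch of the same rule, assuming a hypothetical analyze() that returns a 5-tuple:

    (
        used,
        wanted,
        files,
        size,
        count,
    ) = analyze()  # hypothetical; mirrors the wrapping applied above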

+ 0 - 1
src/borg/archiver/delete_cmd.py

@@ -51,7 +51,6 @@ class DeleteMixIn:
             self.print_warning("Aborted.", wc=None)
         return

-
     def build_parser_delete(self, subparsers, common_parser, mid_common_parser):
         from ._common import process_epilog, define_archive_filters_group


+ 1 - 0
src/borg/cache.py

@@ -832,6 +832,7 @@ class AdHocCache(ChunksMixin):
     Chunks that were not added during the current AdHocCache lifetime won't have correct size set
     (0 bytes) and will have an infinite reference count (MAX_VALUE).
     """
+
     def __init__(self, manifest, warn_if_unencrypted=True, lock_wait=None, iec=False):
         assert isinstance(manifest, Manifest)
         self.manifest = manifest
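The cache.py hunk is another black rule: exactly one blank line between a class docstring and the first statement of the class body. Sketch:

    class Example:
        """Docstring."""

        def __init__(self):  # black guarantees the single blank line above
            ...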

+ 1 - 1
src/borg/helpers/parseformat.py

@@ -1192,7 +1192,7 @@ class BorgJsonEncoder(json.JSONEncoder):
             return {"id": bin_to_hex(o.id), "location": o._location.canonical_path()}
             return {"id": bin_to_hex(o.id), "location": o._location.canonical_path()}
         if isinstance(o, Archive):
         if isinstance(o, Archive):
             return o.info()
             return o.info()
-        if isinstance(o, (AdHocWithFilesCache, )):
+        if isinstance(o, (AdHocWithFilesCache,)):
             return {"path": o.path}
             return {"path": o.path}
         if isinstance(o, AdHocCache):
         if isinstance(o, AdHocCache):
             return {}
             return {}
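For one-element tuples, black drops the space before the closing parenthesis but keeps the trailing comma, since the comma is what makes the expression a tuple:

    t1 = (1, )  # before
    t2 = (1,)   # after black; still a one-element tuple
    n = (1)     # without the comma this is just the integer 1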

+ 8 - 4
src/borg/locking3.py

@@ -65,7 +65,7 @@ class Lock:
     matter how (e.g. if an exception occurred).
     """

-    def __init__(self, store, exclusive=False, sleep=None, timeout=1.0, stale=30*60, id=None):
+    def __init__(self, store, exclusive=False, sleep=None, timeout=1.0, stale=30 * 60, id=None):
         self.store = store
         self.is_exclusive = exclusive
         self.sleep = sleep
@@ -75,7 +75,7 @@ class Lock:
         self.retry_delay_min = 1.0
         self.retry_delay_max = 5.0
         self.stale_td = datetime.timedelta(seconds=stale)  # ignore/delete it if older
-        self.refresh_td = datetime.timedelta(seconds=stale//2)  # don't refresh it if younger
+        self.refresh_td = datetime.timedelta(seconds=stale // 2)  # don't refresh it if younger
         self.last_refresh_dt = None
         self.id = id or platform.get_process_id()
         assert len(self.id) == 3
@@ -134,7 +134,9 @@ class Lock:
         found_locks = []
         for key in locks:
             lock = locks[key]
-            if (not only_exclusive or lock["exclusive"]) and (not only_mine or (lock["hostid"], lock["processid"], lock["threadid"]) == self.id):
+            if (not only_exclusive or lock["exclusive"]) and (
+                not only_mine or (lock["hostid"], lock["processid"], lock["threadid"]) == self.id
+            ):
                 found_locks.append(lock)
         return found_locks

@@ -150,7 +152,9 @@ class Lock:
                 key = self._create_lock(exclusive=self.is_exclusive)
                 # obviously we have a race condition here: other client(s) might have created exclusive
                 # lock(s) at the same time in parallel. thus we have to check again.
-                time.sleep(self.race_recheck_delay)  # give other clients time to notice our exclusive lock, stop creating theirs
+                time.sleep(
+                    self.race_recheck_delay
+                )  # give other clients time to notice our exclusive lock, stop creating theirs
                 exclusive_locks = self._find_locks(only_exclusive=True)
                 if self.is_exclusive:
                     if len(exclusive_locks) == 1 and exclusive_locks[0]["key"] == key:
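Two rules are visible in locking3.py: black puts spaces around binary operators, and expressions too long for one line are wrapped in parentheses, even when, as with the time.sleep() call, that leaves the trailing comment stranded after the closing parenthesis. Briefly:

    stale = 30 * 60       # black normalizes operator spacing: 30*60 -> 30 * 60
    refresh = stale // 2  # integer division gets the same treatment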

+ 3 - 4
src/borg/manifest.py

@@ -8,6 +8,7 @@ from collections.abc import Sequence
 from borgstore.store import ObjectNotFound, ItemInfo

 from .logger import create_logger
+
 logger = create_logger()

 from .constants import *  # NOQA
@@ -263,6 +264,7 @@ class Manifest:

         if isinstance(repository, (Repository3, RemoteRepository3)):
             from .helpers import msgpack
+
             archives = {}
             try:
                 infos = list(repository.store_list("archives"))
@@ -357,10 +359,7 @@ class Manifest:
             manifest_archives = StableDict(self.archives.get_raw_dict())

         manifest = ManifestItem(
-            version=2,
-            archives=manifest_archives,
-            timestamp=self.timestamp,
-            config=StableDict(self.config),
+            version=2, archives=manifest_archives, timestamp=self.timestamp, config=StableDict(self.config)
         )
         data = self.key.pack_metadata(manifest.as_dict())
         self.id = self.repo_objs.id_hash(data)
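Note that the ManifestItem(...) call collapses to one line even though the original ended with a trailing comma; with black's defaults, that "magic" trailing comma would keep the call exploded. Presumably borg configures skip-magic-trailing-comma, so calls are re-packed whenever they fit within the line length. A sketch, assuming that option is set:

    # black rewrites this ...
    item = dict(
        version=2,
        config=cfg,
    )
    # ... as this, because it fits the configured line length:
    item = dict(version=2, config=cfg)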

+ 9 - 1
src/borg/remote.py

@@ -945,7 +945,15 @@ class RemoteRepository:
         v1_or_v2={"since": parse_version("2.0.0b8"), "previously": True},  # TODO fix version
         v1_or_v2={"since": parse_version("2.0.0b8"), "previously": True},  # TODO fix version
     )
     )
     def open(
     def open(
-        self, path, create=False, lock_wait=None, lock=True, exclusive=False, append_only=False, make_parent_dirs=False, v1_or_v2=False
+        self,
+        path,
+        create=False,
+        lock_wait=None,
+        lock=True,
+        exclusive=False,
+        append_only=False,
+        make_parent_dirs=False,
+        v1_or_v2=False,
     ):
         """actual remoting is done via self.call in the @api decorator"""


+ 18 - 2
src/borg/remote3.py

@@ -373,7 +373,15 @@ class RepositoryServer:  # pragma: no cover
         return os.path.realpath(path)

     def open(
-        self, path, create=False, lock_wait=None, lock=True, exclusive=None, append_only=False, make_parent_dirs=False, v1_or_v2=False
+        self,
+        path,
+        create=False,
+        lock_wait=None,
+        lock=True,
+        exclusive=None,
+        append_only=False,
+        make_parent_dirs=False,
+        v1_or_v2=False,
     ):
         self.RepoCls = Repository if v1_or_v2 else Repository3
         self.rpc_methods = self._rpc_methods if v1_or_v2 else self._rpc_methods3
@@ -975,7 +983,15 @@ class RemoteRepository3:
         v1_or_v2={"since": parse_version("2.0.0b8"), "previously": True},  # TODO fix version
         v1_or_v2={"since": parse_version("2.0.0b8"), "previously": True},  # TODO fix version
     )
     )
     def open(
     def open(
-        self, path, create=False, lock_wait=None, lock=True, exclusive=False, append_only=False, make_parent_dirs=False, v1_or_v2=False
+        self,
+        path,
+        create=False,
+        lock_wait=None,
+        lock=True,
+        exclusive=False,
+        append_only=False,
+        make_parent_dirs=False,
+        v1_or_v2=False,
     ):
         """actual remoting is done via self.call in the @api decorator"""


+ 3 - 3
src/borg/repoobj.py

@@ -22,7 +22,7 @@ class RepoObj:
         # used for crypto type detection
         hdr_size = cls.obj_header.size
         hdr = cls.ObjHeader(*cls.obj_header.unpack(data[:hdr_size]))
-        return data[hdr_size + hdr.meta_size:]
+        return data[hdr_size + hdr.meta_size :]

     def __init__(self, key):
         self.key = key
@@ -80,7 +80,7 @@ class RepoObj:
         hdr_size = self.obj_header.size
         hdr = self.ObjHeader(*self.obj_header.unpack(obj[:hdr_size]))
         assert hdr_size + hdr.meta_size <= len(obj)
-        meta_encrypted = obj[hdr_size:hdr_size + hdr.meta_size]
+        meta_encrypted = obj[hdr_size : hdr_size + hdr.meta_size]
         meta_packed = self.key.decrypt(id, meta_encrypted)
         meta = msgpack.unpackb(meta_packed)
         if ro_type != ROBJ_DONTCARE and meta["type"] != ro_type:
@@ -114,7 +114,7 @@ class RepoObj:
         if ro_type != ROBJ_DONTCARE and meta_compressed["type"] != ro_type:
             raise IntegrityError(f"ro_type expected: {ro_type} got: {meta_compressed['type']}")
         assert hdr_size + hdr.meta_size + hdr.data_size <= len(obj)
-        data_encrypted = obj[hdr_size + hdr.meta_size:hdr_size + hdr.meta_size + hdr.data_size]
+        data_encrypted = obj[hdr_size + hdr.meta_size : hdr_size + hdr.meta_size + hdr.data_size]
         data_compressed = self.key.decrypt(id, data_encrypted)  # does not include the type/level bytes
         if decompress:
             ctype = meta_compressed["ctype"]
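The slice edits are PEP 8 as black applies it: when a slice bound is more than a simple name or literal, the colon is treated like a binary operator and spaced on both sides; an omitted bound still gets its space, as the diff shows. With hypothetical variables:

    data[1:9]                          # simple bounds keep the tight form
    data[off + hlen : off + hlen + n]  # complex bounds: space on both sides
    data[idx + 1 :]                    # omitted right bound still gets the left space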

+ 15 - 14
src/borg/repository3.py

@@ -104,7 +104,7 @@ class Repository3:
         self._send_log = send_log_cb or (lambda: None)
         self.do_create = create
         self.created = False
-        self.acceptable_repo_versions = (3, )
+        self.acceptable_repo_versions = (3,)
         self.opened = False
         self.append_only = append_only  # XXX not implemented / not implementable
         self.storage_quota = storage_quota  # XXX not implemented
@@ -196,7 +196,13 @@ class Repository3:

     def info(self):
         """return some infos about the repo (must be opened first)"""
-        info = dict(id=self.id, version=self.version, storage_quota_use=self.storage_quota_use, storage_quota=self.storage_quota, append_only=self.append_only)
+        info = dict(
+            id=self.id,
+            version=self.version,
+            storage_quota_use=self.storage_quota_use,
+            storage_quota=self.storage_quota,
+            append_only=self.append_only,
+        )
         return info

     def commit(self, compact=True, threshold=0.1):
@@ -204,6 +210,7 @@ class Repository3:

     def check(self, repair=False, max_duration=0):
         """Check repository consistency"""
+
         def log_error(msg):
             nonlocal obj_corrupted
             obj_corrupted = True
@@ -228,12 +235,12 @@ class Repository3:
                 obj_size = len(obj)
                 if obj_size >= hdr_size:
                     hdr = RepoObj.ObjHeader(*RepoObj.obj_header.unpack(obj[:hdr_size]))
-                    meta = obj[hdr_size:hdr_size+hdr.meta_size]
+                    meta = obj[hdr_size : hdr_size + hdr.meta_size]
                     if hdr.meta_size != len(meta):
                         log_error("metadata size incorrect.")
                     elif hdr.meta_hash != xxh64(meta):
                         log_error("metadata does not match checksum.")
-                    data = obj[hdr_size+hdr.meta_size:hdr_size+hdr.meta_size+hdr.data_size]
+                    data = obj[hdr_size + hdr.meta_size : hdr_size + hdr.meta_size + hdr.data_size]
                     if hdr.data_size != len(data):
                         log_error("data size incorrect.")
                     elif hdr.data_hash != xxh64(data):
@@ -276,12 +283,11 @@ class Repository3:
             ids = []
         if marker is not None:
             idx = ids.index(marker)
-            ids = ids[idx + 1:]
+            ids = ids[idx + 1 :]
         if limit is not None:
             return ids[:limit]
         return ids

-
     def scan(self, limit=None, state=None):
         """
         list (the next) <limit> chunk IDs from the repository.
@@ -312,24 +318,19 @@ class Repository3:
                 obj = self.store.load(key, size=hdr_size + extra_size)
                 hdr = obj[0:hdr_size]
                 if len(hdr) != hdr_size:
-                    raise IntegrityError(
-                        f"Object too small [id {id_hex}]: expected {hdr_size}, got {len(hdr)} bytes"
-                    )
+                    raise IntegrityError(f"Object too small [id {id_hex}]: expected {hdr_size}, got {len(hdr)} bytes")
                 meta_size = RepoObj.obj_header.unpack(hdr)[0]
                 if meta_size > extra_size:
                     # we did not get enough, need to load more, but not all.
                     # this should be rare, as chunk metadata is rather small usually.
                     obj = self.store.load(key, size=hdr_size + meta_size)
-                meta = obj[hdr_size:hdr_size + meta_size]
+                meta = obj[hdr_size : hdr_size + meta_size]
                 if len(meta) != meta_size:
-                    raise IntegrityError(
-                        f"Object too small [id {id_hex}]: expected {meta_size}, got {len(meta)} bytes"
-                    )
+                    raise IntegrityError(f"Object too small [id {id_hex}]: expected {meta_size}, got {len(meta)} bytes")
                 return hdr + meta
         except StoreObjectNotFound:
             raise self.ObjectNotFound(id, self.path) from None

-
     def get_many(self, ids, read_data=True, is_preloaded=False):
         for id_ in ids:
             yield self.get(id_, read_data=read_data)
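repository3.py collects the remaining rules in one place: calls that now fit within the line length are collapsed back to a single line, runs of blank lines inside a class are reduced to one (hence the removed lines before scan() and get_many()), and a blank line is inserted between a docstring and a nested def. Sketch:

    class Repo:
        def check(self):
            """Check consistency."""

            def log_error(msg):  # black inserts the blank line above nested defs
                ...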

+ 1 - 5
src/borg/testsuite/locking3.py

@@ -4,11 +4,7 @@ import pytest

 from borgstore.store import Store

-from ..locking3 import (
-    Lock,
-    LockFailed,
-    NotLocked,
-)
+from ..locking3 import Lock, LockFailed, NotLocked

 ID1 = "foo", 1, 1
 ID2 = "bar", 2, 2