Quellcode durchsuchen

hashindex: remove .compact

Our on-disk formats only store used keys/values,
so they are always compact on-disk.
Thomas Waldmann vor 7 Monaten
Ursprung
Commit
68143d6f99
4 geänderte Dateien mit 7 neuen und 16 gelöschten Zeilen
  1. 1 1
      src/borg/archiver/compact_cmd.py
  2. 4 7
      src/borg/cache.py
  3. 0 6
      src/borg/hashindex.pyx
  4. 2 2
      src/borg/repository.py

+ 1 - 1
src/borg/archiver/compact_cmd.py

@@ -65,7 +65,7 @@ class ArchiveGarbageCollector:
             # as we put the wrong size in there, we need to clean up the size:
             self.chunks[id] = ChunkIndexEntry(refcount=ChunkIndex.MAX_VALUE, size=0)
         # now self.chunks is an uptodate ChunkIndex, usable for general borg usage!
-        write_chunkindex_to_repo_cache(self.repository, self.chunks, compact=True, clear=True, force_write=True)
+        write_chunkindex_to_repo_cache(self.repository, self.chunks, clear=True, force_write=True)
         self.chunks = None  # nothing there (cleared!)
 
     def analyze_archives(self) -> Tuple[Set, Set, int, int, int]:

+ 4 - 7
src/borg/cache.py

@@ -630,11 +630,8 @@ def load_chunks_hash(repository) -> bytes:
     return hash
 
 
-def write_chunkindex_to_repo_cache(repository, chunks, *, compact=False, clear=False, force_write=False):
+def write_chunkindex_to_repo_cache(repository, chunks, *, clear=False, force_write=False):
     cached_hash = load_chunks_hash(repository)
-    if compact:
-        # if we don't need the in-memory chunks index anymore:
-        chunks.compact()  # vacuum the hash table
     with io.BytesIO() as f:
         chunks.write(f)
         data = f.getvalue()
@@ -698,7 +695,7 @@ def build_chunkindex_from_repo(repository, *, disable_caches=False, cache_immedi
     logger.debug(f"queried {num_chunks} chunk IDs in {duration} s, ~{speed}/s")
     if cache_immediately:
         # immediately update cache/chunks, so we only rarely have to do it the slow way:
-        write_chunkindex_to_repo_cache(repository, chunks, compact=False, clear=False, force_write=True)
+        write_chunkindex_to_repo_cache(repository, chunks, clear=False, force_write=True)
     return chunks
 
 
@@ -770,8 +767,8 @@ class ChunksMixin:
         return ChunkListEntry(id, size)
 
     def _write_chunks_cache(self, chunks):
-        # this is called from .close, so we can clear/compact here:
-        write_chunkindex_to_repo_cache(self.repository, self._chunks, compact=True, clear=True)
+        # this is called from .close, so we can clear here:
+        write_chunkindex_to_repo_cache(self.repository, self._chunks, clear=True)
         self._chunks = None  # nothing there (cleared!)
 
     def refresh_lock(self, now):

+ 0 - 6
src/borg/hashindex.pyx

@@ -54,9 +54,6 @@ class ChunkIndex:
         refcount = min(self.MAX_VALUE, v.refcount + refs)
         self[key] = v._replace(refcount=refcount, size=size)
 
-    def compact(self):
-        return 0
-
     def clear(self):
         pass
 
@@ -155,9 +152,6 @@ class NSIndex1:
             else:
                 do_yield = key == marker
 
-    def compact(self):
-        return 0
-
     def clear(self):
         pass
 

+ 2 - 2
src/borg/repository.py

@@ -193,7 +193,7 @@ class Repository:
             # to build the ChunkIndex the slow way by listing all the directories.
             from borg.cache import write_chunkindex_to_repo_cache
 
-            write_chunkindex_to_repo_cache(self, ChunkIndex(), compact=True, clear=True, force_write=True)
+            write_chunkindex_to_repo_cache(self, ChunkIndex(), clear=True, force_write=True)
         finally:
             self.store.close()
 
@@ -385,7 +385,7 @@ class Repository:
                     # if we did a full pass in one go, we built a complete, uptodate ChunkIndex, cache it!
                     from .cache import write_chunkindex_to_repo_cache
 
-                    write_chunkindex_to_repo_cache(self, chunks, compact=True, clear=True, force_write=True)
+                    write_chunkindex_to_repo_cache(self, chunks, clear=True, force_write=True)
         except StoreObjectNotFound:
             # it can be that there is no "data/" at all, then it crashes when iterating infos.
             pass