@@ -643,14 +643,14 @@ Duration: {0.duration}
         # so we can already remove it here, the next .save() will then commit this cleanup.
         # remove its manifest entry, remove its ArchiveItem chunk, remove its item_ptrs chunks:
         del self.manifest.archives[self.checkpoint_name]
-        self.cache.chunk_decref(self.id, self.stats)
+        self.cache.chunk_decref(self.id, 1, self.stats)
         for id in metadata.item_ptrs:
-            self.cache.chunk_decref(id, self.stats)
+            self.cache.chunk_decref(id, 1, self.stats)
         # also get rid of that part item, we do not want to have it in next checkpoint or final archive
         tail_chunks = self.items_buffer.restore_chunks_state()
         # tail_chunks contain the tail of the archive items metadata stream, not needed for next commit.
         for id in tail_chunks:
-            self.cache.chunk_decref(id, self.stats)
+            self.cache.chunk_decref(id, 1, self.stats)  # TODO can we have real size here?
 
     def save(self, name=None, comment=None, timestamp=None, stats=None, additional_metadata=None):
         name = name or self.name
@@ -1024,7 +1024,7 @@ Duration: {0.duration}
         new_id = self.key.id_hash(data)
         self.cache.add_chunk(new_id, {}, data, stats=self.stats, ro_type=ROBJ_ARCHIVE_META)
         self.manifest.archives[self.name] = (new_id, metadata.time)
-        self.cache.chunk_decref(self.id, self.stats)
+        self.cache.chunk_decref(self.id, 1, self.stats)
         self.id = new_id
 
     def rename(self, name):
@@ -1052,9 +1052,9 @@ Duration: {0.duration}
                 error = True
                 return exception_ignored  # must not return None here
 
-        def chunk_decref(id, stats):
+        def chunk_decref(id, size, stats):
             try:
-                self.cache.chunk_decref(id, stats, wait=False)
+                self.cache.chunk_decref(id, size, stats, wait=False)
             except KeyError:
                 cid = bin_to_hex(id)
                 raise ChunksIndexError(cid)
@@ -1073,13 +1073,13 @@ Duration: {0.duration}
                     pi.show(i)
                 _, data = self.repo_objs.parse(items_id, data, ro_type=ROBJ_ARCHIVE_STREAM)
                 unpacker.feed(data)
-                chunk_decref(items_id, stats)
+                chunk_decref(items_id, 1, stats)
                 try:
                     for item in unpacker:
                         item = Item(internal_dict=item)
                         if "chunks" in item:
                             for chunk_id, size in item.chunks:
-                                chunk_decref(chunk_id, stats)
+                                chunk_decref(chunk_id, size, stats)
                 except (TypeError, ValueError):
                     # if items metadata spans multiple chunks and one chunk got dropped somehow,
                     # it could be that unpacker yields bad types
@@ -1096,12 +1096,12 @@ Duration: {0.duration}
 
         # delete the blocks that store all the references that end up being loaded into metadata.items:
         for id in self.metadata.item_ptrs:
-            chunk_decref(id, stats)
+            chunk_decref(id, 1, stats)
 
         # in forced delete mode, we try hard to delete at least the manifest entry,
         # if possible also the archive superblock, even if processing the items raises
         # some harmless exception.
-        chunk_decref(self.id, stats)
+        chunk_decref(self.id, 1, stats)
         del self.manifest.archives[self.name]
         while fetch_async_response(wait=True) is not None:
             # we did async deletes, process outstanding results (== exceptions),
@@ -1510,7 +1510,7 @@ class FilesystemObjectProcessors:
         except BackupOSError:
             # see comments in process_file's exception handler, same issue here.
             for chunk in item.get("chunks", []):
-                cache.chunk_decref(chunk.id, self.stats, wait=False)
+                cache.chunk_decref(chunk.id, chunk.size, self.stats, wait=False)
             raise
         else:
             item.get_size(memorize=True)
@@ -1544,7 +1544,7 @@ class FilesystemObjectProcessors:
                     item.chunks = []
                     for chunk_id, chunk_size in hl_chunks:
                         # process one-by-one, so we will know in item.chunks how far we got
-                        chunk_entry = cache.chunk_incref(chunk_id, self.stats)
+                        chunk_entry = cache.chunk_incref(chunk_id, chunk_size, self.stats)
                         item.chunks.append(chunk_entry)
                 else:  # normal case, no "2nd+" hardlink
                     if not is_special_file:
@@ -1570,10 +1570,8 @@ class FilesystemObjectProcessors:
                         item.chunks = []
                         for chunk in chunks:
                             # process one-by-one, so we will know in item.chunks how far we got
-                            chunk_entry = cache.chunk_incref(chunk.id, self.stats)
-                            # chunk.size is from files cache, chunk_entry.size from index:
-                            assert chunk == chunk_entry
-                            item.chunks.append(chunk_entry)
+                            cache.chunk_incref(chunk.id, chunk.size, self.stats)
+                            item.chunks.append(chunk)
                         status = "U"  # regular file, unchanged
                     else:
                         status = "M" if known else "A"  # regular file, modified or added
@@ -1622,7 +1620,7 @@ class FilesystemObjectProcessors:
                 # but we will not add an item (see add_item in create_helper) and thus
                 # they would be orphaned chunks in case that we commit the transaction.
                 for chunk in item.get("chunks", []):
-                    cache.chunk_decref(chunk.id, self.stats, wait=False)
+                    cache.chunk_decref(chunk.id, chunk.size, self.stats, wait=False)
                 # Now that we have cleaned up the chunk references, we can re-raise the exception.
                 # This will skip processing of this file, but might retry or continue with the next one.
                 raise
@@ -1733,7 +1731,7 @@ class TarfileObjectProcessors:
             except BackupOSError:
                 # see comment in FilesystemObjectProcessors.process_file, same issue here.
                 for chunk in item.get("chunks", []):
-                    self.cache.chunk_decref(chunk.id, self.stats, wait=False)
+                    self.cache.chunk_decref(chunk.id, chunk.size, self.stats, wait=False)
                 raise
 
 
@@ -2446,7 +2444,7 @@ class ArchiveRecreater:
     def process_chunks(self, archive, target, item):
         if not target.recreate_rechunkify:
             for chunk_id, size in item.chunks:
-                self.cache.chunk_incref(chunk_id, target.stats)
+                self.cache.chunk_incref(chunk_id, size, target.stats)
             return item.chunks
         chunk_iterator = self.iter_chunks(archive, target, list(item.chunks))
         chunk_processor = partial(self.chunk_processor, target)
@@ -2454,8 +2452,9 @@ class ArchiveRecreater:
 
     def chunk_processor(self, target, chunk):
         chunk_id, data = cached_hash(chunk, self.key.id_hash)
+        size = len(data)
         if chunk_id in self.seen_chunks:
-            return self.cache.chunk_incref(chunk_id, target.stats)
+            return self.cache.chunk_incref(chunk_id, size, target.stats)
         chunk_entry = self.cache.add_chunk(chunk_id, {}, data, stats=target.stats, wait=False, ro_type=ROBJ_FILE_STREAM)
         self.cache.repository.async_response(wait=False)
         self.seen_chunks.add(chunk_entry.id)
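
All of the hunks above implement one API change: the cache's chunk_decref() and chunk_incref() now take the chunk size as a second positional argument, supplied by the caller (from item.chunks, the hardlink chunk list, or len(data)); where no real size is known (archive metadata objects), a dummy size of 1 is passed. Below is a minimal sketch of the new call pattern, assuming the argument shapes visible in the diff; the helper functions are illustrative only and do not exist in the codebase.

# Illustrative helpers only (not part of the patch); `cache` and `stats` stand in for
# the cache and statistics objects used by the callers in the diff above.

def drop_item_chunks(cache, item, stats):
    # decrement the refcount of each data chunk of an item, passing the size the caller already knows
    for chunk_id, size in item.chunks:
        cache.chunk_decref(chunk_id, size, stats, wait=False)  # new signature: (id, size, stats, ...)

def reuse_item_chunks(cache, chunks, stats):
    # re-reference chunks that are already stored (unchanged-file / hardlink case)
    for chunk in chunks:
        cache.chunk_incref(chunk.id, chunk.size, stats)  # new signature: (id, size, stats)
    return list(chunks)

# Archive metadata objects have no tracked plaintext size, so the diff passes a dummy size of 1:
# cache.chunk_decref(archive_id, 1, stats)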