@@ -15,6 +15,7 @@ from ..helpers import msgpack
 from ..locking import Lock, LockFailed
 from ..remote import RemoteRepository, InvalidRPCMethod, PathNotAllowed, handle_remote_line
 from ..repository import Repository, LoggedIO, MAGIC, MAX_DATA_SIZE, TAG_DELETE, TAG_PUT2, TAG_PUT, TAG_COMMIT
+from ..repoobj import RepoObj
 from . import BaseTestCase
 from .hashindex import H
@@ -22,6 +23,29 @@ from .hashindex import H
 UNSPECIFIED = object()  # for default values where we can't use None
 
 
+def fchunk(data, meta=b""):
+    # create a raw chunk that has valid RepoObj layout, but does not use encryption or compression.
+    meta_len = RepoObj.meta_len_hdr.pack(len(meta))
+    assert isinstance(data, bytes)
+    chunk = meta_len + meta + data
+    return chunk
+
+
+def pchunk(chunk):
+    # parse data and meta from a raw chunk made by fchunk
+    meta_len_size = RepoObj.meta_len_hdr.size
+    meta_len = chunk[:meta_len_size]
+    meta_len = RepoObj.meta_len_hdr.unpack(meta_len)[0]
+    meta = chunk[meta_len_size : meta_len_size + meta_len]
+    data = chunk[meta_len_size + meta_len :]
+    return data, meta
+
+
+def pdchunk(chunk):
+    # parse only data from a raw chunk made by fchunk
+    return pchunk(chunk)[0]
+
+
 class RepositoryTestCaseBase(BaseTestCase):
     key_size = 32
     exclusive = True
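
The fchunk/pchunk/pdchunk helpers added above wrap and unwrap the raw chunk layout that RepoObj uses: a fixed-size length header, then the metadata bytes, then the data bytes. Because the tests now store such framed chunks instead of bare payloads, reads go through pdchunk() before comparing against the original data, and size-based assertions (quota accounting, sparse tracking, MAX_DATA_SIZE) use len(fchunk(...)) to include the framing overhead. A minimal standalone sketch of that layout, for illustration only (the "<I" header format is an assumption made here; the tests themselves always derive it from RepoObj.meta_len_hdr):

    # illustrative sketch of the [meta_len][meta][data] framing used by the test helpers
    import struct

    meta_len_hdr = struct.Struct("<I")  # assumed stand-in for RepoObj.meta_len_hdr

    def make_chunk(data: bytes, meta: bytes = b"") -> bytes:
        return meta_len_hdr.pack(len(meta)) + meta + data

    def parse_chunk(chunk: bytes):
        hdr_size = meta_len_hdr.size
        (meta_len,) = meta_len_hdr.unpack(chunk[:hdr_size])
        meta = chunk[hdr_size : hdr_size + meta_len]
        data = chunk[hdr_size + meta_len :]
        return data, meta

    assert parse_chunk(make_chunk(b"payload", b"m")) == (b"payload", b"m")
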
@@ -46,12 +70,12 @@ class RepositoryTestCaseBase(BaseTestCase):
         self.repository = self.open(exclusive=exclusive)
 
     def add_keys(self):
-        self.repository.put(H(0), b"foo")
-        self.repository.put(H(1), b"bar")
-        self.repository.put(H(3), b"bar")
+        self.repository.put(H(0), fchunk(b"foo"))
+        self.repository.put(H(1), fchunk(b"bar"))
+        self.repository.put(H(3), fchunk(b"bar"))
         self.repository.commit(compact=False)
-        self.repository.put(H(1), b"bar2")
-        self.repository.put(H(2), b"boo")
+        self.repository.put(H(1), fchunk(b"bar2"))
+        self.repository.put(H(2), fchunk(b"boo"))
         self.repository.delete(H(3))
 
     def repo_dump(self, label=None):
@@ -60,7 +84,7 @@ class RepositoryTestCaseBase(BaseTestCase):
         H_trans[None] = -1  # key == None appears in commits
         tag_trans = {TAG_PUT2: "put2", TAG_PUT: "put", TAG_DELETE: "del", TAG_COMMIT: "comm"}
         for segment, fn in self.repository.io.segment_iterator():
-            for tag, key, offset, size in self.repository.io.iter_objects(segment):
+            for tag, key, offset, size, _ in self.repository.io.iter_objects(segment):
                 print("%s%s H(%d) -> %s[%d..+%d]" % (label, tag_trans[tag], H_trans[key], fn, offset, size))
         print()
 
@@ -68,9 +92,9 @@ class RepositoryTestCaseBase(BaseTestCase):
 class RepositoryTestCase(RepositoryTestCaseBase):
     def test1(self):
         for x in range(100):
-            self.repository.put(H(x), b"SOMEDATA")
+            self.repository.put(H(x), fchunk(b"SOMEDATA"))
         key50 = H(50)
-        self.assert_equal(self.repository.get(key50), b"SOMEDATA")
+        self.assert_equal(pdchunk(self.repository.get(key50)), b"SOMEDATA")
         self.repository.delete(key50)
         self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(key50))
         self.repository.commit(compact=False)
@@ -80,55 +104,66 @@ class RepositoryTestCase(RepositoryTestCaseBase):
         for x in range(100):
             if x == 50:
                 continue
-            self.assert_equal(repository2.get(H(x)), b"SOMEDATA")
+            self.assert_equal(pdchunk(repository2.get(H(x))), b"SOMEDATA")
 
     def test2(self):
         """Test multiple sequential transactions"""
-        self.repository.put(H(0), b"foo")
-        self.repository.put(H(1), b"foo")
+        self.repository.put(H(0), fchunk(b"foo"))
+        self.repository.put(H(1), fchunk(b"foo"))
         self.repository.commit(compact=False)
         self.repository.delete(H(0))
-        self.repository.put(H(1), b"bar")
+        self.repository.put(H(1), fchunk(b"bar"))
+        self.repository.commit(compact=False)
+        self.assert_equal(pdchunk(self.repository.get(H(1))), b"bar")
+
+    def test_read_data(self):
+        meta, data = b"meta", b"data"
+        meta_len = RepoObj.meta_len_hdr.pack(len(meta))
+        chunk_complete = meta_len + meta + data
+        chunk_short = meta_len + meta
+        self.repository.put(H(0), chunk_complete)
         self.repository.commit(compact=False)
-        self.assert_equal(self.repository.get(H(1)), b"bar")
+        self.assert_equal(self.repository.get(H(0)), chunk_complete)
+        self.assert_equal(self.repository.get(H(0), read_data=True), chunk_complete)
+        self.assert_equal(self.repository.get(H(0), read_data=False), chunk_short)
 
     def test_consistency(self):
         """Test cache consistency"""
-        self.repository.put(H(0), b"foo")
-        self.assert_equal(self.repository.get(H(0)), b"foo")
-        self.repository.put(H(0), b"foo2")
-        self.assert_equal(self.repository.get(H(0)), b"foo2")
-        self.repository.put(H(0), b"bar")
-        self.assert_equal(self.repository.get(H(0)), b"bar")
+        self.repository.put(H(0), fchunk(b"foo"))
+        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo")
+        self.repository.put(H(0), fchunk(b"foo2"))
+        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo2")
+        self.repository.put(H(0), fchunk(b"bar"))
+        self.assert_equal(pdchunk(self.repository.get(H(0))), b"bar")
         self.repository.delete(H(0))
         self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(H(0)))
 
     def test_consistency2(self):
         """Test cache consistency2"""
-        self.repository.put(H(0), b"foo")
-        self.assert_equal(self.repository.get(H(0)), b"foo")
+        self.repository.put(H(0), fchunk(b"foo"))
+        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo")
         self.repository.commit(compact=False)
-        self.repository.put(H(0), b"foo2")
-        self.assert_equal(self.repository.get(H(0)), b"foo2")
+        self.repository.put(H(0), fchunk(b"foo2"))
+        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo2")
         self.repository.rollback()
-        self.assert_equal(self.repository.get(H(0)), b"foo")
+        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo")
 
     def test_overwrite_in_same_transaction(self):
         """Test cache consistency2"""
-        self.repository.put(H(0), b"foo")
-        self.repository.put(H(0), b"foo2")
+        self.repository.put(H(0), fchunk(b"foo"))
+        self.repository.put(H(0), fchunk(b"foo2"))
         self.repository.commit(compact=False)
-        self.assert_equal(self.repository.get(H(0)), b"foo2")
+        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo2")
 
     def test_single_kind_transactions(self):
         # put
-        self.repository.put(H(0), b"foo")
+        self.repository.put(H(0), fchunk(b"foo"))
         self.repository.commit(compact=False)
         self.repository.close()
         # replace
         self.repository = self.open()
         with self.repository:
-            self.repository.put(H(0), b"bar")
+            self.repository.put(H(0), fchunk(b"bar"))
             self.repository.commit(compact=False)
         # delete
         self.repository = self.open()
@@ -138,7 +173,7 @@ class RepositoryTestCase(RepositoryTestCaseBase):
 
     def test_list(self):
         for x in range(100):
-            self.repository.put(H(x), b"SOMEDATA")
+            self.repository.put(H(x), fchunk(b"SOMEDATA"))
         self.repository.commit(compact=False)
         all = self.repository.list()
         self.assert_equal(len(all), 100)
@@ -152,7 +187,7 @@ class RepositoryTestCase(RepositoryTestCaseBase):
 
     def test_scan(self):
         for x in range(100):
-            self.repository.put(H(x), b"SOMEDATA")
+            self.repository.put(H(x), fchunk(b"SOMEDATA"))
         self.repository.commit(compact=False)
         all = self.repository.scan()
         assert len(all) == 100
@@ -168,14 +203,14 @@ class RepositoryTestCase(RepositoryTestCaseBase):
             assert all[x] == H(x)
 
     def test_max_data_size(self):
-        max_data = b"x" * MAX_DATA_SIZE
-        self.repository.put(H(0), max_data)
-        self.assert_equal(self.repository.get(H(0)), max_data)
-        self.assert_raises(IntegrityError, lambda: self.repository.put(H(1), max_data + b"x"))
+        max_data = b"x" * (MAX_DATA_SIZE - RepoObj.meta_len_hdr.size)
+        self.repository.put(H(0), fchunk(max_data))
+        self.assert_equal(pdchunk(self.repository.get(H(0))), max_data)
+        self.assert_raises(IntegrityError, lambda: self.repository.put(H(1), fchunk(max_data + b"x")))
 
     def test_set_flags(self):
         id = H(0)
-        self.repository.put(id, b"")
+        self.repository.put(id, fchunk(b""))
         self.assert_equal(self.repository.flags(id), 0x00000000)  # init == all zero
         self.repository.flags(id, mask=0x00000001, value=0x00000001)
         self.assert_equal(self.repository.flags(id), 0x00000001)
@@ -188,7 +223,7 @@ class RepositoryTestCase(RepositoryTestCaseBase):
 
     def test_get_flags(self):
         id = H(0)
-        self.repository.put(id, b"")
+        self.repository.put(id, fchunk(b""))
         self.assert_equal(self.repository.flags(id), 0x00000000)  # init == all zero
         self.repository.flags(id, mask=0xC0000003, value=0x80000001)
         self.assert_equal(self.repository.flags(id, mask=0x00000001), 0x00000001)
@@ -199,7 +234,7 @@ class RepositoryTestCase(RepositoryTestCaseBase):
     def test_flags_many(self):
         ids_flagged = [H(0), H(1)]
         ids_default_flags = [H(2), H(3)]
-        [self.repository.put(id, b"") for id in ids_flagged + ids_default_flags]
+        [self.repository.put(id, fchunk(b"")) for id in ids_flagged + ids_default_flags]
         self.repository.flags_many(ids_flagged, mask=0xFFFFFFFF, value=0xDEADBEEF)
         self.assert_equal(list(self.repository.flags_many(ids_default_flags)), [0x00000000, 0x00000000])
         self.assert_equal(list(self.repository.flags_many(ids_flagged)), [0xDEADBEEF, 0xDEADBEEF])
@@ -207,8 +242,8 @@ class RepositoryTestCase(RepositoryTestCaseBase):
         self.assert_equal(list(self.repository.flags_many(ids_flagged, mask=0x0000FFFF)), [0x0000BEEF, 0x0000BEEF])
 
     def test_flags_persistence(self):
-        self.repository.put(H(0), b"default")
-        self.repository.put(H(1), b"one one zero")
+        self.repository.put(H(0), fchunk(b"default"))
+        self.repository.put(H(1), fchunk(b"one one zero"))
         # we do not set flags for H(0), so we can later check their default state.
         self.repository.flags(H(1), mask=0x00000007, value=0x00000006)
         self.repository.commit(compact=False)
@@ -227,38 +262,39 @@ class LocalRepositoryTestCase(RepositoryTestCaseBase):
 
     def _assert_sparse(self):
         # The superseded 123456... PUT
-        assert self.repository.compact[0] == 41 + 8 + 9
+        assert self.repository.compact[0] == 41 + 8 + len(fchunk(b"123456789"))
         # a COMMIT
         assert self.repository.compact[1] == 9
         # The DELETE issued by the superseding PUT (or issued directly)
         assert self.repository.compact[2] == 41
         self.repository._rebuild_sparse(0)
-        assert self.repository.compact[0] == 41 + 8 + 9
+        assert self.repository.compact[0] == 41 + 8 + len(fchunk(b"123456789"))  # 9 is chunk or commit?
 
     def test_sparse1(self):
-        self.repository.put(H(0), b"foo")
-        self.repository.put(H(1), b"123456789")
+        self.repository.put(H(0), fchunk(b"foo"))
+        self.repository.put(H(1), fchunk(b"123456789"))
         self.repository.commit(compact=False)
-        self.repository.put(H(1), b"bar")
+        self.repository.put(H(1), fchunk(b"bar"))
         self._assert_sparse()
 
     def test_sparse2(self):
-        self.repository.put(H(0), b"foo")
-        self.repository.put(H(1), b"123456789")
+        self.repository.put(H(0), fchunk(b"foo"))
+        self.repository.put(H(1), fchunk(b"123456789"))
         self.repository.commit(compact=False)
         self.repository.delete(H(1))
         self._assert_sparse()
 
     def test_sparse_delete(self):
-        self.repository.put(H(0), b"1245")
+        ch0 = fchunk(b"1245")
+        self.repository.put(H(0), ch0)
         self.repository.delete(H(0))
         self.repository.io._write_fd.sync()
 
         # The on-line tracking works on a per-object basis...
-        assert self.repository.compact[0] == 41 + 8 + 41 + 4
+        assert self.repository.compact[0] == 41 + 8 + 41 + len(ch0)
         self.repository._rebuild_sparse(0)
         # ...while _rebuild_sparse can mark whole segments as completely sparse (which then includes the segment magic)
-        assert self.repository.compact[0] == 41 + 8 + 41 + 4 + len(MAGIC)
+        assert self.repository.compact[0] == 41 + 8 + 41 + len(ch0) + len(MAGIC)
 
         self.repository.commit(compact=True)
         assert 0 not in [segment for segment, _ in self.repository.io.segment_iterator()]
@@ -266,7 +302,7 @@ class LocalRepositoryTestCase(RepositoryTestCaseBase):
     def test_uncommitted_garbage(self):
         # uncommitted garbage should be no problem, it is cleaned up automatically.
         # we just have to be careful with invalidation of cached FDs in LoggedIO.
-        self.repository.put(H(0), b"foo")
+        self.repository.put(H(0), fchunk(b"foo"))
         self.repository.commit(compact=False)
         # write some crap to a uncommitted segment file
         last_segment = self.repository.io.get_latest_segment()
@@ -276,7 +312,7 @@ class LocalRepositoryTestCase(RepositoryTestCaseBase):
         # usually, opening the repo and starting a transaction should trigger a cleanup.
         self.repository = self.open()
         with self.repository:
-            self.repository.put(H(0), b"bar")  # this may trigger compact_segments()
+            self.repository.put(H(0), fchunk(b"bar"))  # this may trigger compact_segments()
             self.repository.commit(compact=True)
         # the point here is that nothing blows up with an exception.
 
@@ -363,8 +399,8 @@ class RepositoryCommitTestCase(RepositoryTestCaseBase):
         assert not io.is_committed_segment(io.get_latest_segment())
 
     def test_moved_deletes_are_tracked(self):
-        self.repository.put(H(1), b"1")
-        self.repository.put(H(2), b"2")
+        self.repository.put(H(1), fchunk(b"1"))
+        self.repository.put(H(2), fchunk(b"2"))
         self.repository.commit(compact=False)
         self.repo_dump("p1 p2 c")
         self.repository.delete(H(1))
@@ -372,19 +408,19 @@ class RepositoryCommitTestCase(RepositoryTestCaseBase):
         self.repo_dump("d1 cc")
         last_segment = self.repository.io.get_latest_segment() - 1
         num_deletes = 0
-        for tag, key, offset, size in self.repository.io.iter_objects(last_segment):
+        for tag, key, offset, size, _ in self.repository.io.iter_objects(last_segment):
             if tag == TAG_DELETE:
                 assert key == H(1)
                 num_deletes += 1
         assert num_deletes == 1
         assert last_segment in self.repository.compact
-        self.repository.put(H(3), b"3")
+        self.repository.put(H(3), fchunk(b"3"))
         self.repository.commit(compact=True)
         self.repo_dump("p3 cc")
         assert last_segment not in self.repository.compact
         assert not self.repository.io.segment_exists(last_segment)
         for segment, _ in self.repository.io.segment_iterator():
-            for tag, key, offset, size in self.repository.io.iter_objects(segment):
+            for tag, key, offset, size, _ in self.repository.io.iter_objects(segment):
                 assert tag != TAG_DELETE
                 assert key != H(1)
         # after compaction, there should be no empty shadowed_segments lists left over.
@@ -393,7 +429,7 @@ class RepositoryCommitTestCase(RepositoryTestCaseBase):
 
     def test_shadowed_entries_are_preserved(self):
         get_latest_segment = self.repository.io.get_latest_segment
-        self.repository.put(H(1), b"1")
+        self.repository.put(H(1), fchunk(b"1"))
         # This is the segment with our original PUT of interest
         put_segment = get_latest_segment()
         self.repository.commit(compact=False)
@@ -401,7 +437,7 @@ class RepositoryCommitTestCase(RepositoryTestCaseBase):
         # We now delete H(1), and force this segment to not be compacted, which can happen
         # if it's not sparse enough (symbolized by H(2) here).
         self.repository.delete(H(1))
-        self.repository.put(H(2), b"1")
+        self.repository.put(H(2), fchunk(b"1"))
         delete_segment = get_latest_segment()
 
         # We pretend these are mostly dense (not sparse) and won't be compacted
@@ -426,7 +462,7 @@ class RepositoryCommitTestCase(RepositoryTestCaseBase):
         assert H(1) not in self.repository
 
     def test_shadow_index_rollback(self):
-        self.repository.put(H(1), b"1")
+        self.repository.put(H(1), fchunk(b"1"))
         self.repository.delete(H(1))
         assert self.repository.shadow_index[H(1)] == [0]
         self.repository.commit(compact=True)
@@ -440,7 +476,7 @@ class RepositoryCommitTestCase(RepositoryTestCaseBase):
         assert self.repository.shadow_index[H(1)] == [4]
         self.repository.rollback()
         self.repo_dump("r")
-        self.repository.put(H(2), b"1")
+        self.repository.put(H(2), fchunk(b"1"))
         # After the rollback segment 4 shouldn't be considered anymore
         assert self.repository.shadow_index[H(1)] == []  # because the delete is considered unstable
 
@@ -459,19 +495,19 @@ class RepositoryAppendOnlyTestCase(RepositoryTestCaseBase):
         def segments_in_repository():
             return len(list(self.repository.io.segment_iterator()))
 
-        self.repository.put(H(0), b"foo")
+        self.repository.put(H(0), fchunk(b"foo"))
         self.repository.commit(compact=False)
 
         self.repository.append_only = False
         assert segments_in_repository() == 2
-        self.repository.put(H(0), b"foo")
+        self.repository.put(H(0), fchunk(b"foo"))
         self.repository.commit(compact=True)
         # normal: compact squashes the data together, only one segment
         assert segments_in_repository() == 2
 
         self.repository.append_only = True
         assert segments_in_repository() == 2
-        self.repository.put(H(0), b"foo")
+        self.repository.put(H(0), fchunk(b"foo"))
         self.repository.commit(compact=False)
         # append only: does not compact, only new segments written
         assert segments_in_repository() == 4
@@ -485,7 +521,7 @@ class RepositoryFreeSpaceTestCase(RepositoryTestCaseBase):
         self.reopen()
 
         with self.repository:
-            self.repository.put(H(0), b"foobar")
+            self.repository.put(H(0), fchunk(b"foobar"))
             with pytest.raises(Repository.InsufficientFreeSpaceError):
                 self.repository.commit(compact=False)
         assert os.path.exists(self.repository.path)
@@ -500,45 +536,52 @@
 class QuotaTestCase(RepositoryTestCaseBase):
     def test_tracking(self):
         assert self.repository.storage_quota_use == 0
-        self.repository.put(H(1), bytes(1234))
-        assert self.repository.storage_quota_use == 1234 + 41 + 8
-        self.repository.put(H(2), bytes(5678))
-        assert self.repository.storage_quota_use == 1234 + 5678 + 2 * (41 + 8)
+        ch1 = fchunk(bytes(1234))
+        self.repository.put(H(1), ch1)
+        assert self.repository.storage_quota_use == len(ch1) + 41 + 8
+        ch2 = fchunk(bytes(5678))
+        self.repository.put(H(2), ch2)
+        assert self.repository.storage_quota_use == len(ch1) + len(ch2) + 2 * (41 + 8)
         self.repository.delete(H(1))
-        assert self.repository.storage_quota_use == 1234 + 5678 + 2 * (41 + 8)  # we have not compacted yet
+        assert self.repository.storage_quota_use == len(ch1) + len(ch2) + 2 * (41 + 8)  # we have not compacted yet
         self.repository.commit(compact=False)
-        assert self.repository.storage_quota_use == 1234 + 5678 + 2 * (41 + 8)  # we have not compacted yet
+        assert self.repository.storage_quota_use == len(ch1) + len(ch2) + 2 * (41 + 8)  # we have not compacted yet
         self.reopen()
         with self.repository:
             # Open new transaction; hints and thus quota data is not loaded unless needed.
-            self.repository.put(H(3), b"")
+            ch3 = fchunk(b"")
+            self.repository.put(H(3), ch3)
             self.repository.delete(H(3))
-            assert self.repository.storage_quota_use == 1234 + 5678 + 3 * (41 + 8)  # we have not compacted yet
+            assert self.repository.storage_quota_use == len(ch1) + len(ch2) + len(ch3) + 3 * (
+                41 + 8
+            )  # we have not compacted yet
             self.repository.commit(compact=True)
-            assert self.repository.storage_quota_use == 5678 + 41 + 8
+            assert self.repository.storage_quota_use == len(ch2) + 41 + 8
 
     def test_exceed_quota(self):
         assert self.repository.storage_quota_use == 0
         self.repository.storage_quota = 80
-        self.repository.put(H(1), b"")
-        assert self.repository.storage_quota_use == 41 + 8
+        ch1 = fchunk(b"x" * 7)
+        self.repository.put(H(1), ch1)
+        assert self.repository.storage_quota_use == len(ch1) + 41 + 8
         self.repository.commit(compact=False)
         with pytest.raises(Repository.StorageQuotaExceeded):
-            self.repository.put(H(2), b"")
-            assert self.repository.storage_quota_use == (41 + 8) * 2
+            ch2 = fchunk(b"y" * 13)
+            self.repository.put(H(2), ch2)
+            assert self.repository.storage_quota_use == len(ch1) + len(ch2) + (41 + 8) * 2  # check ch2!?
         with pytest.raises(Repository.StorageQuotaExceeded):
             self.repository.commit(compact=False)
-        assert self.repository.storage_quota_use == (41 + 8) * 2
+        assert self.repository.storage_quota_use == len(ch1) + len(ch2) + (41 + 8) * 2  # check ch2!?
        self.reopen()
         with self.repository:
             self.repository.storage_quota = 150
             # Open new transaction; hints and thus quota data is not loaded unless needed.
-            self.repository.put(H(1), b"")
+            self.repository.put(H(1), ch1)
             assert (
-                self.repository.storage_quota_use == (41 + 8) * 2
+                self.repository.storage_quota_use == len(ch1) * 2 + (41 + 8) * 2
             )  # we have 2 puts for H(1) here and not yet compacted.
             self.repository.commit(compact=True)
-            assert self.repository.storage_quota_use == 41 + 8  # now we have compacted.
+            assert self.repository.storage_quota_use == len(ch1) + 41 + 8  # now we have compacted.
 
 
 class NonceReservation(RepositoryTestCaseBase):
@@ -586,13 +629,13 @@
 class RepositoryAuxiliaryCorruptionTestCase(RepositoryTestCaseBase):
     def setUp(self):
         super().setUp()
-        self.repository.put(H(0), b"foo")
+        self.repository.put(H(0), fchunk(b"foo"))
         self.repository.commit(compact=False)
         self.repository.close()
 
     def do_commit(self):
         with self.repository:
-            self.repository.put(H(0), b"fox")
+            self.repository.put(H(0), fchunk(b"fox"))
             self.repository.commit(compact=False)
 
     def test_corrupted_hints(self):
@@ -648,7 +691,7 @@ class RepositoryAuxiliaryCorruptionTestCase(RepositoryTestCaseBase):
             # Data corruption is detected due to mismatching checksums
             # and fixed by rebuilding the index.
             assert len(self.repository) == 1
-            assert self.repository.get(H(0)) == b"foo"
+            assert pdchunk(self.repository.get(H(0))) == b"foo"
 
     def test_index_corrupted_without_integrity(self):
         self._corrupt_index()
@@ -684,17 +727,17 @@ class RepositoryAuxiliaryCorruptionTestCase(RepositoryTestCaseBase):
         with self.repository:
             # No issues accessing the repository
             assert len(self.repository) == 1
-            assert self.repository.get(H(0)) == b"foo"
+            assert pdchunk(self.repository.get(H(0))) == b"foo"
 
     def _subtly_corrupted_hints_setup(self):
         with self.repository:
             self.repository.append_only = True
             assert len(self.repository) == 1
-            assert self.repository.get(H(0)) == b"foo"
-            self.repository.put(H(1), b"bar")
-            self.repository.put(H(2), b"baz")
+            assert pdchunk(self.repository.get(H(0))) == b"foo"
+            self.repository.put(H(1), fchunk(b"bar"))
+            self.repository.put(H(2), fchunk(b"baz"))
             self.repository.commit(compact=False)
-            self.repository.put(H(2), b"bazz")
+            self.repository.put(H(2), fchunk(b"bazz"))
             self.repository.commit(compact=False)
 
         hints_path = os.path.join(self.repository.path, "hints.5")
@@ -711,14 +754,14 @@ class RepositoryAuxiliaryCorruptionTestCase(RepositoryTestCaseBase):
         self._subtly_corrupted_hints_setup()
         with self.repository:
             self.repository.append_only = False
-            self.repository.put(H(3), b"1234")
+            self.repository.put(H(3), fchunk(b"1234"))
             # Do a compaction run. Succeeds, since the failed checksum prompted a rebuild of the index+hints.
             self.repository.commit(compact=True)
 
             assert len(self.repository) == 4
-            assert self.repository.get(H(0)) == b"foo"
-            assert self.repository.get(H(1)) == b"bar"
-            assert self.repository.get(H(2)) == b"bazz"
+            assert pdchunk(self.repository.get(H(0))) == b"foo"
+            assert pdchunk(self.repository.get(H(1))) == b"bar"
+            assert pdchunk(self.repository.get(H(2))) == b"bazz"
 
     def test_subtly_corrupted_hints_without_integrity(self):
         self._subtly_corrupted_hints_setup()
@@ -726,7 +769,7 @@ class RepositoryAuxiliaryCorruptionTestCase(RepositoryTestCaseBase):
         os.unlink(integrity_path)
         with self.repository:
             self.repository.append_only = False
-            self.repository.put(H(3), b"1234")
+            self.repository.put(H(3), fchunk(b"1234"))
             # Do a compaction run. Fails, since the corrupted refcount was not detected and leads to an assertion failure.
             with pytest.raises(AssertionError) as exc_info:
                 self.repository.commit(compact=True)
@@ -748,12 +791,12 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):
 
     def get_objects(self, *ids):
         for id_ in ids:
-            self.repository.get(H(id_))
+            pdchunk(self.repository.get(H(id_)))
 
     def add_objects(self, segments):
         for ids in segments:
             for id_ in ids:
-                self.repository.put(H(id_), b"data")
+                self.repository.put(H(id_), fchunk(b"data"))
             self.repository.commit(compact=False)
 
     def get_head(self):
@@ -859,8 +902,8 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):
         self.assert_equal({1, 2, 3, 4, 5, 6}, self.list_objects())
 
     def test_crash_before_compact(self):
-        self.repository.put(H(0), b"data")
-        self.repository.put(H(0), b"data2")
+        self.repository.put(H(0), fchunk(b"data"))
+        self.repository.put(H(0), fchunk(b"data2"))
         # Simulate a crash before compact
         with patch.object(Repository, "compact_segments") as compact:
             self.repository.commit(compact=True)
@@ -868,12 +911,12 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):
         self.reopen()
         with self.repository:
             self.check(repair=True)
-            self.assert_equal(self.repository.get(H(0)), b"data2")
+            self.assert_equal(pdchunk(self.repository.get(H(0))), b"data2")
 
 
 class RepositoryHintsTestCase(RepositoryTestCaseBase):
     def test_hints_persistence(self):
-        self.repository.put(H(0), b"data")
+        self.repository.put(H(0), fchunk(b"data"))
         self.repository.delete(H(0))
         self.repository.commit(compact=False)
         shadow_index_expected = self.repository.shadow_index
@@ -884,7 +927,7 @@ class RepositoryHintsTestCase(RepositoryTestCaseBase):
         self.reopen()
         with self.repository:
             # see also do_compact()
-            self.repository.put(H(42), b"foobar")  # this will call prepare_txn() and load the hints data
+            self.repository.put(H(42), fchunk(b"foobar"))  # this will call prepare_txn() and load the hints data
             # check if hints persistence worked:
             self.assert_equal(shadow_index_expected, self.repository.shadow_index)
             self.assert_equal(compact_expected, self.repository.compact)
@@ -892,7 +935,7 @@ class RepositoryHintsTestCase(RepositoryTestCaseBase):
             self.assert_equal(segments_expected, self.repository.segments)
 
     def test_hints_behaviour(self):
-        self.repository.put(H(0), b"data")
+        self.repository.put(H(0), fchunk(b"data"))
         self.assert_equal(self.repository.shadow_index, {})
         assert len(self.repository.compact) == 0
         self.repository.delete(H(0))
@@ -901,7 +944,7 @@ class RepositoryHintsTestCase(RepositoryTestCaseBase):
         self.assert_in(H(0), self.repository.shadow_index)
         self.assert_equal(len(self.repository.shadow_index[H(0)]), 1)
         self.assert_in(0, self.repository.compact)  # segment 0 can be compacted
-        self.repository.put(H(42), b"foobar")  # see also do_compact()
+        self.repository.put(H(42), fchunk(b"foobar"))  # see also do_compact()
         self.repository.commit(compact=True, threshold=0.0)  # compact completely!
         # nothing to compact any more! no info left about stuff that does not exist any more:
         self.assert_not_in(H(0), self.repository.shadow_index)
@@ -1041,13 +1084,13 @@ class RemoteLegacyFree(RepositoryTestCaseBase):
 
     def test_legacy_free(self):
         # put
-        self.repository.put(H(0), b"foo")
+        self.repository.put(H(0), fchunk(b"foo"))
         self.repository.commit(compact=False)
         self.repository.close()
         # replace
         self.repository = self.open()
         with self.repository:
-            self.repository.put(H(0), b"bar")
+            self.repository.put(H(0), fchunk(b"bar"))
             self.repository.commit(compact=False)
         # delete
         self.repository = self.open()