Browse Source

Repo tests - conversion to pytest (#7626)

Ted Lawson 1 year ago
parent
commit
33645ad38a
1 changed file with 922 additions and 814 deletions
  1. +922 −814
      src/borg/testsuite/repository.py

+ 922 - 814
src/borg/testsuite/repository.py

@@ -1,8 +1,7 @@
 import logging
 import os
-import shutil
 import sys
-import tempfile
+from typing import Optional
 from unittest.mock import patch
 
 import pytest
@@ -12,14 +11,64 @@ from ..helpers import Location
 from ..helpers import IntegrityError
 from ..helpers import msgpack
 from ..locking import Lock, LockFailed
+from ..platformflags import is_win32
 from ..remote import RemoteRepository, InvalidRPCMethod, PathNotAllowed
 from ..repository import Repository, LoggedIO, MAGIC, MAX_DATA_SIZE, TAG_DELETE, TAG_PUT2, TAG_PUT, TAG_COMMIT
 from ..repoobj import RepoObj
-from . import BaseTestCase
 from .hashindex import H
 
 
-UNSPECIFIED = object()  # for default values where we can't use None
+@pytest.fixture()
+def repository(tmp_path):
+    repository_location = os.fspath(tmp_path / "repository")
+    yield Repository(repository_location, exclusive=True, create=True)
+
+
+@pytest.fixture()
+def remote_repository(tmp_path):
+    if is_win32:
+        pytest.skip("Remote repository does not yet work on Windows.")
+    repository_location = Location("ssh://__testsuite__" + os.fspath(tmp_path / "repository"))
+    yield RemoteRepository(repository_location, exclusive=True, create=True)
+
+
+def pytest_generate_tests(metafunc):
+    # Generates tests that run on both local and remote repos
+    if "repo_fixtures" in metafunc.fixturenames:
+        metafunc.parametrize("repo_fixtures", ["repository", "remote_repository"])
+
+
+def get_repository_from_fixture(repo_fixtures, request):
+    # returns the repo object from the fixture for tests that run on both local and remote repos
+    return request.getfixturevalue(repo_fixtures)
+
+
+def reopen(repository, exclusive: Optional[bool] = True, create=False):
+    if isinstance(repository, Repository):
+        if repository.io is not None or repository.lock is not None:
+            raise RuntimeError("Repo must be closed before a reopen. Cannot support nested repository contexts.")
+        return Repository(repository.path, exclusive=exclusive, create=create)
+
+    if isinstance(repository, RemoteRepository):
+        if repository.p is not None or repository.sock is not None:
+            raise RuntimeError("Remote repo must be closed before a reopen. Cannot support nested repository contexts.")
+        return RemoteRepository(repository.location, exclusive=exclusive, create=create)
+
+    raise TypeError(
+        f"Invalid argument type. Expected 'Repository' or 'RemoteRepository', received '{type(repository).__name__}'."
+    )
+
+
+def get_path(repository):
+    if isinstance(repository, Repository):
+        return repository.path
+
+    if isinstance(repository, RemoteRepository):
+        return repository.location.path
+
+    raise TypeError(
+        f"Invalid argument type. Expected 'Repository' or 'RemoteRepository', received '{type(repository).__name__}'."
+    )
 
 
 def fchunk(data, meta=b""):
@@ -45,1010 +94,1069 @@ def pdchunk(chunk):
     return pchunk(chunk)[0]
 
 
-class RepositoryTestCaseBase(BaseTestCase):
-    key_size = 32
-    exclusive = True
-
-    def open(self, create=False, exclusive=UNSPECIFIED):
-        if exclusive is UNSPECIFIED:
-            exclusive = self.exclusive
-        return Repository(os.path.join(self.tmppath, "repository"), exclusive=exclusive, create=create)
-
-    def setUp(self):
-        self.tmppath = tempfile.mkdtemp()
-        self.repository = self.open(create=True)
-        self.repository.__enter__()
-
-    def tearDown(self):
-        self.repository.__exit__(None, None, None)
-        shutil.rmtree(self.tmppath)
-
-    def reopen(self, exclusive=UNSPECIFIED):
-        self.repository.__exit__(None, None, None)
-        self.repository = self.open(exclusive=exclusive)
-        self.repository.__enter__()
-
-    def add_keys(self):
-        self.repository.put(H(0), fchunk(b"foo"))
-        self.repository.put(H(1), fchunk(b"bar"))
-        self.repository.put(H(3), fchunk(b"bar"))
-        self.repository.commit(compact=False)
-        self.repository.put(H(1), fchunk(b"bar2"))
-        self.repository.put(H(2), fchunk(b"boo"))
-        self.repository.delete(H(3))
-
-    def repo_dump(self, label=None):
-        label = label + ": " if label is not None else ""
-        H_trans = {H(i): i for i in range(10)}
-        H_trans[None] = -1  # key == None appears in commits
-        tag_trans = {TAG_PUT2: "put2", TAG_PUT: "put", TAG_DELETE: "del", TAG_COMMIT: "comm"}
-        for segment, fn in self.repository.io.segment_iterator():
-            for tag, key, offset, size, _ in self.repository.io.iter_objects(segment):
-                print("%s%s H(%d) -> %s[%d..+%d]" % (label, tag_trans[tag], H_trans[key], fn, offset, size))
-        print()
-
-
-class RepositoryTestCase(RepositoryTestCaseBase):
-    def test1(self):
+def add_keys(repository):
+    repository.put(H(0), fchunk(b"foo"))
+    repository.put(H(1), fchunk(b"bar"))
+    repository.put(H(3), fchunk(b"bar"))
+    repository.commit(compact=False)
+    repository.put(H(1), fchunk(b"bar2"))
+    repository.put(H(2), fchunk(b"boo"))
+    repository.delete(H(3))
+
+
+def repo_dump(repository, label=None):
+    label = label + ": " if label is not None else ""
+    H_trans = {H(i): i for i in range(10)}
+    H_trans[None] = -1  # key == None appears in commits
+    tag_trans = {TAG_PUT2: "put2", TAG_PUT: "put", TAG_DELETE: "del", TAG_COMMIT: "comm"}
+    for segment, fn in repository.io.segment_iterator():
+        for tag, key, offset, size, _ in repository.io.iter_objects(segment):
+            print("%s%s H(%d) -> %s[%d..+%d]" % (label, tag_trans[tag], H_trans[key], fn, offset, size))
+    print()
+
+
+def test_basic_operations(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
         for x in range(100):
-            self.repository.put(H(x), fchunk(b"SOMEDATA"))
+            repository.put(H(x), fchunk(b"SOMEDATA"))
         key50 = H(50)
-        self.assert_equal(pdchunk(self.repository.get(key50)), b"SOMEDATA")
-        self.repository.delete(key50)
-        self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(key50))
-        self.repository.commit(compact=False)
-        self.reopen()
-        self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(key50))
+        assert pdchunk(repository.get(key50)) == b"SOMEDATA"
+        repository.delete(key50)
+        with pytest.raises(Repository.ObjectNotFound):
+            repository.get(key50)
+        repository.commit(compact=False)
+    with reopen(repository) as repository:
+        with pytest.raises(Repository.ObjectNotFound):
+            repository.get(key50)
         for x in range(100):
             if x == 50:
                 continue
-            self.assert_equal(pdchunk(self.repository.get(H(x))), b"SOMEDATA")
-
-    def test2(self):
-        """Test multiple sequential transactions"""
-        self.repository.put(H(0), fchunk(b"foo"))
-        self.repository.put(H(1), fchunk(b"foo"))
-        self.repository.commit(compact=False)
-        self.repository.delete(H(0))
-        self.repository.put(H(1), fchunk(b"bar"))
-        self.repository.commit(compact=False)
-        self.assert_equal(pdchunk(self.repository.get(H(1))), b"bar")
-
-    def test_read_data(self):
+            assert pdchunk(repository.get(H(x))) == b"SOMEDATA"
+
+
+def test_multiple_transactions(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
+        repository.put(H(0), fchunk(b"foo"))
+        repository.put(H(1), fchunk(b"foo"))
+        repository.commit(compact=False)
+        repository.delete(H(0))
+        repository.put(H(1), fchunk(b"bar"))
+        repository.commit(compact=False)
+        assert pdchunk(repository.get(H(1))) == b"bar"
+
+
+def test_read_data(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
         meta, data = b"meta", b"data"
         meta_len = RepoObj.meta_len_hdr.pack(len(meta))
         chunk_complete = meta_len + meta + data
         chunk_short = meta_len + meta
-        self.repository.put(H(0), chunk_complete)
-        self.repository.commit(compact=False)
-        self.assert_equal(self.repository.get(H(0)), chunk_complete)
-        self.assert_equal(self.repository.get(H(0), read_data=True), chunk_complete)
-        self.assert_equal(self.repository.get(H(0), read_data=False), chunk_short)
-
-    def test_consistency(self):
-        """Test cache consistency"""
-        self.repository.put(H(0), fchunk(b"foo"))
-        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo")
-        self.repository.put(H(0), fchunk(b"foo2"))
-        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo2")
-        self.repository.put(H(0), fchunk(b"bar"))
-        self.assert_equal(pdchunk(self.repository.get(H(0))), b"bar")
-        self.repository.delete(H(0))
-        self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(H(0)))
-
-    def test_consistency2(self):
-        """Test cache consistency2"""
-        self.repository.put(H(0), fchunk(b"foo"))
-        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo")
-        self.repository.commit(compact=False)
-        self.repository.put(H(0), fchunk(b"foo2"))
-        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo2")
-        self.repository.rollback()
-        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo")
-
-    def test_overwrite_in_same_transaction(self):
-        """Test cache consistency2"""
-        self.repository.put(H(0), fchunk(b"foo"))
-        self.repository.put(H(0), fchunk(b"foo2"))
-        self.repository.commit(compact=False)
-        self.assert_equal(pdchunk(self.repository.get(H(0))), b"foo2")
-
-    def test_single_kind_transactions(self):
-        # put
-        self.repository.put(H(0), fchunk(b"foo"))
-        self.repository.commit(compact=False)
-        self.reopen()
-        # replace
-        self.repository.put(H(0), fchunk(b"bar"))
-        self.repository.commit(compact=False)
-        self.reopen()
-        # delete
-        self.repository.delete(H(0))
-        self.repository.commit(compact=False)
-
-    def test_list(self):
+        repository.put(H(0), chunk_complete)
+        repository.commit(compact=False)
+        assert repository.get(H(0)) == chunk_complete
+        assert repository.get(H(0), read_data=True) == chunk_complete
+        assert repository.get(H(0), read_data=False) == chunk_short
+
+
+def test_consistency(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
+        repository.put(H(0), fchunk(b"foo"))
+        assert pdchunk(repository.get(H(0))) == b"foo"
+        repository.put(H(0), fchunk(b"foo2"))
+        assert pdchunk(repository.get(H(0))) == b"foo2"
+        repository.put(H(0), fchunk(b"bar"))
+        assert pdchunk(repository.get(H(0))) == b"bar"
+        repository.delete(H(0))
+        with pytest.raises(Repository.ObjectNotFound):
+            repository.get(H(0))
+
+
+def test_consistency2(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
+        repository.put(H(0), fchunk(b"foo"))
+        assert pdchunk(repository.get(H(0))) == b"foo"
+        repository.commit(compact=False)
+        repository.put(H(0), fchunk(b"foo2"))
+        assert pdchunk(repository.get(H(0))) == b"foo2"
+        repository.rollback()
+        assert pdchunk(repository.get(H(0))) == b"foo"
+
+
+def test_overwrite_in_same_transaction(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
+        repository.put(H(0), fchunk(b"foo"))
+        repository.put(H(0), fchunk(b"foo2"))
+        repository.commit(compact=False)
+        assert pdchunk(repository.get(H(0))) == b"foo2"
+
+
+def test_single_kind_transactions(repo_fixtures, request):
+    # put
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
+        repository.put(H(0), fchunk(b"foo"))
+        repository.commit(compact=False)
+    # replace
+    with reopen(repository) as repository:
+        repository.put(H(0), fchunk(b"bar"))
+        repository.commit(compact=False)
+    # delete
+    with reopen(repository) as repository:
+        repository.delete(H(0))
+        repository.commit(compact=False)
+
+
+def test_list(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
         for x in range(100):
-            self.repository.put(H(x), fchunk(b"SOMEDATA"))
-        self.repository.commit(compact=False)
-        all = self.repository.list()
-        self.assert_equal(len(all), 100)
-        first_half = self.repository.list(limit=50)
-        self.assert_equal(len(first_half), 50)
-        self.assert_equal(first_half, all[:50])
-        second_half = self.repository.list(marker=first_half[-1])
-        self.assert_equal(len(second_half), 50)
-        self.assert_equal(second_half, all[50:])
-        self.assert_equal(len(self.repository.list(limit=50)), 50)
-
-    def test_scan(self):
+            repository.put(H(x), fchunk(b"SOMEDATA"))
+        repository.commit(compact=False)
+        repo_list = repository.list()
+        assert len(repo_list) == 100
+        first_half = repository.list(limit=50)
+        assert len(first_half) == 50
+        assert first_half == repo_list[:50]
+        second_half = repository.list(marker=first_half[-1])
+        assert len(second_half) == 50
+        assert second_half == repo_list[50:]
+        assert len(repository.list(limit=50)) == 50
+
+
+def test_scan(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
         for x in range(100):
-            self.repository.put(H(x), fchunk(b"SOMEDATA"))
-        self.repository.commit(compact=False)
-        all, _ = self.repository.scan()
-        assert len(all) == 100
-        first_half, state = self.repository.scan(limit=50)
+            repository.put(H(x), fchunk(b"SOMEDATA"))
+        repository.commit(compact=False)
+        ids, _ = repository.scan()
+        assert len(ids) == 100
+        first_half, state = repository.scan(limit=50)
         assert len(first_half) == 50
-        assert first_half == all[:50]
-        second_half, _ = self.repository.scan(state=state)
+        assert first_half == ids[:50]
+        second_half, _ = repository.scan(state=state)
         assert len(second_half) == 50
-        assert second_half == all[50:]
+        assert second_half == ids[50:]
         # check result order == on-disk order (which is hash order)
         for x in range(100):
-            assert all[x] == H(x)
+            assert ids[x] == H(x)
+
 
-    def test_scan_modify(self):
+def test_scan_modify(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
         for x in range(100):
-            self.repository.put(H(x), fchunk(b"ORIGINAL"))
-        self.repository.commit(compact=False)
+            repository.put(H(x), fchunk(b"ORIGINAL"))
+        repository.commit(compact=False)
         # now we scan, read and modify chunks at the same time
         count = 0
-        ids, _ = self.repository.scan()
+        ids, _ = repository.scan()
         for id in ids:
             # scan results are in same order as we put the chunks into the repo (into the segment file)
             assert id == H(count)
-            chunk = self.repository.get(id)
+            chunk = repository.get(id)
             # check that we **only** get data that was committed when we started scanning
             # and that we do not run into the new data we put into the repo.
             assert pdchunk(chunk) == b"ORIGINAL"
             count += 1
-            self.repository.put(id, fchunk(b"MODIFIED"))
+            repository.put(id, fchunk(b"MODIFIED"))
         assert count == 100
-        self.repository.commit()
+        repository.commit()
 
         # now we have committed all the modified chunks, and **only** must get the modified ones.
         count = 0
-        ids, _ = self.repository.scan()
+        ids, _ = repository.scan()
         for id in ids:
             # scan results are in same order as we put the chunks into the repo (into the segment file)
             assert id == H(count)
-            chunk = self.repository.get(id)
+            chunk = repository.get(id)
             assert pdchunk(chunk) == b"MODIFIED"
             count += 1
         assert count == 100
 
-    def test_max_data_size(self):
+
+def test_max_data_size(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
         max_data = b"x" * (MAX_DATA_SIZE - RepoObj.meta_len_hdr.size)
-        self.repository.put(H(0), fchunk(max_data))
-        self.assert_equal(pdchunk(self.repository.get(H(0))), max_data)
-        self.assert_raises(IntegrityError, lambda: self.repository.put(H(1), fchunk(max_data + b"x")))
+        repository.put(H(0), fchunk(max_data))
+        assert pdchunk(repository.get(H(0))) == max_data
+        with pytest.raises(IntegrityError):
+            repository.put(H(1), fchunk(max_data + b"x"))
 
-    def test_set_flags(self):
+
+def test_set_flags(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
         id = H(0)
-        self.repository.put(id, fchunk(b""))
-        self.assert_equal(self.repository.flags(id), 0x00000000)  # init == all zero
-        self.repository.flags(id, mask=0x00000001, value=0x00000001)
-        self.assert_equal(self.repository.flags(id), 0x00000001)
-        self.repository.flags(id, mask=0x00000002, value=0x00000002)
-        self.assert_equal(self.repository.flags(id), 0x00000003)
-        self.repository.flags(id, mask=0x00000001, value=0x00000000)
-        self.assert_equal(self.repository.flags(id), 0x00000002)
-        self.repository.flags(id, mask=0x00000002, value=0x00000000)
-        self.assert_equal(self.repository.flags(id), 0x00000000)
-
-    def test_get_flags(self):
+        repository.put(id, fchunk(b""))
+        assert repository.flags(id) == 0x00000000  # init == all zero
+        repository.flags(id, mask=0x00000001, value=0x00000001)
+        assert repository.flags(id) == 0x00000001
+        repository.flags(id, mask=0x00000002, value=0x00000002)
+        assert repository.flags(id) == 0x00000003
+        repository.flags(id, mask=0x00000001, value=0x00000000)
+        assert repository.flags(id) == 0x00000002
+        repository.flags(id, mask=0x00000002, value=0x00000000)
+        assert repository.flags(id) == 0x00000000
+
+
+def test_get_flags(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
         id = H(0)
-        self.repository.put(id, fchunk(b""))
-        self.assert_equal(self.repository.flags(id), 0x00000000)  # init == all zero
-        self.repository.flags(id, mask=0xC0000003, value=0x80000001)
-        self.assert_equal(self.repository.flags(id, mask=0x00000001), 0x00000001)
-        self.assert_equal(self.repository.flags(id, mask=0x00000002), 0x00000000)
-        self.assert_equal(self.repository.flags(id, mask=0x40000008), 0x00000000)
-        self.assert_equal(self.repository.flags(id, mask=0x80000000), 0x80000000)
-
-    def test_flags_many(self):
+        repository.put(id, fchunk(b""))
+        assert repository.flags(id) == 0x00000000  # init == all zero
+        repository.flags(id, mask=0xC0000003, value=0x80000001)
+        assert repository.flags(id, mask=0x00000001) == 0x00000001
+        assert repository.flags(id, mask=0x00000002) == 0x00000000
+        assert repository.flags(id, mask=0x40000008) == 0x00000000
+        assert repository.flags(id, mask=0x80000000) == 0x80000000
+
+
+def test_flags_many(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
         ids_flagged = [H(0), H(1)]
         ids_default_flags = [H(2), H(3)]
-        [self.repository.put(id, fchunk(b"")) for id in ids_flagged + ids_default_flags]
-        self.repository.flags_many(ids_flagged, mask=0xFFFFFFFF, value=0xDEADBEEF)
-        self.assert_equal(list(self.repository.flags_many(ids_default_flags)), [0x00000000, 0x00000000])
-        self.assert_equal(list(self.repository.flags_many(ids_flagged)), [0xDEADBEEF, 0xDEADBEEF])
-        self.assert_equal(list(self.repository.flags_many(ids_flagged, mask=0xFFFF0000)), [0xDEAD0000, 0xDEAD0000])
-        self.assert_equal(list(self.repository.flags_many(ids_flagged, mask=0x0000FFFF)), [0x0000BEEF, 0x0000BEEF])
-
-    def test_flags_persistence(self):
-        self.repository.put(H(0), fchunk(b"default"))
-        self.repository.put(H(1), fchunk(b"one one zero"))
+        [repository.put(id, fchunk(b"")) for id in ids_flagged + ids_default_flags]
+        repository.flags_many(ids_flagged, mask=0xFFFFFFFF, value=0xDEADBEEF)
+        assert list(repository.flags_many(ids_default_flags)) == [0x00000000, 0x00000000]
+        assert list(repository.flags_many(ids_flagged)) == [0xDEADBEEF, 0xDEADBEEF]
+        assert list(repository.flags_many(ids_flagged, mask=0xFFFF0000)) == [0xDEAD0000, 0xDEAD0000]
+        assert list(repository.flags_many(ids_flagged, mask=0x0000FFFF)) == [0x0000BEEF, 0x0000BEEF]
+
+
+def test_flags_persistence(repo_fixtures, request):
+    with get_repository_from_fixture(repo_fixtures, request) as repository:
+        repository.put(H(0), fchunk(b"default"))
+        repository.put(H(1), fchunk(b"one one zero"))
         # we do not set flags for H(0), so we can later check their default state.
-        self.repository.flags(H(1), mask=0x00000007, value=0x00000006)
-        self.repository.commit(compact=False)
-        self.reopen()
+        repository.flags(H(1), mask=0x00000007, value=0x00000006)
+        repository.commit(compact=False)
+    with reopen(repository) as repository:
         # we query all flags to check if the initial flags were all zero and
         # only the ones we explicitly set to one are as expected.
-        self.assert_equal(self.repository.flags(H(0), mask=0xFFFFFFFF), 0x00000000)
-        self.assert_equal(self.repository.flags(H(1), mask=0xFFFFFFFF), 0x00000006)
-
-
-class LocalRepositoryTestCase(RepositoryTestCaseBase):
-    # test case that doesn't work with remote repositories
-
-    def _assert_sparse(self):
-        # The superseded 123456... PUT
-        assert self.repository.compact[0] == 41 + 8 + len(fchunk(b"123456789"))
-        # a COMMIT
-        assert self.repository.compact[1] == 9
-        # The DELETE issued by the superseding PUT (or issued directly)
-        assert self.repository.compact[2] == 41
-        self.repository._rebuild_sparse(0)
-        assert self.repository.compact[0] == 41 + 8 + len(fchunk(b"123456789"))  # 9 is chunk or commit?
-
-    def test_sparse1(self):
-        self.repository.put(H(0), fchunk(b"foo"))
-        self.repository.put(H(1), fchunk(b"123456789"))
-        self.repository.commit(compact=False)
-        self.repository.put(H(1), fchunk(b"bar"))
-        self._assert_sparse()
-
-    def test_sparse2(self):
-        self.repository.put(H(0), fchunk(b"foo"))
-        self.repository.put(H(1), fchunk(b"123456789"))
-        self.repository.commit(compact=False)
-        self.repository.delete(H(1))
-        self._assert_sparse()
-
-    def test_sparse_delete(self):
-        ch0 = fchunk(b"1245")
-        self.repository.put(H(0), ch0)
-        self.repository.delete(H(0))
-        self.repository.io._write_fd.sync()
-
-        # The on-line tracking works on a per-object basis...
-        assert self.repository.compact[0] == 41 + 8 + 41 + len(ch0)
-        self.repository._rebuild_sparse(0)
+        assert repository.flags(H(0), mask=0xFFFFFFFF) == 0x00000000
+        assert repository.flags(H(1), mask=0xFFFFFFFF) == 0x00000006
+
+
+def _assert_sparse(repository):
+    # the superseded 123456... PUT
+    assert repository.compact[0] == 41 + 8 + len(fchunk(b"123456789"))
+    # a COMMIT
+    assert repository.compact[1] == 9
+    # the DELETE issued by the superseding PUT (or issued directly)
+    assert repository.compact[2] == 41
+    repository._rebuild_sparse(0)
+    assert repository.compact[0] == 41 + 8 + len(fchunk(b"123456789"))  # 9 is chunk or commit?
+
+
+def test_sparse1(repository):
+    with repository:
+        repository.put(H(0), fchunk(b"foo"))
+        repository.put(H(1), fchunk(b"123456789"))
+        repository.commit(compact=False)
+        repository.put(H(1), fchunk(b"bar"))
+        _assert_sparse(repository)
+
+
+def test_sparse2(repository):
+    with repository:
+        repository.put(H(0), fchunk(b"foo"))
+        repository.put(H(1), fchunk(b"123456789"))
+        repository.commit(compact=False)
+        repository.delete(H(1))
+        _assert_sparse(repository)
+
+
+def test_sparse_delete(repository):
+    with repository:
+        chunk0 = fchunk(b"1245")
+        repository.put(H(0), chunk0)
+        repository.delete(H(0))
+        repository.io._write_fd.sync()
+        # the on-line tracking works on a per-object basis...
+        assert repository.compact[0] == 41 + 8 + 41 + len(chunk0)
+        repository._rebuild_sparse(0)
         # ...while _rebuild_sparse can mark whole segments as completely sparse (which then includes the segment magic)
-        assert self.repository.compact[0] == 41 + 8 + 41 + len(ch0) + len(MAGIC)
+        assert repository.compact[0] == 41 + 8 + 41 + len(chunk0) + len(MAGIC)
+        repository.commit(compact=True)
+        assert 0 not in [segment for segment, _ in repository.io.segment_iterator()]
 
-        self.repository.commit(compact=True)
-        assert 0 not in [segment for segment, _ in self.repository.io.segment_iterator()]
 
-    def test_uncommitted_garbage(self):
+def test_uncommitted_garbage(repository):
+    with repository:
         # uncommitted garbage should be no problem, it is cleaned up automatically.
         # we just have to be careful with invalidation of cached FDs in LoggedIO.
-        self.repository.put(H(0), fchunk(b"foo"))
-        self.repository.commit(compact=False)
-        # write some crap to a uncommitted segment file
-        last_segment = self.repository.io.get_latest_segment()
-        with open(self.repository.io.segment_filename(last_segment + 1), "wb") as f:
+        repository.put(H(0), fchunk(b"foo"))
+        repository.commit(compact=False)
+        # write some crap to an uncommitted segment file
+        last_segment = repository.io.get_latest_segment()
+        with open(repository.io.segment_filename(last_segment + 1), "wb") as f:
             f.write(MAGIC + b"crapcrapcrap")
-        self.reopen()
+    with reopen(repository) as repository:
         # usually, opening the repo and starting a transaction should trigger a cleanup.
-        self.repository.put(H(0), fchunk(b"bar"))  # this may trigger compact_segments()
-        self.repository.commit(compact=True)
+        repository.put(H(0), fchunk(b"bar"))  # this may trigger compact_segments()
+        repository.commit(compact=True)
         # the point here is that nothing blows up with an exception.
 
 
-class RepositoryCommitTestCase(RepositoryTestCaseBase):
-    def test_replay_of_missing_index(self):
-        self.add_keys()
-        for name in os.listdir(self.repository.path):
+def test_replay_of_missing_index(repository):
+    with repository:
+        add_keys(repository)
+        for name in os.listdir(repository.path):
             if name.startswith("index."):
-                os.unlink(os.path.join(self.repository.path, name))
-        self.reopen()
-        self.assert_equal(len(self.repository), 3)
-        self.assert_equal(self.repository.check(), True)
-
-    def test_crash_before_compact_segments(self):
-        self.add_keys()
-        self.repository.compact_segments = None
+                os.unlink(os.path.join(repository.path, name))
+    with reopen(repository) as repository:
+        assert len(repository) == 3
+        assert repository.check() is True
+
+
+def test_crash_before_compact_segments(repository):
+    with repository:
+        add_keys(repository)
+        repository.compact_segments = None
         try:
-            self.repository.commit(compact=True)
+            repository.commit(compact=True)
         except TypeError:
             pass
-        self.reopen()
-        self.assert_equal(len(self.repository), 3)
-        self.assert_equal(self.repository.check(), True)
+    with reopen(repository) as repository:
+        assert len(repository) == 3
+        assert repository.check() is True
 
-    def test_crash_before_write_index(self):
-        self.add_keys()
-        self.repository.write_index = None
+
+def test_crash_before_write_index(repository):
+    with repository:
+        add_keys(repository)
+        repository.write_index = None
         try:
-            self.repository.commit(compact=False)
+            repository.commit(compact=False)
         except TypeError:
             pass
-        self.reopen()
-        self.assert_equal(len(self.repository), 3)
-        self.assert_equal(self.repository.check(), True)
+    with reopen(repository) as repository:
+        assert len(repository) == 3
+        assert repository.check() is True
+
 
-    def test_replay_lock_upgrade_old(self):
-        self.add_keys()
-        for name in os.listdir(self.repository.path):
+def test_replay_lock_upgrade_old(repository):
+    with repository:
+        add_keys(repository)
+        for name in os.listdir(repository.path):
             if name.startswith("index."):
-                os.unlink(os.path.join(self.repository.path, name))
-        with patch.object(Lock, "upgrade", side_effect=LockFailed) as upgrade:
-            self.reopen(exclusive=None)  # simulate old client that always does lock upgrades
+                os.unlink(os.path.join(repository.path, name))
+    with patch.object(Lock, "upgrade", side_effect=LockFailed) as upgrade:
+        with reopen(repository, exclusive=None) as repository:
+            # simulate old client that always does lock upgrades
             # the repo is only locked by a shared read lock, but to replay segments,
             # we need an exclusive write lock - check if the lock gets upgraded.
-            self.assert_raises(LockFailed, lambda: len(self.repository))
+            with pytest.raises(LockFailed):
+                len(repository)
             upgrade.assert_called_once_with()
 
def test_replay_lock_upgrade(repository):
    with repository:
        add_keys(repository)
        # drop every index file so that reopening must replay the segments
        for fname in os.listdir(repository.path):
            if fname.startswith("index."):
                os.unlink(os.path.join(repository.path, fname))
    with patch.object(Lock, "upgrade", side_effect=LockFailed) as upgrade:
        with reopen(repository, exclusive=False) as repository:
            # current client usually does not do lock upgrade, except for replay.
            # the repo is only locked by a shared read lock, but to replay segments,
            # we need an exclusive write lock - check if the lock gets upgraded.
            with pytest.raises(LockFailed):
                len(repository)
            upgrade.assert_called_once_with()
 
def test_crash_before_deleting_compacted_segments(repository):
    with repository:
        add_keys(repository)
        # sabotage the IO layer: committing will blow up before compacted
        # segments can be deleted, simulating a crash at that point.
        repository.io.delete_segment = None
        try:
            repository.commit(compact=False)
        except TypeError:
            pass  # calling None raises TypeError - the "crash" we provoked
    with reopen(repository) as repository:
        assert len(repository) == 3
        assert repository.check() is True
        # check() must not have changed the object count
        assert len(repository) == 3
+
def test_ignores_commit_tag_in_data(repository):
    with repository:
        # store payload bytes that look exactly like a commit tag
        repository.put(H(0), LoggedIO.COMMIT)
    with reopen(repository) as repository:
        # commit-tag-shaped *data* must not make the segment count as committed
        assert not repository.io.is_committed_segment(repository.io.get_latest_segment())
def test_moved_deletes_are_tracked(repository):
    with repository:
        repository.put(H(1), fchunk(b"1"))
        repository.put(H(2), fchunk(b"2"))
        repository.commit(compact=False)
        repo_dump(repository, "p1 p2 c")
        repository.delete(H(1))
        repository.commit(compact=True)
        repo_dump(repository, "d1 cc")
        # the DELETE got moved into the segment just before the commit tag
        last_segment = repository.io.get_latest_segment() - 1
        deleted_keys = [key for tag, key, _, _, _ in repository.io.iter_objects(last_segment) if tag == TAG_DELETE]
        # exactly one DELETE, and it is the one for H(1)
        assert deleted_keys == [H(1)]
        assert last_segment in repository.compact
        repository.put(H(3), fchunk(b"3"))
        repository.commit(compact=True)
        repo_dump(repository, "p3 cc")
        # compaction removed the segment that held the moved delete ...
        assert last_segment not in repository.compact
        assert not repository.io.segment_exists(last_segment)
        # ... and neither a DELETE tag nor H(1) survives anywhere in the repo
        for segment, _ in repository.io.segment_iterator():
            for tag, key, _, _, _ in repository.io.iter_objects(segment):
                assert tag != TAG_DELETE
                assert key != H(1)
        # after compaction, there should be no empty shadowed_segments lists left over.
        # we have no put or del anymore for H(1), so we lost knowledge about H(1).
        assert H(1) not in repository.shadow_index
def test_shadowed_entries_are_preserved(repository):
    with repository:
        latest = repository.io.get_latest_segment
        repository.put(H(1), fchunk(b"1"))
        put_seg = latest()  # the segment holding our original PUT of interest
        repository.commit(compact=False)
        # we now delete H(1), and force this segment not to be compacted, which can happen
        # if it's not sparse enough (symbolized by H(2) here).
        repository.delete(H(1))
        repository.put(H(2), fchunk(b"1"))
        delete_seg = latest()
        # pretend both segments are mostly dense (not sparse) so they won't be compacted
        del repository.compact[put_seg]
        del repository.compact[delete_seg]
        repository.commit(compact=True)
        # an unrelated operation on the segment containing the DELETE now causes
        # that segment to be compacted.
        repository.delete(H(2))
        repository.commit(compact=True)
        assert repository.io.segment_exists(put_seg)
        assert not repository.io.segment_exists(delete_seg)
        # basic case: the index survived, so this must hold
        assert H(1) not in repository
        # nuke the index to force a replay ...
        os.unlink(os.path.join(repository.path, "index.%d" % latest()))
        # ... H(1) must not reappear
        assert H(1) not in repository
+
def test_shadow_index_rollback(repository):
    with repository:
        repository.put(H(1), fchunk(b"1"))
        repository.delete(H(1))
        assert repository.shadow_index[H(1)] == [0]
        repository.commit(compact=True)
        repo_dump(repository, "p1 d1 cc")
        # an empty list means that nothing is shadowed for sure
        assert repository.shadow_index[H(1)] == []  # because the deletion is considered unstable
        repository.put(H(1), b"1")  # NOTE(review): raw bytes, not fchunk() - intentional? verify
        repository.delete(H(1))
        repo_dump(repository, "p1 d1")
        # segments: 0 put/delete; 1 commit; 2 compacted; 3 commit; 4 put/delete
        assert repository.shadow_index[H(1)] == [4]
        repository.rollback()
        repo_dump(repository, "r")
        repository.put(H(2), fchunk(b"1"))
        # after the rollback, segment 4 shouldn't be considered anymore
        assert repository.shadow_index[H(1)] == []  # because the deletion is considered unstable
+
def test_destroy_append_only(repository):
    with repository:
        repository.append_only = True
        # destroying an append-only repo via the API must be refused ...
        with pytest.raises(ValueError):
            repository.destroy()
        # ... and the append-only flag must still be set afterwards
        assert repository.append_only
+
def test_append_only(repository):
    def count_segments(repo):
        # number of segment files currently on disk
        return len(list(repo.io.segment_iterator()))

    with repository:
        repository.append_only = True
        repository.put(H(0), fchunk(b"foo"))
        repository.commit(compact=False)

        repository.append_only = False
        assert count_segments(repository) == 2
        repository.put(H(0), fchunk(b"foo"))
        repository.commit(compact=True)
        # normal mode: compaction squashes the data together, only one segment
        assert count_segments(repository) == 2

        repository.append_only = True
        assert count_segments(repository) == 2
        repository.put(H(0), fchunk(b"foo"))
        repository.commit(compact=False)
        # append-only mode: no compaction, only new segments get written
        assert count_segments(repository) == 4
 
def test_additional_free_space(repository):
    with repository:
        add_keys(repository)
        # demand an absurd amount of free space, so any later commit must fail
        repository.config.set("repository", "additional_free_space", "1000T")
        repository.save_key(b"shortcut to save_config")
    with reopen(repository) as repository:
        repository.put(H(0), fchunk(b"foobar"))
        with pytest.raises(Repository.InsufficientFreeSpaceError):
            repository.commit(compact=False)
        # the repository itself must survive the failed commit
        assert os.path.exists(repository.path)
 
def test_create_free_space(repository):
    with repository:
        # demand an absurd amount of free space already at creation time
        repository.additional_free_space = 1e20
        with pytest.raises(Repository.InsufficientFreeSpaceError):
            add_keys(repository)
        # a repo that could not be populated must not leave a directory behind
        assert not os.path.exists(repository.path)
 
 
def test_tracking(repository):
    overhead = 41 + 8  # per-object storage overhead counted against the quota
    with repository:
        assert repository.storage_quota_use == 0
        ch1 = fchunk(bytes(1234))
        repository.put(H(1), ch1)
        assert repository.storage_quota_use == len(ch1) + overhead
        ch2 = fchunk(bytes(5678))
        repository.put(H(2), ch2)
        assert repository.storage_quota_use == len(ch1) + len(ch2) + 2 * overhead
        repository.delete(H(1))
        assert repository.storage_quota_use == len(ch1) + len(ch2) + 2 * overhead  # we have not compacted yet
        repository.commit(compact=False)
        assert repository.storage_quota_use == len(ch1) + len(ch2) + 2 * overhead  # we have not compacted yet
    with reopen(repository) as repository:
        # open new transaction; hints and thus quota data is not loaded unless needed.
        ch3 = fchunk(b"")
        repository.put(H(3), ch3)
        repository.delete(H(3))
        # still not compacted, so everything counts
        assert repository.storage_quota_use == len(ch1) + len(ch2) + len(ch3) + 3 * overhead
        repository.commit(compact=True)
        # after compaction only the surviving object remains accounted
        assert repository.storage_quota_use == len(ch2) + overhead
 
def test_exceed_quota(repository):
    overhead = 41 + 8  # per-object storage overhead counted against the quota
    with repository:
        assert repository.storage_quota_use == 0
        repository.storage_quota = 80
        ch1 = fchunk(b"x" * 7)
        repository.put(H(1), ch1)
        assert repository.storage_quota_use == len(ch1) + overhead
        repository.commit(compact=False)
        with pytest.raises(Repository.StorageQuotaExceeded):
            ch2 = fchunk(b"y" * 13)
            repository.put(H(2), ch2)
        assert repository.storage_quota_use == len(ch1) + len(ch2) + overhead * 2  # check ch2!?
        with pytest.raises(Repository.StorageQuotaExceeded):
            repository.commit(compact=False)
        assert repository.storage_quota_use == len(ch1) + len(ch2) + overhead * 2  # check ch2!?
    with reopen(repository) as repository:
        repository.storage_quota = 150
        # open new transaction; hints and thus quota data is not loaded unless needed.
        repository.put(H(1), ch1)
        # we have 2 puts for H(1) here and not yet compacted.
        assert repository.storage_quota_use == len(ch1) * 2 + overhead * 2
        repository.commit(compact=True)
        assert repository.storage_quota_use == len(ch1) + overhead  # now we have compacted.
+
+
def make_auxiliary(repository):
    """Seed the repository with one committed object (H(0) -> b"foo"), then close it."""
    with repository:
        repository.put(H(0), fchunk(b"foo"))
        repository.commit(compact=False)
+
+
def do_commit(repository):
    """Overwrite H(0) and commit - used to exercise a commit after aux-file damage."""
    with repository:
        repository.put(H(0), fchunk(b"fox"))
        repository.commit(compact=False)
+
+
def test_corrupted_hints(repository):
    make_auxiliary(repository)
    # appending garbage to the hints file must be survivable
    hints_file = os.path.join(repository.path, "hints.1")
    with open(hints_file, "ab") as fd:
        fd.write(b"123456789")
    do_commit(repository)
+
+
def test_deleted_hints(repository):
    make_auxiliary(repository)
    # a missing hints file must be survivable
    os.unlink(os.path.join(repository.path, "hints.1"))
    do_commit(repository)
+
+
def test_deleted_index(repository):
    make_auxiliary(repository)
    # a missing index file must be survivable
    os.unlink(os.path.join(repository.path, "index.1"))
    do_commit(repository)
+
+
def test_unreadable_hints(repository):
    make_auxiliary(repository)
    hints_file = os.path.join(repository.path, "hints.1")
    # replace the hints file with a directory, so it cannot be opened as a file
    os.unlink(hints_file)
    os.mkdir(hints_file)
    with pytest.raises(OSError):
        do_commit(repository)
+
+
def test_index(repository):
    make_auxiliary(repository)
    # an index file holding bogus content must be survivable
    index_file = os.path.join(repository.path, "index.1")
    with open(index_file, "wb") as fd:
        fd.write(b"123456789")
    do_commit(repository)
+
+
def test_index_outside_transaction(repository):
    make_auxiliary(repository)
    index_file = os.path.join(repository.path, "index.1")
    with open(index_file, "wb") as fd:
        fd.write(b"123456789")
    # merely opening the repo must recover and still see the one object
    with repository:
        assert len(repository) == 1
+
+
def _corrupt_index(repository):
    """Flip one bit in a key stored in the repo's index file.

    HashIndex is able to detect incorrect headers and file lengths,
    but on its own it can't tell if the data is correct.
    """
    index_path = os.path.join(repository.path, "index.1")
    with open(index_path, "r+b") as fd:
        original = fd.read()
        flipped_key = (int.from_bytes(H(0), "little") ^ 1).to_bytes(32, "little")
        corrupted = original.replace(H(0), flipped_key)
        # same length, different content - only the key bytes changed
        assert corrupted != original
        assert len(corrupted) == len(original)
        fd.seek(0)
        fd.write(corrupted)
+
+
def test_index_corrupted(repository):
    make_auxiliary(repository)
    _corrupt_index(repository)
    with repository:
        # data corruption is detected due to mismatching checksums, and fixed by rebuilding the index.
        assert len(repository) == 1
        assert pdchunk(repository.get(H(0))) == b"foo"
+
+
def test_index_corrupted_without_integrity(repository):
    make_auxiliary(repository)
    _corrupt_index(repository)
    # without the integrity file, the corruption cannot be detected
    os.unlink(os.path.join(repository.path, "integrity.1"))
    with repository:
        # since the corrupted key is not noticed, the repository still thinks it contains one key...
        assert len(repository) == 1
        with pytest.raises(Repository.ObjectNotFound):
            # ... but the real, uncorrupted key is not found in the corrupted index.
            repository.get(H(0))
+
+
def test_unreadable_index(repository):
    make_auxiliary(repository)
    index_file = os.path.join(repository.path, "index.1")
    # replace the index file with a directory, so it cannot be opened as a file
    os.unlink(index_file)
    os.mkdir(index_file)
    with pytest.raises(OSError):
        do_commit(repository)
+
+
def test_unknown_integrity_version(repository):
    make_auxiliary(repository)
    # for now, an unknown integrity data version is ignored and not an error.
    integrity_path = os.path.join(repository.path, "integrity.1")
    with open(integrity_path, "r+b") as fd:
        msgpack.pack({b"version": 4.7}, fd)  # borg only understands version 2
        fd.truncate()
    with repository:
        # the repository stays accessible without any issues
        assert len(repository) == 1
        assert pdchunk(repository.get(H(0))) == b"foo"
+
+
def _subtly_corrupted_hints_setup(repository):
    """Write a few objects in append-only mode, then corrupt one segment refcount in hints.5."""
    with repository:
        repository.append_only = True
        assert len(repository) == 1
        assert pdchunk(repository.get(H(0))) == b"foo"
        repository.put(H(1), fchunk(b"bar"))
        repository.put(H(2), fchunk(b"baz"))
        repository.commit(compact=False)
        repository.put(H(2), fchunk(b"bazz"))
        repository.commit(compact=False)
    hints_path = os.path.join(repository.path, "hints.5")
    with open(hints_path, "r+b") as hints_file:
        hints_data = msgpack.unpack(hints_file)
        hints_file.seek(0)
        # corrupt a segment refcount (1 -> 0), a subtle inconsistency
        assert hints_data["segments"][2] == 1
        hints_data["segments"][2] = 0
        msgpack.pack(hints_data, hints_file)
        hints_file.truncate()
+
+
def test_subtly_corrupted_hints(repository):
    make_auxiliary(repository)
    _subtly_corrupted_hints_setup(repository)
    with repository:
        repository.append_only = False
        repository.put(H(3), fchunk(b"1234"))
        # do a compaction run, which succeeds since the failed checksum prompted a rebuild of the index+hints.
        repository.commit(compact=True)
        assert len(repository) == 4
        for id_, expected in ((0, b"foo"), (1, b"bar"), (2, b"bazz")):
            assert pdchunk(repository.get(H(id_))) == expected
+
+
def test_subtly_corrupted_hints_without_integrity(repository):
    make_auxiliary(repository)
    _subtly_corrupted_hints_setup(repository)
    # without the integrity file, the bad refcount goes undetected
    os.unlink(os.path.join(repository.path, "integrity.5"))
    with repository:
        repository.append_only = False
        repository.put(H(3), fchunk(b"1234"))
        # do a compaction run, which fails since the corrupted refcount wasn't detected and causes an assertion failure.
        with pytest.raises(AssertionError) as exc_info:
            repository.commit(compact=True)
        assert "Corrupted segment reference count" in str(exc_info.value)
+
+
def list_indices(repo_path):
    """Return the names of all index files found directly inside *repo_path*."""
    return [entry for entry in os.listdir(repo_path) if entry.startswith("index.")]
+
+
def check(repository, repo_path, repair=False, status=True):
    """Run repository.check() and verify its result and that no tmp files remain."""
    assert repository.check(repair=repair) == status
    # a check run must clean up its temporary files
    leftovers = [entry for entry in os.listdir(repo_path) if "tmp" in entry]
    assert leftovers == [], "Found tmp files"
+
+
def get_objects(repository, *ids):
    """Fetch and decode each given object id, asserting all of them are readable."""
    for id_ in ids:
        pdchunk(repository.get(H(id_)))
+
+
def add_objects(repository, segments):
    """For each id list in *segments*, put one chunk per id, then commit without compaction."""
    for ids in segments:
        for id_ in ids:
            repository.put(H(id_), fchunk(b"data"))
        repository.commit(compact=False)
 
def get_head(repo_path):
    """Return the highest-numbered segment file in *repo_path*/data/0."""
    segment_numbers = [int(entry) for entry in os.listdir(os.path.join(repo_path, "data", "0")) if entry.isdigit()]
    return sorted(segment_numbers)[-1]
+
+
def open_index(repo_path):
    """Read and return the NSIndex belonging to the repo's current head segment."""
    return NSIndex.read(os.path.join(repo_path, f"index.{get_head(repo_path)}"))
+
+
def corrupt_object(repo_path, id_):
    """Overwrite the stored object *id_* in place with garbage bytes."""
    segment, offset, _ = open_index(repo_path)[H(id_)]
    with open(os.path.join(repo_path, "data", "0", str(segment)), "r+b") as fd:
        fd.seek(offset)
        fd.write(b"BOOM")
+
+
def delete_segment(repository, segment):
    """Remove a single segment file via the repository's IO layer."""
    repository.io.delete_segment(segment)
+
+
def delete_index(repo_path):
    """Delete the index file belonging to the current head segment."""
    os.unlink(os.path.join(repo_path, f"index.{get_head(repo_path)}"))
+
+
def rename_index(repo_path, new_name):
    """Rename the current head index file to *new_name* (overwriting any existing file)."""
    head_index = os.path.join(repo_path, f"index.{get_head(repo_path)}")
    os.replace(head_index, os.path.join(repo_path, new_name))
 
-    def open_index(self):
-        return NSIndex.read(os.path.join(self.tmppath, "repository", f"index.{self.get_head()}"))
-
-    def corrupt_object(self, id_):
-        idx = self.open_index()
-        segment, offset, _ = idx[H(id_)]
-        with open(os.path.join(self.tmppath, "repository", "data", "0", str(segment)), "r+b") as fd:
-            fd.seek(offset)
-            fd.write(b"BOOM")
-
-    def delete_segment(self, segment):
-        self.repository.io.delete_segment(segment)
-
-    def delete_index(self):
-        os.unlink(os.path.join(self.tmppath, "repository", f"index.{self.get_head()}"))
-
-    def rename_index(self, new_name):
-        os.replace(
-            os.path.join(self.tmppath, "repository", f"index.{self.get_head()}"),
-            os.path.join(self.tmppath, "repository", new_name),
-        )
-
-    def list_objects(self):
-        return {int(key) for key in self.repository.list()}
-
-    def test_repair_corrupted_segment(self):
-        self.add_objects([[1, 2, 3], [4, 5], [6]])
-        self.assert_equal({1, 2, 3, 4, 5, 6}, self.list_objects())
-        self.check(status=True)
-        self.corrupt_object(5)
-        self.assert_raises(IntegrityError, lambda: self.get_objects(5))
-        self.repository.rollback()
-        # Make sure a regular check does not repair anything
-        self.check(status=False)
-        self.check(status=False)
-        # Make sure a repair actually repairs the repo
-        self.check(repair=True, status=True)
-        self.get_objects(4)
-        self.check(status=True)
-        self.assert_equal({1, 2, 3, 4, 6}, self.list_objects())
-
-    def test_repair_missing_segment(self):
-        self.add_objects([[1, 2, 3], [4, 5, 6]])
-        self.assert_equal({1, 2, 3, 4, 5, 6}, self.list_objects())
-        self.check(status=True)
-        self.delete_segment(2)
-        self.repository.rollback()
-        self.check(repair=True, status=True)
-        self.assert_equal({1, 2, 3}, self.list_objects())
-
-    def test_repair_missing_commit_segment(self):
-        self.add_objects([[1, 2, 3], [4, 5, 6]])
-        self.delete_segment(3)
-        self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4))
-        self.assert_equal({1, 2, 3}, self.list_objects())
-
-    def test_repair_corrupted_commit_segment(self):
-        self.add_objects([[1, 2, 3], [4, 5, 6]])
-        with open(os.path.join(self.tmppath, "repository", "data", "0", "3"), "r+b") as fd:
+
def list_objects(repository):
    """Return the ids of all objects stored in the repository as a set of ints."""
    return set(map(int, repository.list()))
+
+
def test_repair_corrupted_segment(repo_fixtures, request):
    """A corrupted object must survive plain check() and only be removed by repair."""
    with get_repository_from_fixture(repo_fixtures, request) as repository:
        repo_path = get_path(repository)
        add_objects(repository, [[1, 2, 3], [4, 5], [6]])
        assert list_objects(repository) == {1, 2, 3, 4, 5, 6}
        check(repository, repo_path, status=True)
        corrupt_object(repo_path, 5)
        with pytest.raises(IntegrityError):
            get_objects(repository, 5)
        repository.rollback()
        # a plain check must not repair anything (run twice to be sure)
        check(repository, repo_path, status=False)
        check(repository, repo_path, status=False)
        # a repair run must actually fix the repository
        check(repository, repo_path, repair=True, status=True)
        get_objects(repository, 4)
        check(repository, repo_path, status=True)
        assert list_objects(repository) == {1, 2, 3, 4, 6}
+
+
def test_repair_missing_segment(repository):
    """check(repair=True) must recover from a deleted segment file.

    Local repo only - files in a RemoteRepository cannot be deleted.
    """
    with repository:
        add_objects(repository, [[1, 2, 3], [4, 5, 6]])
        assert list_objects(repository) == {1, 2, 3, 4, 5, 6}
        check(repository, repository.path, status=True)
        delete_segment(repository, 2)
        repository.rollback()
        check(repository, repository.path, repair=True, status=True)
        assert list_objects(repository) == {1, 2, 3}
+
+
def test_repair_missing_commit_segment(repository):
    """Objects whose commit segment is gone must no longer be readable.

    Local repo only - files in a RemoteRepository cannot be deleted.
    """
    with repository:
        add_objects(repository, [[1, 2, 3], [4, 5, 6]])
        delete_segment(repository, 3)
        with pytest.raises(Repository.ObjectNotFound):
            get_objects(repository, 4)
        assert list_objects(repository) == {1, 2, 3}
+
+
def test_repair_corrupted_commit_segment(repo_fixtures, request):
    """A trashed commit tag must roll the repository back to the previous commit."""
    with get_repository_from_fixture(repo_fixtures, request) as repository:
        repo_path = get_path(repository)
        add_objects(repository, [[1, 2, 3], [4, 5, 6]])
        # clobber the last byte of segment 3 (where the commit tag lives)
        with open(os.path.join(repo_path, "data", "0", "3"), "r+b") as fd:
            fd.seek(-1, os.SEEK_END)
            fd.write(b"X")
        with pytest.raises(Repository.ObjectNotFound):
            get_objects(repository, 4)
        check(repository, repo_path, status=True)
        get_objects(repository, 3)
        assert list_objects(repository) == {1, 2, 3}
+
+
def test_repair_no_commits(repo_fixtures, request):
    """With no valid commit at all, check() must fail until repair rebuilds the index."""
    with get_repository_from_fixture(repo_fixtures, request) as repository:
        repo_path = get_path(repository)
        add_objects(repository, [[1, 2, 3]])
        # corrupt the only commit by overwriting its final byte
        with open(os.path.join(repo_path, "data", "0", "1"), "r+b") as fd:
            fd.seek(-1, os.SEEK_END)
            fd.write(b"X")
        with pytest.raises(Repository.CheckNeeded):
            get_objects(repository, 4)
        # plain checks fail and must not touch the on-disk index
        check(repository, repo_path, status=False)
        check(repository, repo_path, status=False)
        assert list_indices(repo_path) == ["index.1"]
        # repair rewrites the index under the new transaction id
        check(repository, repo_path, repair=True, status=True)
        assert list_indices(repo_path) == ["index.2"]
        check(repository, repo_path, status=True)
        get_objects(repository, 3)
        assert list_objects(repository) == {1, 2, 3}
+
+
def test_repair_missing_index(repo_fixtures, request):
    """A plain check() must transparently rebuild a deleted index."""
    with get_repository_from_fixture(repo_fixtures, request) as repository:
        repo_path = get_path(repository)
        add_objects(repository, [[1, 2, 3], [4, 5, 6]])
        delete_index(repo_path)
        check(repository, repo_path, status=True)
        get_objects(repository, 4)
        assert list_objects(repository) == {1, 2, 3, 4, 5, 6}
+
+
def test_repair_index_too_new(repo_fixtures, request):
    """check() must replace an index claiming a newer transaction than the segments."""
    with get_repository_from_fixture(repo_fixtures, request) as repository:
        repo_path = get_path(repository)
        add_objects(repository, [[1, 2, 3], [4, 5, 6]])
        assert list_indices(repo_path) == ["index.3"]
        rename_index(repo_path, "index.100")
        check(repository, repo_path, status=True)
        # the bogus index.100 got replaced by one matching the segments again
        assert list_indices(repo_path) == ["index.3"]
        get_objects(repository, 4)
        assert list_objects(repository) == {1, 2, 3, 4, 5, 6}
+
+
def test_crash_before_compact(repository):
    """Data must survive a crash between commit and compaction.

    Local repo only - we can't mock-patch a RemoteRepository class in another process!
    """
    with repository:
        repository.put(H(0), fchunk(b"data"))
        repository.put(H(0), fchunk(b"data2"))
        # pretend we crashed right before compact_segments ran
        with patch.object(Repository, "compact_segments") as compact:
            repository.commit(compact=True)
            compact.assert_called_once_with(0.1)
    with reopen(repository) as repository:
        check(repository, repository.path, repair=True)
        # the latest value written before the "crash" must still be there
        assert pdchunk(repository.get(H(0))) == b"data2"
+
+
def test_hints_persistence(repository):
    """Hints (shadow_index, compact, segments) must survive close/reopen of the repo."""
    with repository:
        repository.put(H(0), fchunk(b"data"))
        repository.delete(H(0))
        repository.commit(compact=False)
        expected_shadow_index = repository.shadow_index
        expected_compact = repository.compact
        expected_segments = repository.segments
    # close and re-open (fresh Repository instance), so the hints must be
    # persisted to and reloaded from disk
    with reopen(repository) as repository:
        repository.put(H(42), fchunk(b"foobar"))  # calls prepare_txn() and loads the hints data
        # the reloaded hints must match what we recorded before closing:
        assert repository.shadow_index == expected_shadow_index
        assert repository.compact == expected_compact
        del repository.segments[2]  # drop the segment created by put(H(42), ...)
        assert repository.segments == expected_segments
+
+
def test_hints_behaviour(repository):
    """shadow_index/compact entries appear on delete and vanish after full compaction."""
    with repository:
        repository.put(H(0), fchunk(b"data"))
        assert repository.shadow_index == {}
        assert len(repository.compact) == 0
        repository.delete(H(0))
        repository.commit(compact=False)
        # the delete must have recorded H(0) in the shadow index
        assert H(0) in repository.shadow_index
        assert len(repository.shadow_index[H(0)]) == 1
        assert 0 in repository.compact  # segment 0 is now compactable
        repository.put(H(42), fchunk(b"foobar"))  # see also do_compact()
        repository.commit(compact=True, threshold=0.0)  # compact completely!
        # after a full compaction no stale info may remain:
        assert H(0) not in repository.shadow_index
        # segment 0 was compacted away, no info about it left:
        assert 0 not in repository.compact
        assert 0 not in repository.segments
 
 
-class RemoteRepositoryTestCase(RepositoryTestCase):
-    repository = None  # type: RemoteRepository
+def _get_mock_args():
+    class MockArgs:
+        remote_path = "borg"
+        umask = 0o077
+        debug_topics = []
+        rsh = None
 
-    def open(self, create=False, exclusive=UNSPECIFIED):
-        return RemoteRepository(
-            Location("ssh://__testsuite__" + os.path.join(self.tmppath, "repository")), exclusive=True, create=create
-        )
+        def __contains__(self, item):
+            # to behave like argparse.Namespace
+            return hasattr(self, item)
 
-    def _get_mock_args(self):
-        class MockArgs:
-            remote_path = "borg"
-            umask = 0o077
-            debug_topics = []
-            rsh = None
+    return MockArgs()
 
-            def __contains__(self, item):
-                # To behave like argparse.Namespace
-                return hasattr(self, item)
 
-        return MockArgs()
def test_remote_invalid_rpc(remote_repository):
    """Calling a method that is not a whitelisted RPC must raise InvalidRPCMethod."""
    with remote_repository, pytest.raises(InvalidRPCMethod):
        remote_repository.call("__init__", {})
 
-    def test_invalid_rpc(self):
-        self.assert_raises(InvalidRPCMethod, lambda: self.repository.call("__init__", {}))
 
-    def test_rpc_exception_transport(self):
def test_remote_rpc_exception_transport(remote_repository):
    """Exceptions injected server-side must be transported and re-raised client-side
    with their original args intact.

    NOTE(review): each try/except below passes silently if the call does not raise
    at all - confirm this is intentional (the asserts only run when the expected
    exception class actually arrives).
    """
    with remote_repository:
        s1 = "test string"

        # repository-level exceptions carry the processed location as their arg
        try:
            remote_repository.call("inject_exception", {"kind": "DoesNotExist"})
        except Repository.DoesNotExist as e:
            assert len(e.args) == 1
            assert e.args[0] == remote_repository.location.processed

        try:
            remote_repository.call("inject_exception", {"kind": "AlreadyExists"})
        except Repository.AlreadyExists as e:
            assert len(e.args) == 1
            assert e.args[0] == remote_repository.location.processed

        try:
            remote_repository.call("inject_exception", {"kind": "CheckNeeded"})
        except Repository.CheckNeeded as e:
            assert len(e.args) == 1
            assert e.args[0] == remote_repository.location.processed

        # IntegrityError transports the server-provided message string
        try:
            remote_repository.call("inject_exception", {"kind": "IntegrityError"})
        except IntegrityError as e:
            assert len(e.args) == 1
            assert e.args[0] == s1

        try:
            remote_repository.call("inject_exception", {"kind": "PathNotAllowed"})
        except PathNotAllowed as e:
            assert len(e.args) == 1
            assert e.args[0] == "foo"

        # ObjectNotFound carries (message, location)
        try:
            remote_repository.call("inject_exception", {"kind": "ObjectNotFound"})
        except Repository.ObjectNotFound as e:
            assert len(e.args) == 2
            assert e.args[0] == s1
            assert e.args[1] == remote_repository.location.processed

        try:
            remote_repository.call("inject_exception", {"kind": "InvalidRPCMethod"})
        except InvalidRPCMethod as e:
            assert len(e.args) == 1
            assert e.args[0] == s1

        # unknown exception classes arrive wrapped as a generic RPCError
        try:
            remote_repository.call("inject_exception", {"kind": "divide"})
        except RemoteRepository.RPCError as e:
            assert e.unpacked
            assert e.get_message() == "ZeroDivisionError: integer division or modulo by zero\n"
            assert e.exception_class == "ZeroDivisionError"
            assert len(e.exception_full) > 0
 
-    def test_ssh_cmd(self):
-        args = self._get_mock_args()
-        self.repository._args = args
-        assert self.repository.ssh_cmd(Location("ssh://example.com/foo")) == ["ssh", "example.com"]
-        assert self.repository.ssh_cmd(Location("ssh://user@example.com/foo")) == ["ssh", "user@example.com"]
-        assert self.repository.ssh_cmd(Location("ssh://user@example.com:1234/foo")) == [
+
def test_remote_ssh_cmd(remote_repository):
    """ssh_cmd() must build the ssh argv from the location and honor BORG_RSH.

    Fix: the original version set os.environ["BORG_RSH"] and never restored it,
    leaking the override into every test that runs afterwards in this process;
    we now save and restore the variable in a try/finally.
    """
    with remote_repository:
        args = _get_mock_args()
        remote_repository._args = args
        assert remote_repository.ssh_cmd(Location("ssh://example.com/foo")) == ["ssh", "example.com"]
        assert remote_repository.ssh_cmd(Location("ssh://user@example.com/foo")) == ["ssh", "user@example.com"]
        assert remote_repository.ssh_cmd(Location("ssh://user@example.com:1234/foo")) == [
            "ssh",
            "-p",
            "1234",
            "user@example.com",
        ]
        # temporarily override BORG_RSH, restoring it so other tests are unaffected
        saved_rsh = os.environ.get("BORG_RSH")
        os.environ["BORG_RSH"] = "ssh --foo"
        try:
            assert remote_repository.ssh_cmd(Location("ssh://example.com/foo")) == ["ssh", "--foo", "example.com"]
        finally:
            if saved_rsh is None:
                del os.environ["BORG_RSH"]
            else:
                os.environ["BORG_RSH"] = saved_rsh
 
-    def test_borg_cmd(self):
-        assert self.repository.borg_cmd(None, testing=True) == [sys.executable, "-m", "borg", "serve"]
-        args = self._get_mock_args()
+
def test_remote_borg_cmd(remote_repository):
    """borg_cmd() must build the server-side 'borg serve' argv from the args namespace:
    remote_path, log level, debug topics and storage quota all map to CLI flags."""
    with remote_repository:
        # testing=True runs the serve process via the current interpreter
        assert remote_repository.borg_cmd(None, testing=True) == [sys.executable, "-m", "borg", "serve"]
        args = _get_mock_args()
        # XXX without next line we get spurious test fails when using pytest-xdist, root cause unknown:
        logging.getLogger().setLevel(logging.INFO)
        # note: test logger is on info log level, so --info gets added automagically
        assert remote_repository.borg_cmd(args, testing=False) == ["borg", "serve", "--info"]
        args.remote_path = "borg-0.28.2"
        assert remote_repository.borg_cmd(args, testing=False) == ["borg-0.28.2", "serve", "--info"]
        # only server-side debug topics get forwarded, with the borg.debug. prefix
        args.debug_topics = ["something_client_side", "repository_compaction"]
        assert remote_repository.borg_cmd(args, testing=False) == [
            "borg-0.28.2",
            "serve",
            "--info",
            "--debug-topic=borg.debug.repository_compaction",
        ]
        args = _get_mock_args()
        # a zero quota adds no flag; a nonzero quota is passed through verbatim
        args.storage_quota = 0
        assert remote_repository.borg_cmd(args, testing=False) == ["borg", "serve", "--info"]
        args.storage_quota = 314159265
        assert remote_repository.borg_cmd(args, testing=False) == [
            "borg",
            "serve",
            "--info",
            "--storage-quota=314159265",
        ]
        # args.rsh takes precedence over the default ssh command
        args.rsh = "ssh -i foo"
        remote_repository._args = args
        assert remote_repository.ssh_cmd(Location("ssh://example.com/foo")) == ["ssh", "-i", "foo", "example.com"]