
tests: fix typos and grammar

Thomas Waldmann 1 week ago
parent commit 477366f4a5

+ 8 - 8
src/borg/testsuite/__init__.py

@@ -22,7 +22,7 @@ from ..helpers import umount
 from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR
 from .. import platform
 
-# Note: this is used by borg.selftest, do not use or import py.test functionality here.
+# Note: this is used by borg.selftest; do not use or import pytest functionality here.
 
 from ..fuse_impl import llfuse, has_pyfuse3, has_llfuse
 
@@ -54,7 +54,7 @@ if sys.platform.startswith('netbsd'):
 
 
 def same_ts_ns(ts_ns1, ts_ns2):
-    """compare 2 timestamps (both in nanoseconds) whether they are (roughly) equal"""
+    """Compare two timestamps (both in nanoseconds) to determine whether they are (roughly) equal."""
     diff_ts = int(abs(ts_ns1 - ts_ns2))
     diff_max = 10 ** (-st_mtime_ns_round)
     return diff_ts <= diff_max
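
A standalone sketch of the tolerance logic above, assuming st_mtime_ns_round = -9 (a platform that rounds mtime to whole seconds), so diff_max becomes 10**9 ns, i.e. one second:

    # Assumption: st_mtime_ns_round = -9 (mtime rounded to whole seconds).
    st_mtime_ns_round = -9

    def same_ts_ns(ts_ns1, ts_ns2):
        diff_ts = int(abs(ts_ns1 - ts_ns2))
        diff_max = 10 ** (-st_mtime_ns_round)  # 10**9 ns == 1 s here
        return diff_ts <= diff_max

    assert same_ts_ns(1_000_000_000, 1_999_999_999)      # less than 1 s apart
    assert not same_ts_ns(1_000_000_000, 3_000_000_001)  # more than 1 s apart
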
@@ -81,7 +81,7 @@ def are_symlinks_supported():
 @functools.lru_cache
 def are_hardlinks_supported():
     if not hasattr(os, 'link'):
-        # some pythons do not have os.link
+        # Some Python builds do not have os.link
         return False
 
     with unopened_tempfile() as file1path, unopened_tempfile() as file2path:
@@ -157,7 +157,7 @@ def is_birthtime_fully_supported():
 
 
 def no_selinux(x):
-    # selinux fails our FUSE tests, thus ignore selinux xattrs
+    # SELinux fails our FUSE tests; thus, ignore SELinux xattrs
     SELINUX_KEY = b'security.selinux'
     if isinstance(x, dict):
         return {k: v for k, v in x.items() if k != SELINUX_KEY}
@@ -222,7 +222,7 @@ class BaseTestCase(unittest.TestCase):
                 d1[4] = None
             if not stat.S_ISCHR(s2.st_mode) and not stat.S_ISBLK(s2.st_mode):
                 d2[4] = None
-            # If utime isn't fully supported, borg can't set mtime.
+            # If utime isn't fully supported, Borg can't set mtime.
             # Therefore, we shouldn't test it in that case.
             if is_utime_fully_supported():
                 # Older versions of llfuse do not support ns precision properly
@@ -301,7 +301,7 @@ class BaseTestCase(unittest.TestCase):
         time.sleep(0.2)
 
     def wait_for_mountstate(self, mountpoint, *, mounted, timeout=5):
-        """Wait until a path meets specified mount point status"""
+        """Wait until a path meets the specified mount point status."""
         timeout += time.time()
         while timeout > time.time():
             if os.path.ismount(mountpoint) == mounted:
@@ -312,7 +312,7 @@ class BaseTestCase(unittest.TestCase):
 
     @contextmanager
     def read_only(self, path):
-        """Some paths need to be made read-only for testing
+        """Some paths need to be made read-only for testing.
 
         If the tests are executed inside a fakeroot environment, the
         changes from chmod won't affect the real permissions of that
@@ -379,7 +379,7 @@ class environment_variable:
 
 
 class FakeInputs:
-    """Simulate multiple user inputs, can be used as input() replacement"""
+    """Simulate multiple user inputs; can be used as an input() replacement."""
     def __init__(self, inputs):
         self.inputs = inputs
 

+ 2 - 2
src/borg/testsuite/archiver.py

@@ -103,7 +103,7 @@ def exec_cmd(*args, archiver=None, fork=False, exe=None, input=b'', binary_outpu
             try:
                 args = archiver.parse_args(list(args))
                 # argparse parsing may raise SystemExit when the command line is bad or
-                # actions that abort early (eg. --help) where given. Catch this and return
+                # actions that abort early (e.g., --help) were given. Catch this and return
                 # the error code as-if we invoked a Borg binary.
             except SystemExit as e:
                 output_text.flush()
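
For context, the SystemExit behavior the comment describes can be reproduced standalone (illustrative names, not Borg's code):

    import argparse

    def run(argv):
        parser = argparse.ArgumentParser(prog='demo')
        parser.add_argument('--flag', action='store_true')
        try:
            parser.parse_args(argv)
        except SystemExit as e:
            # argparse exits with code 0 for --help and 2 for a bad command
            # line; return that code as if a real binary had been invoked.
            return e.code
        return 0

    assert run(['--flag']) == 0
    assert run(['--bogus']) == 2   # unknown option -> argparse error exit code
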
@@ -846,7 +846,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
         shutil.rmtree(self.cache_path)
         self.cmd('info', self.repository_location)
 
-        # Needs confirmation: cache and security dir both gone (eg. another host or rm -rf ~)
+        # Needs confirmation: cache and security dir both gone (e.g., another host or rm -rf ~)
         shutil.rmtree(self.cache_path)
         shutil.rmtree(self.get_security_dir())
         if self.FORK_DEFAULT:

+ 1 - 1
src/borg/testsuite/benchmark.py

@@ -1,5 +1,5 @@
 """
-Do benchmarks using pytest-benchmark.
+Run benchmarks using pytest-benchmark.
 
 Usage:
 

+ 1 - 1
src/borg/testsuite/chunker.py

@@ -9,7 +9,7 @@ from . import BaseTestCase
 
 
 def cf(chunks):
-    """chunk filter"""
+    """Chunk filter."""
     # this is to simplify testing: either return the data piece (bytes) or the hole length (int).
     def _cf(chunk):
         if chunk.meta['allocation'] == CH_DATA:

+ 2 - 2
src/borg/testsuite/chunker_pytest.py

@@ -140,7 +140,7 @@ def test_chunkify_sparse(tmpdir, fname, sparse_map, header_size, sparse):
 
 def test_buzhash_chunksize_distribution():
     data = os.urandom(1048576)
-    min_exp, max_exp, mask = 10, 16, 14  # chunk size target 16kiB, clip at 1kiB and 64kiB
+    min_exp, max_exp, mask = 10, 16, 14  # chunk size target 16 KiB, clip at 1 KiB and 64 KiB
     chunker = Chunker(0, min_exp, max_exp, mask, 4095)
     f = BytesIO(data)
     chunks = cf(chunker.chunkify(f))
@@ -153,7 +153,7 @@ def test_buzhash_chunksize_distribution():
     max_count = sum(int(size == 2 ** max_exp) for size in chunk_sizes)
     print(f"count: {chunks_count} min: {min_chunksize_observed} max: {max_chunksize_observed} "
           f"min count: {min_count} max count: {max_count}")
-    # usually there will about 64 chunks
+    # usually there will be about 64 chunks
     assert 32 < chunks_count < 128
     # chunks always must be between min and max (clipping must work):
     assert min_chunksize_observed >= 2 ** min_exp
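
The bounds asserted here follow from the parameters: a mask of 14 targets 2**14 = 16 KiB chunks, so 1 MiB of random data should yield about 64 of them (hence the generous 32..128 window):

    data_len = 1048576        # 1 MiB of random data
    target = 2 ** 14          # mask = 14 -> expected chunk size 16 KiB
    assert data_len / target == 64   # hence 32 < chunks_count < 128
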

+ 1 - 1
src/borg/testsuite/chunker_slow.py

@@ -38,6 +38,6 @@ class ChunkerRegressionTestCase(BaseTestCase):
                             runs.append(H(b''.join(chunks)))
 
         # The "correct" hash below matches the existing chunker behavior.
-        # Future chunker optimisations must not change this, or existing repos will bloat.
+        # Future chunker optimizations must not change this, or existing repos will bloat.
         overall_hash = H(b''.join(runs))
         self.assert_equal(overall_hash, hex_to_bin("a43d0ecb3ae24f38852fcc433a83dacd28fe0748d09cc73fc11b69cf3f1a7299"))

+ 8 - 8
src/borg/testsuite/compress.py

@@ -48,7 +48,7 @@ def test_lz4_buffer_allocation(monkeypatch):
     # disable fallback to no compression on incompressible data
     monkeypatch.setattr(LZ4, 'decide', lambda always_compress: LZ4)
     # test with a rather huge data object to see if buffer allocation / resizing works
-    data = os.urandom(5 * 2**20) * 10  # 50MiB badly compressible data
+    data = os.urandom(5 * 2**20) * 10  # 50 MiB badly compressible data
     assert len(data) == 50 * 2**20
     c = Compressor('lz4')
     cdata = c.compress(data)
@@ -90,8 +90,8 @@ def test_autodetect_invalid():
 
 
 def test_zlib_compat():
-    # for compatibility reasons, we do not add an extra header for zlib,
-    # nor do we expect one when decompressing / autodetecting
+    # For compatibility reasons, we do not add an extra header for zlib,
+    # nor do we expect one when decompressing or autodetecting
     for level in range(10):
         c = get_compressor(name='zlib', level=level)
         cdata1 = c.compress(data)
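
Autodetecting zlib without an extra header is possible because zlib data carries its own two-byte header: the first byte's low nibble is 8 (deflate) and the first two bytes, read big-endian, are divisible by 31 (RFC 1950). A sketch of such a check, as an illustration of the idea rather than Borg's actual detection code:

    import zlib

    def looks_like_zlib(cdata):
        # RFC 1950: CMF low nibble == 8 (deflate), CMF*256 + FLG divisible by 31
        return (len(cdata) >= 2 and cdata[0] & 0x0f == 8
                and (cdata[0] * 256 + cdata[1]) % 31 == 0)

    assert looks_like_zlib(zlib.compress(b'some data'))
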
@@ -109,7 +109,7 @@ def test_compressor():
         dict(name='lz4'),
         dict(name='zstd', level=1),
         dict(name='zstd', level=3),
-        # avoiding high zstd levels, memory needs unclear
+        # Avoiding high zstd levels; memory needs unclear
         dict(name='zlib', level=0),
         dict(name='zlib', level=6),
         dict(name='zlib', level=9),
@@ -118,7 +118,7 @@ def test_compressor():
         params_list += [
             dict(name='lzma', level=0),
             dict(name='lzma', level=6),
-            # we do not test lzma on level 9 because of the huge memory needs
+            # We do not test lzma on level 9 because of the huge memory needs
         ]
     for params in params_list:
         c = Compressor(**params)
@@ -216,12 +216,12 @@ def test_obfuscate():
 )
 def test_padme_obfuscation(data_length, expected_padding):
     compressor = Compressor(name="obfuscate", level=250, compressor=Compressor("none"))
-    # the innner compressor will add an inner header of 2 bytes, so we reduce the data length by 2 bytes
-    # to be able to use (almost) the same test cases as in master branch.
+    # The inner compressor will add an inner header of 2 bytes, so we reduce the data length by 2 bytes
+    # to be able to use (almost) the same test cases as in the master branch.
     data = b"x" * (data_length - 2)
     compressed = compressor.compress(data)
 
-    # the outer "obfuscate" pseudo-compressor adds an outer header of 6 bytes.
+    # The outer "obfuscate" pseudo-compressor adds an outer header of 6 bytes.
     expected_padded_size = 6 + data_length + expected_padding
 
     assert (
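
The size bookkeeping in this test, spelled out with illustrative numbers (expected_padding would come from the Padmé scheme itself):

    data_length, expected_padding = 10000, 240   # illustrative values only

    inner = (data_length - 2) + 2        # payload plus 2-byte inner header
    padded = inner + expected_padding    # Padmé padding on top
    total = 6 + padded                   # plus the 6-byte outer header
    assert total == 6 + data_length + expected_padding
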

+ 18 - 18
src/borg/testsuite/hashindex.py

@@ -15,12 +15,12 @@ from . import BaseTestCase, unopened_tempfile
 
 
 def H(x):
-    # make some 32byte long thing that depends on x
+    # Make some 32-byte long thing that depends on x
     return bytes('%-0.32d' % x, 'ascii')
 
 
 def H2(x):
-    # like H(x), but with pseudo-random distribution of the output value
+    # Like H(x), but with pseudo-random distribution of the output value
     return hashlib.sha256(H(x)).digest()
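
Concretely, the %-0.32d format zero-pads the decimal value to 32 digits, so both helpers produce 32-byte keys; a quick standalone check:

    import hashlib

    def H(x):
        return bytes('%-0.32d' % x, 'ascii')

    def H2(x):
        return hashlib.sha256(H(x)).digest()

    assert H(1) == b'00000000000000000000000000000001'
    assert len(H(1)) == len(H2(1)) == 32
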
 
 
@@ -160,12 +160,12 @@ class HashIndexExtraTestCase(BaseTestCase):
     """These tests are separate because they should not become part of the selftest.
     """
     def test_chunk_indexer(self):
-        # see _hashindex.c hash_sizes, we want to be close to the max. load
+        # See _hashindex.c hash_sizes; we want to be close to the maximum load
         # because interesting errors happen there.
         key_count = int(65537 * ChunkIndex.MAX_LOAD_FACTOR) - 10
         index = ChunkIndex(key_count)
         all_keys = [hashlib.sha256(H(k)).digest() for k in range(key_count)]
-        # we're gonna delete 1/3 of all_keys, so let's split them 2/3 and 1/3:
+        # We are going to delete 1/3 of all_keys, so let's split them 2/3 and 1/3:
         keys, to_delete_keys = all_keys[0:(2*key_count//3)], all_keys[(2*key_count//3):]
 
         for i, key in enumerate(keys):
@@ -180,10 +180,10 @@ class HashIndexExtraTestCase(BaseTestCase):
         for key in to_delete_keys:
             assert index.get(key) is None
 
-        # now delete every key still in the index
+        # Now delete every key still in the index
         for key in keys:
             del index[key]
-        # the index should now be empty
+        # The index should now be empty
         assert list(index.iteritems()) == []
 
 
@@ -533,33 +533,33 @@ class IndexCorruptionTestCase(BaseTestCase):
         from struct import pack
 
         def HH(x, y):
-            # make some 32byte long thing that depends on x and y.
-            # same x will mean a collision in the hashtable as bucket index is computed from
-            # first 4 bytes. giving a specific x targets bucket index x.
-            # y is to create different keys and does not go into the bucket index calculation.
-            # so, same x + different y --> collision
+            # Make some 32-byte long thing that depends on x and y.
+            # The same x will mean a collision in the hash table as the bucket index is computed from
+            # the first 4 bytes. Giving a specific x targets bucket index x.
+            # y is used to create different keys and does not go into the bucket index calculation.
+            # Therefore, same x + different y -> collision
             return pack('<IIQQQ', x, y, 0, 0, 0)  # 2 * 4 + 3 * 8 == 32
 
         idx = NSIndex()
 
-        # create lots of colliding entries
+        # Create lots of colliding entries
         for y in range(700):  # stay below max load to not trigger resize
             idx[HH(0, y)] = (0, y)
 
         assert idx.size() == 1031 * 40 + 18  # 1031 buckets + header
 
-        # delete lots of the collisions, creating lots of tombstones
+        # Delete many of the collisions, creating many tombstones
         for y in range(400):  # stay above min load to not trigger resize
             del idx[HH(0, y)]
 
-        # create lots of colliding entries, within the not yet used part of the hashtable
+        # Create many colliding entries within the not-yet-used part of the hash table
         for y in range(330):  # stay below max load to not trigger resize
-            # at y == 259 a resize will happen due to going beyond max EFFECTIVE load
-            # if the bug is present, that element will be inserted at the wrong place.
-            # and because it will be at the wrong place, it can not be found again.
+            # At y == 259 a resize will happen due to going beyond the maximum EFFECTIVE load.
+            # If the bug is present, that element will be inserted at the wrong place.
+            # And because it will be at the wrong place, it cannot be found again.
             idx[HH(600, y)] = 600, y
 
-        # now check if hashtable contents is as expected:
+        # Now check if the hash table contents are as expected:
 
         assert [idx.get(HH(0, y)) for y in range(400, 700)] == [(0, y) for y in range(400, 700)]
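
The collision trick is visible in the raw bytes: for a fixed x, every HH(x, y) shares its first 4 bytes, which is all the bucket-index computation reads. A standalone check:

    from struct import pack

    def HH(x, y):
        return pack('<IIQQQ', x, y, 0, 0, 0)  # 2 * 4 + 3 * 8 == 32 bytes

    a, b = HH(0, 1), HH(0, 2)
    assert a != b            # different keys ...
    assert a[:4] == b[:4]    # ... same first 4 bytes -> same bucket index
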
 

+ 5 - 5
src/borg/testsuite/hashindex_stress.py

@@ -8,7 +8,7 @@ from ..hashindex import NSIndex
 
 @pytest.mark.skipif("BORG_TESTS_SLOW" not in os.environ, reason="slow tests not enabled, use BORG_TESTS_SLOW=1")
 def test_hashindex_stress():
-    """checks if the hashtable behaves as expected
+    """Check if the hash table behaves as expected
 
     This can be used in _hashindex.c before running this test to provoke more collisions (don't forget to compile):
     #define HASH_MAX_LOAD .99
@@ -19,19 +19,19 @@ def test_hashindex_stress():
     idx = NSIndex()
     kv = {}
     for i in range(LOOPS):
-        # put some entries
+        # Put some entries
         for j in range(ENTRIES):
             k = random.randbytes(32)
             v = random.randint(0, NSIndex.MAX_VALUE - 1)
             idx[k] = (v, v)
             kv[k] = v
-        # check and delete a random amount of entries
+        # Check and delete a random number of entries
         delete_keys = random.sample(list(kv), k=random.randint(0, len(kv)))
         for k in delete_keys:
             v = kv.pop(k)
             assert idx.pop(k) == (v, v)
-        # check if remaining entries are as expected
+        # Check whether the remaining entries are as expected
         for k, v in kv.items():
             assert idx[k] == (v, v)
-        # check entry count
+        # Check entry count
         assert len(kv) == len(idx)

+ 1 - 1
src/borg/testsuite/item.py

@@ -35,7 +35,7 @@ def test_item_empty():
 
 
 def test_item_from_dict():
-    # does not matter whether we get str or bytes keys
+    # It does not matter whether we get str or bytes keys
     item = Item({b'path': '/a/b/c', b'mode': 0o666})
     assert item.path == '/a/b/c'
     assert item.mode == 0o666

+ 1 - 1
src/borg/testsuite/key.py

@@ -57,7 +57,7 @@ class TestKey:
 
     keyfile_blake2_cdata = bytes.fromhex('04fdf9475cf2323c0ba7a99ddc011064f2e7d039f539f2e448'
                                          '0e6f5fc6ff9993d604040404040404098c8cee1c6db8c28947')
-    # Verified against b2sum. Entire string passed to BLAKE2, including the padded 64 byte key contained in
+    # Verified against b2sum. Entire string passed to BLAKE2, including the padded 64-byte key contained in
     # keyfile_blake2_key_file above is
     # 19280471de95185ec27ecb6fc9edbb4f4db26974c315ede1cd505fab4250ce7cd0d081ea66946c
     # 95f0db934d5f616921efbd869257e8ded2bd9bd93d7f07b1a30000000000000000000000000000

+ 1 - 1
src/borg/testsuite/nonces.py

@@ -182,7 +182,7 @@ class TestNonceManager:
         assert self.cache_nonce() == "0000000000002033"
         assert self.repository.next_free == 0x2033
 
-        # somehow the clients unlocks, another client reserves and this client relocks
+        # somehow the client unlocks, another client reserves, and this client relocks
         self.repository.next_free = 0x4000
 
         # enough space in reservation

+ 6 - 10
src/borg/testsuite/repository.py

@@ -57,7 +57,7 @@ class RepositoryTestCaseBase(BaseTestCase):
     def repo_dump(self, label=None):
         label = label + ': ' if label is not None else ''
         H_trans = {H(i): i for i in range(10)}
-        H_trans[None] = -1  # key == None appears in commits
+        H_trans[None] = -1  # a key of None appears in commits
         tag_trans = {TAG_PUT: 'put', TAG_DELETE: 'del', TAG_COMMIT: 'comm'}
         for segment, fn in self.repository.io.segment_iterator():
             for tag, key, offset, size in self.repository.io.iter_objects(segment):
@@ -84,8 +84,7 @@ class RepositoryTestCase(RepositoryTestCaseBase):
                 self.assert_equal(repository2.get(H(x)), b'SOMEDATA')
 
     def test2(self):
-        """Test multiple sequential transactions
-        """
+        """Test multiple sequential transactions."""
         self.repository.put(H(0), b'foo')
         self.repository.put(H(1), b'foo')
         self.repository.commit(compact=False)
@@ -95,8 +94,7 @@ class RepositoryTestCase(RepositoryTestCaseBase):
         self.assert_equal(self.repository.get(H(1)), b'bar')
 
     def test_consistency(self):
-        """Test cache consistency
-        """
+        """Test cache consistency."""
         self.repository.put(H(0), b'foo')
         self.assert_equal(self.repository.get(H(0)), b'foo')
         self.repository.put(H(0), b'foo2')
@@ -107,8 +105,7 @@ class RepositoryTestCase(RepositoryTestCaseBase):
         self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(H(0)))
 
     def test_consistency2(self):
-        """Test cache consistency2
-        """
+        """Test cache consistency 2."""
         self.repository.put(H(0), b'foo')
         self.assert_equal(self.repository.get(H(0)), b'foo')
         self.repository.commit(compact=False)
@@ -118,8 +115,7 @@ class RepositoryTestCase(RepositoryTestCaseBase):
         self.assert_equal(self.repository.get(H(0)), b'foo')
 
     def test_overwrite_in_same_transaction(self):
-        """Test cache consistency2
-        """
+        """Test cache consistency 2."""
         self.repository.put(H(0), b'foo')
         self.repository.put(H(0), b'foo2')
         self.repository.commit(compact=False)
@@ -181,7 +177,7 @@ class RepositoryTestCase(RepositoryTestCaseBase):
         self.repository.delete(H(0))
 
 class LocalRepositoryTestCase(RepositoryTestCaseBase):
-    # test case that doesn't work with remote repositories
+    # Test case that doesn't work with remote repositories.
 
     def _assert_sparse(self):
         # The superseded 123456... PUT

+ 2 - 2
src/borg/testsuite/shellpattern.py

@@ -82,10 +82,10 @@ def test_match(path, patterns):
     ("foo", ["?foo"]),
     ("foo", ["f?oo"]),
 
-    # do not match path separator
+    # Do not match path separator
     ("foo/ar", ["foo?ar"]),
 
-    # do not match/cross over os.path.sep
+    # Do not match/cross over os.path.sep
     ("foo/bar", ["*"]),
     ("foo/bar", ["foo*bar"]),
     ("foo/bar", ["foo*ar"]),

+ 22 - 22
src/borg/testsuite/upgrader.py

@@ -17,9 +17,9 @@ ATTIC_TAR = os.path.join(os.path.dirname(__file__), 'attic.tar.gz')
 
 def untar(tarfname, path, what):
     """
-    extract <tarfname> tar archive to <path>, all stuff starting with <what>.
+    Extract the <tarfname> tar archive to <path>, including all entries starting with <what>.
 
-    return path to <what>.
+    Return the path to <what>.
     """
 
     def files(members):
@@ -35,10 +35,10 @@ def untar(tarfname, path, what):
 
 def repo_valid(path):
     """
-    utility function to check if borg can open a repository
+    Utility function to check if Borg can open a repository.
 
     :param path: the path to the repository
-    :returns: if borg can check the repository
+    :returns: whether Borg can check the repository
     """
     with Repository(str(path), exclusive=True, create=False) as repository:
         # can't check raises() because check() handles the error
@@ -47,10 +47,10 @@ def repo_valid(path):
 
 def key_valid(path):
     """
-    check that the new keyfile is alright
+    Check that the new key file is valid.
 
     :param path: the path to the key file
-    :returns: if the file starts with the borg magic string
+    :returns: whether the file starts with the Borg magic string
     """
     keyfile = os.path.join(get_keys_dir(),
                            os.path.basename(path))
@@ -60,10 +60,10 @@ def key_valid(path):
 
 def make_attic_repo(dir):
     """
-    create an attic repo with some stuff in it
+    Create an Attic repo with some content in it.
 
     :param dir: path to the repository to be created
-    :returns: path to attic repository
+    :returns: path to the Attic repository
     """
     # there is some stuff in that repo, copied from `RepositoryTestCase.test1`
     return untar(ATTIC_TAR, str(dir), 'repo')
@@ -80,13 +80,13 @@ def inplace(request):
 
 
 def test_convert_segments(attic_repo, inplace):
-    """test segment conversion
+    """Test segment conversion.
 
-    this will load the given attic repository, list all the segments
-    then convert them one at a time. we need to close the repo before
-    conversion otherwise we have errors from borg
+    This will load the given Attic repository, list all the segments,
+    then convert them one at a time. We need to close the repo before
+    conversion; otherwise we have errors from Borg.
 
-    :param attic_repo: a populated attic repository (fixture)
+    :param attic_repo: a populated Attic repository (fixture)
     """
     repo_path = attic_repo
     with pytest.raises(Repository.AtticRepository):
@@ -102,21 +102,21 @@ def test_convert_segments(attic_repo, inplace):
 @pytest.fixture()
 def attic_key_file(tmpdir, monkeypatch):
     """
-    create an attic key file from the given repo, in the keys
-    subdirectory of the given tmpdir
+    Create an Attic key file from the given repo, in the keys
+    subdirectory of the given tmpdir.
 
-    :param tmpdir: a temporary directory (a builtin fixture)
-    :returns: path to key file
+    :param tmpdir: a temporary directory (a built-in fixture)
+    :returns: path to the key file
     """
     keys_dir = untar(ATTIC_TAR, str(tmpdir), 'keys')
 
-    # we use the repo dir for the created keyfile, because we do
-    # not want to clutter existing keyfiles
+    # We use the repo dir for the created key file, because we do
+    # not want to clutter existing key files.
     monkeypatch.setenv('ATTIC_KEYS_DIR', keys_dir)
 
-    # we use the same directory for the converted files, which
-    # will clutter the previously created one, which we don't care
-    # about anyways. in real runs, the original key will be retained.
+    # We use the same directory for the converted files, which
+    # will clutter the previously created one, which we don't care
+    # about anyway. In real runs, the original key will be retained.
     monkeypatch.setenv('BORG_KEYS_DIR', keys_dir)
     monkeypatch.setenv('ATTIC_PASSPHRASE', 'test')
 

+ 4 - 4
src/borg/testsuite/xattr.py

@@ -22,7 +22,7 @@ class XattrTestCase(BaseTestCase):
         os.unlink(self.symlink)
 
     def assert_equal_se(self, is_x, want_x):
-        # check 2 xattr lists for equality, but ignore security.selinux attr
+        # Check two xattr lists for equality, but ignore the security.selinux attribute.
         is_x = set(is_x) - {b'security.selinux', b'com.apple.provenance'}
         want_x = set(want_x)
         self.assert_equal(is_x, want_x)
@@ -38,7 +38,7 @@ class XattrTestCase(BaseTestCase):
         setxattr(tmp_fd, b'user.bar', b'foo')
         setxattr(tmp_fn, b'user.empty', b'')
         if not is_linux:
-            # linux does not allow setting user.* xattrs on symlinks
+            # Linux does not allow setting user.* xattrs on symlinks.
             setxattr(tmp_lfn, b'user.linkxattr', b'baz')
         self.assert_equal_se(listxattr(tmp_fn), [b'user.foo', b'user.bar', b'user.empty'])
         self.assert_equal_se(listxattr(tmp_fd), [b'user.foo', b'user.bar', b'user.empty'])
@@ -54,9 +54,9 @@ class XattrTestCase(BaseTestCase):
 
     def test_listxattr_buffer_growth(self):
         tmp_fn = os.fsencode(self.tmpfile.name)
-        # make it work even with ext4, which imposes rather low limits
+        # Make it work even with ext4, which imposes rather low limits.
         buffer.resize(size=64, init=True)
-        # xattr raw key list will be > 64
+        # xattr raw key list will be longer than 64 bytes
         keys = [b'user.attr%d' % i for i in range(20)]
         for key in keys:
             setxattr(tmp_fn, key, b'x')
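
The buffer growth is forced by simple arithmetic: the raw kernel list NUL-terminates each of the twenty names, so it needs well over the 64 bytes the buffer starts with:

    keys = [b'user.attr%d' % i for i in range(20)]
    raw_size = sum(len(k) + 1 for k in keys)  # +1 per key: NUL separator in the raw list
    assert raw_size == 230 > 64               # so the 64-byte buffer must grow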