
code: fix typos and grammar

Thomas Waldmann 1 week ago
parent
commit
3e6a9d677f

+ 1 - 1
src/borg/__init__.py

@@ -1,6 +1,6 @@
 from packaging.version import parse as parse_version
 
-# IMPORTANT keep imports from borg here to a minimum because our testsuite depends on
+# IMPORTANT keep imports from borg here to a minimum because our test suite depends on
 # being able to import borg.constants and then monkey patching borg.constants.PBKDF2_ITERATIONS
 from ._version import version as __version__
 

+ 5 - 5
src/borg/__main__.py

@@ -1,12 +1,12 @@
 import sys
 import os
 
-# On windows loading the bundled libcrypto dll fails if the folder
-# containing the dll is not in the search path. The dll is shipped
-# with python in the "DLLs" folder, so let's add this folder
-# to the path. The folder is always in sys.path, get it from there.
+# On Windows, loading the bundled libcrypto DLL fails if the folder
+# containing the DLL is not in the search path. The DLL is shipped
+# with Python in the "DLLs" folder, so let's add this folder
+# to the PATH. The folder is always in sys.path; get it from there.
 if sys.platform.startswith('win32'):
-    # Keep it an iterable to support multiple folder which contain "DLLs".
+    # Keep it as an iterable to support multiple folders that contain "DLLs".
     dll_path = (p for p in sys.path if 'DLLs' in os.path.normpath(p).split(os.path.sep))
     os.environ['PATH'] = os.pathsep.join(dll_path) + os.pathsep + os.environ['PATH']
 

+ 1 - 1
src/borg/algorithms/__init__.py

@@ -8,6 +8,6 @@ Ideally these would be sourced from existing libraries,
 but:
 
 - are frequently not available yet (lz4, zstd),
-- are available but in poor form (crc32), or
+- are available but in poor form (CRC32), or
 - don't really make sense as a library (xxHash).
 """

+ 11 - 11
src/borg/archive.py

@@ -219,20 +219,20 @@ def backup_io_iter(iterator):
 
 def stat_update_check(st_old, st_curr):
     """
-    this checks for some race conditions between the first filename-based stat()
-    we did before dispatching to the (hopefully correct) file type backup handler
-    and the (hopefully) fd-based fstat() we did in the handler.
+    This checks for race conditions between the first filename-based stat()
+    performed before dispatching to the (hopefully correct) file type backup handler
+    and the (hopefully) fd-based fstat() performed in the handler.
 
-    if there is a problematic difference (e.g. file type changed), we rather
-    skip the file than being tricked into a security problem.
+    If there is a problematic difference (e.g., the file type changed), we would rather
+    skip the file than risk a security problem.
 
-    such races should only happen if:
-    - we are backing up a live filesystem (no snapshot, not inactive)
-    - if files change due to normal fs activity at an unfortunate time
-    - if somebody is doing an attack against us
+    Such races should only happen if:
+    - We are backing up a live filesystem (no snapshot, not inactive).
+    - Files change due to normal fs activity at an unfortunate time.
+    - Somebody is performing an attack against us.
     """
-    # assuming that a file type change implicates a different inode change AND that inode numbers
-    # are not duplicate in a short timeframe, this check is redundant and solved by the ino check:
+    # assuming that a file type change implies an inode change AND that inode numbers
+    # are not duplicated in a short timeframe, this check is redundant and solved by the ino check:
     if stat.S_IFMT(st_old.st_mode) != stat.S_IFMT(st_curr.st_mode):
         # in this case, we dispatched to wrong handler - abort
         raise BackupRaceConditionError('file type changed (race condition), skipping file')
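
The pattern this docstring describes can be sketched in a few lines (a minimal illustration, not Borg's actual dispatch code; open_checked is a hypothetical helper and assumes a POSIX platform with O_NOFOLLOW):

    import os
    import stat

    def open_checked(path):
        st_old = os.stat(path, follow_symlinks=False)   # filename-based stat()
        fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
        st_curr = os.fstat(fd)                          # fd-based fstat()
        if (stat.S_IFMT(st_old.st_mode) != stat.S_IFMT(st_curr.st_mode)
                or st_old.st_ino != st_curr.st_ino):
            os.close(fd)
            raise OSError('race condition detected, skipping file')
        return fd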

+ 7 - 7
src/borg/archiver.py

@@ -1,9 +1,9 @@
-# borg cli interface / toplevel archiver code
+# Borg CLI interface / top-level archiver code
 
 import sys
 import traceback
 
-# quickfix to disallow running borg with assertions switched off
+# Quick fix to disallow running Borg with assertions switched off
 try:
     assert False
 except AssertionError:
@@ -98,9 +98,9 @@ try:
     from .selftest import selftest
     from .upgrader import AtticRepositoryUpgrader, BorgRepositoryUpgrader
 except BaseException:
-    # an unhandled exception in the try-block would cause the borg cli command to exit with rc 1 due to python's
-    # default behavior, see issue #4424.
-    # as borg defines rc 1 as WARNING, this would be a mismatch, because a crash should be an ERROR (rc 2).
+    # An unhandled exception in the try-block would cause the Borg CLI command to exit with rc 1 due to Python's
+    # default behavior; see issue #4424.
+    # As Borg defines rc 1 as WARNING, this would be a mismatch, because a crash should be an ERROR (rc 2).
     traceback.print_exc()
     sys.exit(2)  # == EXIT_ERROR
 
@@ -160,7 +160,7 @@ def with_repository(fake=False, invert_fake=False, create=False, lock=True,
     # `nonlocal` statement to access `lock` as modifications would also
     # affect the scope outside of `wrapper`. Subsequent calls would
     # only see the overwritten value of `lock`, not the original one.
-    # The solution is to define a place holder variable `_lock` to
+    # The solution is to define a placeholder variable `_lock` to
     # propagate the value into `wrapper`.
     _lock = lock
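
A stripped-down sketch of the placeholder pattern (hypothetical, simplified shape; the real decorator takes more parameters):

    def with_repository(lock=True):
        _lock = lock  # placeholder propagates the value into wrapper

        def decorator(func):
            def wrapper(*args, **kwargs):
                lock = _lock  # local copy; overwriting it here does not
                              # affect subsequent calls (no nonlocal needed)
                return func(*args, lock=lock, **kwargs)
            return wrapper
        return decorator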
 
@@ -5316,7 +5316,7 @@ class Archiver:
         selftest(logger)
 
     def _setup_implied_logging(self, args):
-        """ turn on INFO level logging for args that imply that they will produce output """
+        """Turn on INFO level logging for args that imply that they will produce output."""
         # map of option name to name of logger for that option
         option_logger = {
             'output_list': 'borg.output.list',

+ 6 - 6
src/borg/cache.py

@@ -35,7 +35,7 @@ from .platform import SaveFile
 from .remote import cache_if_remote
 from .repository import LIST_SCAN_LIMIT
 
-# note: cmtime might me either a ctime or a mtime timestamp
+# note: cmtime might be either a ctime or an mtime timestamp
 FileCacheEntry = namedtuple('FileCacheEntry', 'age inode size cmtime chunk_ids')
 
 
@@ -69,7 +69,7 @@ class SecurityManager:
 
     @staticmethod
     def destroy(repository, path=None):
-        """destroy the security dir for ``repository`` or at ``path``"""
+        """Destroy the security dir for ``repository`` or at ``path``."""
         path = path or get_security_dir(repository.id_str)
         if os.path.exists(path):
             shutil.rmtree(path)
@@ -338,7 +338,7 @@ class CacheConfig:
 
 
 class Cache:
-    """Client Side cache
+    """Client-side cache.
     """
     class CacheInitAbortedError(Error):
         """Cache initialization aborted"""
@@ -367,7 +367,7 @@ class Cache:
 
     @staticmethod
     def destroy(repository, path=None):
-        """destroy the cache for ``repository`` or at ``path``"""
+        """Destroy the cache for ``repository`` or at ``path``."""
         path = path or os.path.join(get_cache_dir(), repository.id_str)
         config = os.path.join(path, 'config')
         if os.path.exists(config):
@@ -416,7 +416,7 @@ Chunk index:    {0.total_unique_chunks:20d} {0.total_chunks:20d}"""
 
     def __init__(self, iec=False):
         self.iec = iec
-        self.pre12_meta = {}  # here we cache archive metadata for borg < 1.2
+        self.pre12_meta = {}  # Here we cache archive metadata for Borg < 1.2.
 
     def __str__(self):
         return self.str_format.format(self.format_tuple())
@@ -892,7 +892,7 @@ class LocalCache(CacheStatsMixin):
             return chunk_idx
 
         def legacy_cleanup():
-            """bring old cache dirs into the desired state (cleanup and adapt)"""
+            """Bring old cache dirs into the desired state (cleanup and adapt)."""
             try:
                 os.unlink(os.path.join(self.path, 'chunks.archive'))
             except:

+ 21 - 21
src/borg/constants.py

@@ -1,27 +1,27 @@
-# this set must be kept complete, otherwise the RobustUnpacker might malfunction:
+# This set must be kept complete; otherwise the RobustUnpacker might malfunction:
 ITEM_KEYS = frozenset(['path', 'source', 'rdev', 'chunks', 'chunks_healthy', 'hardlink_master',
                        'mode', 'user', 'group', 'uid', 'gid', 'mtime', 'atime', 'ctime', 'birthtime', 'size',
                        'xattrs', 'bsdflags', 'acl_nfs4', 'acl_access', 'acl_default', 'acl_extended',
                        'part'])
 
-# this is the set of keys that are always present in items:
+# This is the set of keys that are always present in items:
 REQUIRED_ITEM_KEYS = frozenset(['path', 'mtime', ])
 
-# this set must be kept complete, otherwise rebuild_manifest might malfunction:
+# This set must be kept complete; otherwise rebuild_manifest might malfunction:
 ARCHIVE_KEYS = frozenset(['version', 'name', 'items', 'cmdline', 'hostname', 'username', 'time', 'time_end',
                           'comment', 'chunker_params',
                           'recreate_cmdline',
                           'recreate_source_id', 'recreate_args', 'recreate_partial_chunks',  # used in 1.1.0b1 .. b2
                           'size', 'csize', 'nfiles', 'size_parts', 'csize_parts', 'nfiles_parts', ])
 
-# this is the set of keys that are always present in archives:
+# This is the set of keys that are always present in archives:
 REQUIRED_ARCHIVE_KEYS = frozenset(['version', 'name', 'items', 'cmdline', 'time', ])
 
-# default umask, overridden by --umask, defaults to read/write only for owner
+# Default umask, overridden by --umask; defaults to read/write only for owner
 UMASK_DEFAULT = 0o077
 
-# default file mode to store stdin data, defaults to read/write for owner and group
-# forcing to 0o100XXX later
+# Default file mode to store stdin data; defaults to read/write for owner and group.
+# Forcing to 0o100XXX later.
 STDIN_MODE_DEFAULT = 0o660
 
 CACHE_TAG_NAME = 'CACHEDIR.TAG'
@@ -34,7 +34,7 @@ CACHE_TAG_CONTENTS = b'Signature: 8a477f597d28d172789f06886806bc55'
 DEFAULT_MAX_SEGMENT_SIZE = 500 * 1024 * 1024
 
 # 20 MiB minus 41 bytes for a Repository header (because the "size" field in the Repository includes
-# the header, and the total size was set to 20 MiB).
+# the header, and the total size is set to 20 MiB).
 MAX_DATA_SIZE = 20971479
 
 # MAX_OBJECT_SIZE = <20 MiB (MAX_DATA_SIZE) + 41 bytes for a Repository PUT header, which consists of
@@ -52,12 +52,12 @@ zeros = bytes(MAX_DATA_SIZE)
 # borg.remote read() buffer size
 BUFSIZE = 10 * 1024 * 1024
 
-# to use a safe, limited unpacker, we need to set a upper limit to the archive count in the manifest.
-# this does not mean that you can always really reach that number, because it also needs to be less than
-# MAX_DATA_SIZE or it will trigger the check for that.
+# To use a safe, limited unpacker, we need to set an upper limit for the archive count in the manifest.
+# This does not mean that you can always reach that number, because it also needs to be less than
+# MAX_DATA_SIZE; otherwise it will trigger the check for that.
 MAX_ARCHIVES = 400000
 
-# repo.list() / .scan() result count limit the borg client uses
+# repo.list()/.scan() result count limit used by the Borg client
 LIST_SCAN_LIMIT = 100000
 
 DEFAULT_SEGMENTS_PER_DIR = 1000
@@ -70,10 +70,10 @@ MAX_SEGMENT_DIR_INDEX = 2**32 - 1
 
 FD_MAX_AGE = 4 * 60  # 4 minutes
 
-CHUNK_MIN_EXP = 19  # 2**19 == 512kiB
-CHUNK_MAX_EXP = 23  # 2**23 == 8MiB
-HASH_WINDOW_SIZE = 0xfff  # 4095B
-HASH_MASK_BITS = 21  # results in ~2MiB chunks statistically
+CHUNK_MIN_EXP = 19  # 2**19 == 512 KiB
+CHUNK_MAX_EXP = 23  # 2**23 == 8 MiB
+HASH_WINDOW_SIZE = 0xfff  # 4095 B
+HASH_MASK_BITS = 21  # Results in ~2 MiB chunks statistically
 
 # chunker algorithms
 CH_BUZHASH = 'buzhash'
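
A quick sanity check of these constants (2**HASH_MASK_BITS is the statistically expected cut interval of the buzhash chunker):

    print(2 ** 19)  # CHUNK_MIN_EXP  -> 524288 bytes  (512 KiB minimum chunk)
    print(2 ** 21)  # HASH_MASK_BITS -> 2097152 bytes (~2 MiB average chunk)
    print(2 ** 23)  # CHUNK_MAX_EXP  -> 8388608 bytes (8 MiB maximum chunk)
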
@@ -88,9 +88,9 @@ ITEMS_CHUNKER_PARAMS = (CH_BUZHASH, 15, 19, 17, HASH_WINDOW_SIZE)
 # normal on-disk data, allocated (but not written, all zeros), not allocated hole (all zeros)
 CH_DATA, CH_ALLOC, CH_HOLE = 0, 1, 2
 
-# operating mode of the files cache (for fast skipping of unchanged files)
+# Operating mode of the files cache (for fast skipping of unchanged files)
 FILES_CACHE_MODE_UI_DEFAULT = 'ctime,size,inode'  # default for "borg create" command (CLI UI)
-FILES_CACHE_MODE_DISABLED = 'd'  # most borg commands do not use the files cache at all (disable)
+FILES_CACHE_MODE_DISABLED = 'd'  # Most Borg commands do not use the files cache at all (disable).
 
 # return codes returned by borg command
 EXIT_SUCCESS = 0  # everything done, no problems
@@ -100,9 +100,9 @@ EXIT_ERROR_BASE = 3  # specific error codes are 3..99 (enabled by BORG_EXIT_CODE
 EXIT_WARNING_BASE = 100  # specific warning codes are 100..127 (enabled by BORG_EXIT_CODES=modern)
 EXIT_SIGNAL_BASE = 128  # terminated due to signal, rc = 128 + sig_no
 
-# never use datetime.isoformat(), it is evil. always use one of these:
-# datetime.strftime(ISO_FORMAT)  # output always includes .microseconds
-# datetime.strftime(ISO_FORMAT_NO_USECS)  # output never includes microseconds
+# Never use datetime.isoformat(); it is problematic. Always use one of these:
+# datetime.strftime(ISO_FORMAT)  # Output always includes .microseconds
+# datetime.strftime(ISO_FORMAT_NO_USECS)  # Output never includes microseconds
 ISO_FORMAT_NO_USECS = '%Y-%m-%dT%H:%M:%S'
 ISO_FORMAT = ISO_FORMAT_NO_USECS + '.%f'
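
The problem with datetime.isoformat() is that its output format varies: the fractional part is omitted entirely when microseconds are zero. A small demonstration using the constants above:

    from datetime import datetime

    dt = datetime(2024, 1, 2, 3, 4, 5)       # microseconds == 0
    print(dt.isoformat())                    # 2024-01-02T03:04:05 (no .%f part!)
    print(dt.strftime(ISO_FORMAT))           # 2024-01-02T03:04:05.000000
    print(dt.strftime(ISO_FORMAT_NO_USECS))  # 2024-01-02T03:04:05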
 

+ 10 - 10
src/borg/crypto/file_integrity.py

@@ -43,14 +43,14 @@ class FileHashingWrapper(FileLikeWrapper):
     Wrapper for file-like objects that computes a hash on-the-fly while reading/writing.
 
     WARNING: Seeks should only be used to query the size of the file, not
-    to skip data, because skipped data isn't read and not hashed into the digest.
+    to skip data, because skipped data is not read and therefore not hashed into the digest.
 
-    Similarly skipping while writing to create sparse files is also not supported.
+    Similarly, skipping while writing to create sparse files is also not supported.
 
-    Data has to be read/written in a symmetric fashion, otherwise different
+    Data has to be read/written in a symmetric fashion; otherwise different
     digests will be generated.
 
-    Note: When used as a context manager read/write operations outside the enclosed scope
+    Note: When used as a context manager, read/write operations outside the enclosed scope
     are illegal.
     """
 
@@ -85,9 +85,9 @@ class FileHashingWrapper(FileLikeWrapper):
 
     def hexdigest(self):
         """
-        Return current digest bytes as hex-string.
+        Return the current digest as a hex string.
 
-        Note: this can be called multiple times.
+        Note: This can be called multiple times.
         """
         return self.hash.hexdigest()
 
@@ -96,7 +96,7 @@ class FileHashingWrapper(FileLikeWrapper):
 
     def hash_length(self, seek_to_end=False):
         if seek_to_end:
-            # Add length of file to the hash to avoid problems if only a prefix is read.
+            # Add the length of the file to the hash to avoid problems if only a prefix is read.
             self.seek(0, io.SEEK_END)
         self.hash.update(str(self.tell()).encode())
 
@@ -150,10 +150,10 @@ class IntegrityCheckedFile(FileLikeWrapper):
             return self.parse_integrity_data(path, integrity_data)
 
     def hash_filename(self, filename=None):
-        # Hash the name of the file, but only the basename, ie. not the path.
-        # In Borg the name itself encodes the context (eg. index.N, cache, files),
+        # Hash the name of the file, but only the basename, i.e. not the path.
+        # In Borg the name itself encodes the context (e.g., index.N, cache, files),
         # while the path doesn't matter, and moving e.g. a repository or cache directory is supported.
-        # Changing the name however imbues a change of context that is not permissible.
+        # Changing the name, however, implies a change of context that is not permissible.
         # While Borg does not use anything except ASCII in these file names, it's important to use
         # the same encoding everywhere for portability. Using os.fsencode() would be wrong.
         filename = os.path.basename(filename or self.path)

+ 13 - 14
src/borg/crypto/key.py

@@ -37,22 +37,22 @@ AUTHENTICATED_NO_KEY = 'authenticated_no_key' in helpers.workarounds
 
 
 class NoPassphraseFailure(Error):
-    """can not acquire a passphrase: {}"""
+    """Cannot acquire a passphrase: {}"""
     exit_mcode = 50
 
 
 class PasscommandFailure(Error):
-    """passcommand supplied in BORG_PASSCOMMAND failed: {}"""
+    """Passcommand supplied in BORG_PASSCOMMAND failed: {}"""
     exit_mcode = 51
 
 
 class PassphraseWrong(Error):
-    """passphrase supplied in BORG_PASSPHRASE, by BORG_PASSCOMMAND or via BORG_PASSPHRASE_FD is incorrect."""
+    """Passphrase supplied via BORG_PASSPHRASE, by BORG_PASSCOMMAND, or via BORG_PASSPHRASE_FD is incorrect."""
     exit_mcode = 52
 
 
 class PasswordRetriesExceeded(Error):
-    """exceeded the maximum password retries"""
+    """Exceeded the maximum password retries."""
     exit_mcode = 53
 
 
@@ -193,7 +193,7 @@ class KeyBase:
     chunk_seed = None
 
     # Whether this *particular instance* is encrypted from a practical point of view,
-    # i.e. when it's using encryption with a empty passphrase, then
+    # i.e. when it's using encryption with an empty passphrase, then
     # that may be *technically* called encryption, but for all intents and purposes
     # that's as good as not encrypting in the first place, and this member should be False.
     #
@@ -212,8 +212,7 @@ class KeyBase:
         self.tam_required = True
 
     def id_hash(self, data):
-        """Return HMAC hash using the "id" HMAC key
-        """
+        """Return HMAC using the "id" HMAC key."""
 
     def encrypt(self, chunk):
         pass
@@ -297,7 +296,7 @@ class KeyBase:
         """Unpack msgpacked *data* and return (object, did_verify, salt)."""
         tam_required = self.tam_required
         if force_tam_not_required and tam_required:
-            # for a long time, borg only checked manifest for "tam_required" and
+            # For a long time, Borg only checked the manifest for "tam_required" and
             # people might have archives without TAM, so don't be too annoyingly loud here:
             logger.debug('Archive authentication DISABLED.')
             tam_required = False
@@ -394,7 +393,7 @@ def random_blake2b_256_key():
     # and len(block) >= len(state), hence wide.)
     # In other words, a key longer than 64 bytes would have simply no advantage, since the function
     # has no way of propagating more than 64 bytes of entropy internally.
-    # It's padded to a full block so that the key is never buffered internally by blake2b_update, ie.
+    # It's padded to a full block so that the key is never buffered internally by blake2b_update, i.e.
     # it remains in a single memory location that can be tracked and could be erased securely, if we
     # wanted to.
     return os.urandom(64) + bytes(64)
@@ -430,14 +429,14 @@ class ID_HMAC_SHA_256:
 
 class AESKeyBase(KeyBase):
     """
-    Common base class shared by KeyfileKey and PassphraseKey
+    Common base class shared by KeyfileKey and PassphraseKey.
 
-    Chunks are encrypted using 256bit AES in Counter Mode (CTR)
+    Chunks are encrypted using 256-bit AES in Counter Mode (CTR).
 
     Payload layout: TYPE(1) + HMAC(32) + NONCE(8) + CIPHERTEXT
 
-    To reduce payload size only 8 bytes of the 16 bytes nonce is saved
-    in the payload, the first 8 bytes are always zeros. This does not
+    To reduce payload size, only 8 bytes of the 16-byte nonce are saved
+    in the payload; the first 8 bytes are always zeros. This does not
     affect security but limits the maximum repository capacity to
     only 295 exabytes!
     """
@@ -520,7 +519,7 @@ class Passphrase(str):
     def env_passcommand(cls, default=None):
         passcommand = os.environ.get('BORG_PASSCOMMAND', None)
         if passcommand is not None:
-            # passcommand is a system command (not inside pyinstaller env)
+            # The passcommand is a system command (not inside the PyInstaller environment)
             env = prepare_subprocess_env(system=True)
             try:
                 passphrase = subprocess.check_output(shlex.split(passcommand), text=True, env=env)

+ 1 - 1
src/borg/crypto/keymanager.py

@@ -10,7 +10,7 @@ from .key import KeyfileKey, KeyfileNotFoundError, RepoKeyNotFoundError, KeyBlob
 
 
 class NotABorgKeyFile(Error):
-    """This file is not a borg key backup, aborting."""
+    """This file is not a Borg key backup, aborting."""
     exit_mcode = 43
 
 

+ 10 - 10
src/borg/crypto/nonces.py

@@ -10,7 +10,7 @@ from ..remote import InvalidRPCMethod
 from .low_level import bytes_to_long, long_to_bytes
 
 MAX_REPRESENTABLE_NONCE = 2**64 - 1
-NONCE_SPACE_RESERVATION = 2**28  # This in units of AES blocksize (16 bytes)
+NONCE_SPACE_RESERVATION = 2**28  # This is in units of AES block size (16 bytes)
 
 
 class NonceManager:
@@ -54,12 +54,12 @@ class NonceManager:
 
     def ensure_reservation(self, nonce, nonce_space_needed):
         """
-        Call this before doing encryption, give current, yet unused, integer IV as <nonce>
+        Call this before doing encryption; give the current, yet unused, integer IV as <nonce>
         and the amount of subsequent (counter-like) IVs needed as <nonce_space_needed>.
-        Return value is the IV (counter) integer you shall use for encryption.
+        The return value is the IV (counter) integer you should use for encryption.
 
-        Note: this method may return the <nonce> you gave, if a reservation for it exists or
-              can be established, so make sure you give a unused nonce.
+        Note: This method may return the <nonce> you gave if a reservation for it exists or
+              can be established, so make sure you give an unused nonce.
         """
         # Nonces may never repeat, even if a transaction aborts or the system crashes.
         # Therefore a part of the nonce space is reserved before any nonce is used for encryption.
@@ -67,16 +67,16 @@ class NonceManager:
         # against nonce reuse in crashes and transaction aborts. In that case the reservation still
         # persists and the whole reserved space is never reused.
         #
-        # Local storage on the client is used to protect against an attacker that is able to rollback the
+        # Local storage on the client is used to protect against an attacker that is able to roll back the
         # state of the server or can do arbitrary modifications to the repository.
-        # Storage on the server is used for the multi client use case where a transaction on client A is
+        # Storage on the server is used for the multi-client use case where a transaction on client A is
         # aborted and later client B writes to the repository.
         #
-        # This scheme does not protect against attacker who is able to rollback the state of the server
-        # or can do arbitrary modifications to the repository in the multi client usecase.
+        # This scheme does not protect against an attacker who is able to roll back the state of the server
+        # or can do arbitrary modifications to the repository in the multi-client use case.
 
         if self.end_of_nonce_reservation:
-            # we already got a reservation, if nonce_space_needed still fits everything is ok
+            # We already have a reservation; if nonce_space_needed still fits, everything is okay.
             next_nonce = nonce
             assert next_nonce <= self.end_of_nonce_reservation
             if next_nonce + nonce_space_needed <= self.end_of_nonce_reservation:
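
The reservation arithmetic amounts to rounding the end of the needed nonce space up to the next multiple of NONCE_SPACE_RESERVATION (a sketch with a hypothetical helper, not the actual method body):

    def reserve(nonce, nonce_space_needed):
        # Claim a whole block of nonce space beyond what is needed right now,
        # so crashes/aborts only waste the remainder of the reserved block.
        needed_end = nonce + nonce_space_needed
        return ((needed_end // NONCE_SPACE_RESERVATION) + 1) * NONCE_SPACE_RESERVATION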

+ 2 - 2
src/borg/fuse.py

@@ -59,7 +59,7 @@ def fuse_main():
 # size of some LRUCaches (1 element per simultaneously open file)
 # note: _inode_cache might have rather large elements - Item.chunks can be large!
 #       also, simultaneously reading too many files should be avoided anyway.
-#       thus, do not set FILES to high values.
+#       thus, do not set FILES to very high values.
 FILES = 4
 
 
@@ -524,7 +524,7 @@ class FuseOperations(llfuse.Operations, FuseBackend):
         if mount_options:
             options.extend(mount_options.split(','))
         if is_darwin:
-            # macFUSE supports a volname mount option to give what finder displays on desktop / in directory list.
+            # macFUSE supports a volname mount option to give what Finder displays on the desktop / in directory list.
             volname = pop_option(options, 'volname', '', '', str)
             # if the user did not specify it, we make something up,
             # because otherwise it would be "macFUSE Volume 0 (Python)", #7690.

+ 1 - 1
src/borg/fuse_impl.py

@@ -1,5 +1,5 @@
 """
-load library for lowlevel FUSE implementation
+Load library for low-level FUSE implementation.
 """
 
 import os

+ 18 - 19
src/borg/helpers/__init__.py

@@ -1,9 +1,8 @@
 """
-This package contains all sorts of small helper / utility functionality,
-that did not fit better elsewhere.
+This package contains various small helper/utility functions that did not fit better elsewhere.
 
-Code used to be in borg/helpers.py but was split into the modules in this
-package, which are imported into here for compatibility.
+Code used to be in borg/helpers.py but was split into modules in this
+package, which are imported here for compatibility.
 """
 from contextlib import contextmanager
 
@@ -22,9 +21,9 @@ from .yes import *  # NOQA
 from .msgpack import is_slow_msgpack, is_supported_msgpack, int_to_bigint, bigint_to_int, get_limited_unpacker
 from . import msgpack
 
-# generic mechanism to enable users to invoke workarounds by setting the
+# Generic mechanism to enable users to invoke workarounds by setting the
 # BORG_WORKAROUNDS environment variable to a list of comma-separated strings.
-# see the docs for a list of known workaround strings.
+# See the docs for a list of known workaround strings.
 workarounds = tuple(os.environ.get('BORG_WORKAROUNDS', '').split(','))
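
For example, using the workaround string mentioned below, the mechanism boils down to:

    import os

    os.environ['BORG_WORKAROUNDS'] = 'ignore_invalid_archive_tam'
    workarounds = tuple(os.environ.get('BORG_WORKAROUNDS', '').split(','))
    assert 'ignore_invalid_archive_tam' in workarounds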
 
 
@@ -33,7 +32,7 @@ def ignore_invalid_archive_tam():
     global workarounds
     saved = workarounds
     if 'ignore_invalid_archive_tam' not in workarounds:
-        # we really need this workaround here or borg will likely raise an exception.
+        # We really need this workaround here, or Borg will likely raise an exception.
         workarounds += ('ignore_invalid_archive_tam',)
     yield
     workarounds = saved
@@ -43,7 +42,7 @@ def ignore_invalid_archive_tam():
 warning_info = namedtuple("warning_info", "wc,msg,args,wt")
 
 """
-The global warnings_list variable is used to collect warning_info elements while borg is running.
+The global warnings_list variable is used to collect warning_info elements while Borg is running.
 """
 _warnings_list = []
 
@@ -80,8 +79,8 @@ def classify_ec(ec):
 
 
 def max_ec(ec1, ec2):
-    """return the more severe error code of ec1 and ec2"""
-    # note: usually, there can be only 1 error-class ec, the other ec is then either success or warning.
+    """Return the more severe error code of ec1 and ec2."""
+    # Note: Usually, there can be only one error-class ec; the other ec is then either success or warning.
     ec1_class = classify_ec(ec1)
     ec2_class = classify_ec(ec2)
     if ec1_class == "signal":
@@ -102,7 +101,7 @@ def max_ec(ec1, ec2):
 
 def set_ec(ec):
     """
-    Sets the exit code of the program to ec IF ec is more severe than the current exit code.
+    Set the exit code of the program to ec IF ec is more severe than the current exit code.
     """
     global _exit_code
     _exit_code = max_ec(_exit_code, ec)
@@ -110,7 +109,7 @@ def set_ec(ec):
 
 def init_ec_warnings(ec=EXIT_SUCCESS, warnings=None):
     """
-    (Re-)Init the globals for the exit code and the warnings list.
+    (Re-)Initialize the globals for the exit code and the warnings list.
     """
     global _exit_code, _warnings_list
     _exit_code = ec
@@ -121,7 +120,7 @@ def init_ec_warnings(ec=EXIT_SUCCESS, warnings=None):
 
 def get_ec(ec=None):
     """
-    compute the final return code of the borg process
+    Compute the final return code of the Borg process.
     """
     if ec is not None:
         set_ec(ec)
@@ -129,21 +128,21 @@ def get_ec(ec=None):
     global _exit_code
     exit_code_class = classify_ec(_exit_code)
     if exit_code_class in ("signal", "error", "warning"):
-        # there was a signal/error/warning, return its exit code
+        # There was a signal/error/warning; return its exit code.
         return _exit_code
     assert exit_code_class == "success"
     global _warnings_list
     if not _warnings_list:
-        # we do not have any warnings in warnings list, return success exit code
+        # We do not have any warnings in the warnings list; return the success exit code.
         return _exit_code
-    # looks like we have some warning(s)
+    # There are some warning(s).
     rcs = sorted(set(w_info.wc for w_info in _warnings_list))
     logger.debug(f"rcs: {rcs!r}")
     if len(rcs) == 1:
-        # easy: there was only one kind of warning, so we can be specific
+        # Easy: there was only one kind of warning, so we can be specific.
         return rcs[0]
-    # there were different kinds of warnings
-    return EXIT_WARNING  # generic warning rc, user has to look into the logs
+    # There were different kinds of warnings.
+    return EXIT_WARNING  # generic warning rc; user has to look into the logs
 
 
 def get_reset_ec(ec=None):
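
The warning-collapsing logic in get_ec() can be summarized in a standalone sketch (the specific warning codes here are hypothetical examples):

    EXIT_SUCCESS, EXIT_WARNING = 0, 1

    def final_warning_rc(warning_codes):
        rcs = sorted(set(warning_codes))
        if not rcs:
            return EXIT_SUCCESS
        if len(rcs) == 1:
            return rcs[0]        # only one kind of warning: be specific
        return EXIT_WARNING      # mixed warnings: generic rc 1

    print(final_warning_rc([]))          # 0
    print(final_warning_rc([107, 107]))  # 107
    print(final_warning_rc([105, 107]))  # 1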

+ 19 - 19
src/borg/helpers/datastruct.py

@@ -2,7 +2,7 @@ from .errors import Error
 
 
 class StableDict(dict):
-    """A dict subclass with stable items() ordering"""
+    """A dict subclass with stable items() ordering."""
     def items(self):
         return sorted(super().items())
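
Usage is straightforward; with the class above, items() ordering no longer depends on insertion order:

    sd = StableDict(b=2, a=1)
    print(list(sd.items()))  # [('a', 1), ('b', 2)]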
 
@@ -17,8 +17,8 @@ class Buffer:
 
     def __init__(self, allocator, size=4096, limit=None):
         """
-        Initialize the buffer: use allocator(size) call to allocate a buffer.
-        Optionally, set the upper <limit> for the buffer size.
+        Initialize the buffer: use allocator(size) to allocate a buffer.
+        Optionally set the upper limit for the buffer size via limit.
         """
         assert callable(allocator), 'must give alloc(size) function as first param'
         assert limit is None or size <= limit, 'initial size must be <= limit'
@@ -31,9 +31,9 @@ class Buffer:
 
     def resize(self, size, init=False):
         """
-        resize the buffer - to avoid frequent reallocation, we usually always grow (if needed).
-        giving init=True it is possible to first-time initialize or shrink the buffer.
-        if a buffer size beyond the limit is requested, raise Buffer.MemoryLimitExceeded (OSError).
+        Resize the buffer. To avoid frequent reallocation, we usually grow (if needed).
+        By giving init=True it is possible to first-time initialize or shrink the buffer.
+        If a buffer size beyond the limit is requested, raise Buffer.MemoryLimitExceeded (OSError).
         """
         size = int(size)
         if self.limit is not None and size > self.limit:
@@ -43,7 +43,7 @@ class Buffer:
 
     def get(self, size=None, init=False):
         """
-        return a buffer of at least the requested size (None: any current size).
+        Return a buffer of at least the requested size (None: any current size).
         init=True can be given to trigger shrinking of the buffer to the given size.
         """
         if size is not None:
@@ -57,13 +57,13 @@ class EfficientCollectionQueue:
     """
 
     class SizeUnderflow(Error):
-        """Could not pop_front first {} elements, collection only has {} elements.."""
+        """Could not pop_front the first {} elements; collection only has {} elements."""
 
     def __init__(self, split_size, member_type):
         """
-        Initializes empty queue.
-        Requires split_size to define maximum chunk size.
-        Requires member_type to be type defining what base collection looks like.
+        Initialize an empty queue.
+        Requires split_size to define the maximum chunk size.
+        Requires member_type to be the type defining what the base collection looks like.
         """
         self.buffers = []
         self.size = 0
@@ -72,9 +72,9 @@ class EfficientCollectionQueue:
 
     def peek_front(self):
         """
-        Returns first chunk from queue without removing it.
-        Returned collection will have between 1 and split_size length.
-        Returns empty collection when nothing is queued.
+        Return the first chunk from the queue without removing it.
+        The returned collection will have between 1 and split_size elements.
+        Returns an empty collection when nothing is queued.
         """
         if not self.buffers:
             return self.member_type()
@@ -83,8 +83,8 @@ class EfficientCollectionQueue:
 
     def pop_front(self, size):
         """
-        Removes first size elements from queue.
-        Throws if requested removal size is larger than whole queue.
+        Remove the first size elements from the queue.
+        Raises if the requested removal size is larger than the whole queue.
         """
         if size > self.size:
             raise EfficientCollectionQueue.SizeUnderflow(size, self.size)
@@ -101,8 +101,8 @@ class EfficientCollectionQueue:
 
     def push_back(self, data):
         """
-        Adds data at end of queue.
-        Takes care to chunk data into split_size sized elements.
+        Add data at the end of the queue.
+        Takes care to chunk data into split_size-sized elements.
         """
         if not self.buffers:
             self.buffers = [self.member_type()]
@@ -126,6 +126,6 @@ class EfficientCollectionQueue:
 
     def __bool__(self):
         """
-        Returns true if queue isn't empty.
+        Return True if the queue is not empty.
         """
         return self.size != 0
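
A short usage sketch based on the docstrings above (behavior as documented, e.g. with bytes members):

    q = EfficientCollectionQueue(2, bytes)  # split_size=2, member_type=bytes
    q.push_back(b'abcde')  # chunked internally into b'ab', b'cd', b'e'
    print(q.peek_front())  # b'ab'
    q.pop_front(2)         # remove the first two elements
    print(q.peek_front())  # b'cd'
    print(bool(q))         # True, three elements remain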

+ 5 - 5
src/borg/helpers/errors.py

@@ -12,8 +12,8 @@ class ErrorBase(Exception):
     """ErrorBase: {}"""
     # Error base class
 
-    # if we raise such an Error and it is only caught by the uppermost
-    # exception handler (that exits short after with the given exit_code),
+    # If we raise such an Error and it is only caught by the uppermost
+    # exception handler (that exits shortly after with the given exit_code),
     # it is always a (fatal and abrupt) error, never just a warning.
     exit_mcode = EXIT_ERROR  # modern, more specific exit code (defaults to EXIT_ERROR)
 
@@ -31,7 +31,7 @@ class ErrorBase(Exception):
 
     @property
     def exit_code(self):
-        # legacy: borg used to always use rc 2 (EXIT_ERROR) for all errors.
+        # legacy: Borg used to always use rc 2 (EXIT_ERROR) for all errors.
         # modern: users can opt in to more specific return codes, using BORG_EXIT_CODES:
         return self.exit_mcode if modern_ec else EXIT_ERROR
 
@@ -88,7 +88,7 @@ class BorgWarning:
 
     @property
     def exit_code(self):
-        # legacy: borg used to always use rc 1 (EXIT_WARNING) for all warnings.
+        # legacy: Borg used to always use rc 1 (EXIT_WARNING) for all warnings.
         # modern: users can opt in to more specific return codes, using BORG_EXIT_CODES:
         return self.exit_mcode if modern_ec else EXIT_WARNING
 
@@ -105,7 +105,7 @@ class IncludePatternNeverMatchedWarning(BorgWarning):
 
 class BackupWarning(BorgWarning):
     """{}: {}"""
-    # this is to wrap a caught BackupError exception, so it can be given to print_warning_instance
+    # This is to wrap a caught BackupError exception so it can be given to print_warning_instance.
 
     @property
     def exit_code(self):

+ 15 - 14
src/borg/helpers/fs.py

@@ -20,14 +20,15 @@ logger = create_logger()
 
 def ensure_dir(path, mode=stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO, pretty_deadly=True):
     """
-    Ensures that the dir exists with the right permissions.
-    1) Make sure the directory exists in a race-free operation
+    Ensure that the directory exists with the right permissions.
+    1) Make sure the directory exists in a race-free operation.
     2) If mode is not None and the directory has been created, give the right
-    permissions to the leaf directory. The current umask value is masked out first.
-    3) If pretty_deadly is True, catch exceptions, reraise them with a pretty
-    message.
-    Returns if the directory has been created and has the right permissions,
-    An exception otherwise. If a deadly exception happened it is reraised.
+       permissions to the leaf directory. The current umask value is masked out first.
+    3) If pretty_deadly is True, catch exceptions and re-raise them with a clearer
+       message.
+
+    Returns normally if the directory exists (or was created) and has the right permissions;
+    raises an exception otherwise. If a fatal exception happened, it is re-raised.
     """
     try:
         os.makedirs(path, mode=mode, exist_ok=True)
@@ -57,7 +58,7 @@ def get_base_dir():
 
 
 def get_keys_dir():
-    """Determine where to repository keys and cache"""
+    """Determine where to store repository keys."""
     keys_dir = os.environ.get('BORG_KEYS_DIR')
     if keys_dir is None:
         # note: do not just give this as default to the environment.get(), see issue #5979.
@@ -79,7 +80,7 @@ def get_security_dir(repository_id=None):
 
 
 def get_cache_dir():
-    """Determine where to repository keys and cache"""
+    """Determine where to store the cache."""
     # Get cache home path
     cache_home = os.path.join(get_base_dir(), '.cache')
     # Try to use XDG_CACHE_HOME instead if BORG_BASE_DIR isn't explicitly set
@@ -103,7 +104,7 @@ def get_cache_dir():
 
 
 def get_config_dir():
-    """Determine where to store whole config"""
+    """Determine where to store the whole configuration."""
     # Get config home path
     config_home = os.path.join(get_base_dir(), '.config')
     # Try to use XDG_CONFIG_HOME instead if BORG_BASE_DIR isn't explicitly set
@@ -157,7 +158,7 @@ _safe_re = re.compile(r'^((\.\.)?/+)+')
 
 
 def make_path_safe(path):
-    """Make path safe by making it relative and local
+    """Make path safe by making it relative and local.
     """
     return _safe_re.sub('', path) or '.'
 
@@ -178,7 +179,7 @@ def get_strip_prefix(path):
 
 
 def hardlinkable(mode):
-    """return True if we support hardlinked items of this type"""
+    """Return True if we support hardlinked items of this type."""
     return stat.S_ISREG(mode) or stat.S_ISBLK(mode) or stat.S_ISCHR(mode) or stat.S_ISFIFO(mode)
 
 
@@ -282,7 +283,7 @@ flags_dir = O_('DIRECTORY', 'RDONLY', 'NOFOLLOW')
 
 def os_open(*, flags, path=None, parent_fd=None, name=None, noatime=False):
     """
-    Use os.open to open a fs item.
+    Use os.open to open a filesystem item.
 
     If parent_fd and name are given, they are preferred and openat will be used,
     path is not used in this case.
@@ -328,7 +329,7 @@ def os_open(*, flags, path=None, parent_fd=None, name=None, noatime=False):
 
 def os_stat(*, path=None, parent_fd=None, name=None, follow_symlinks=False):
     """
-    Use os.stat to open a fs item.
+    Use os.stat to stat a filesystem item.
 
     If parent_fd and name are given, they are preferred and statat will be used,
     path is not used in this case.

+ 8 - 8
src/borg/helpers/manifest.py

@@ -19,7 +19,7 @@ from ..constants import *  # NOQA
 
 
 class MandatoryFeatureUnsupported(Error):
-    """Unsupported repository feature(s) {}. A newer version of borg is required to access this repository."""
+    """Unsupported repository feature(s) {}. A newer version of Borg is required to access this repository."""
     exit_mcode = 25
 
 
@@ -79,16 +79,16 @@ class Archives(abc.MutableMapping):
 
     def list(self, *, glob=None, match_end=r'\Z', sort_by=(), consider_checkpoints=True, first=None, last=None, reverse=False):
         """
-        Return list of ArchiveInfo instances according to the parameters.
+        Return a list of ArchiveInfo instances according to the parameters.
 
         First match *glob* (considering *match_end*), then *sort_by*.
         Apply *first* and *last* filters, and then possibly *reverse* the list.
 
         *sort_by* is a list of sort keys applied in reverse order.
 
-        Note: for better robustness, all filtering / limiting parameters must default to
-              "not limit / not filter", so a FULL archive list is produced by a simple .list().
-              some callers EXPECT to iterate over all archives in a repo for correct operation.
+        Note: For better robustness, all filtering/limiting parameters must default to
+              "not limit / not filter", so a full archive list is produced by a simple .list().
+              Some callers expect to iterate over all archives in a repo for correct operation.
         """
         if isinstance(sort_by, (str, bytes)):
             raise TypeError('sort_by must be a sequence of str')
@@ -108,7 +108,7 @@ class Archives(abc.MutableMapping):
 
     def list_considering(self, args):
         """
-        get a list of archives, considering --first/last/prefix/glob-archives/sort/consider-checkpoints cmdline args
+        Get a list of archives, considering --first/last/prefix/glob-archives/sort/consider-checkpoints command-line arguments.
         """
         if args.location.archive:
             raise Error('The options --first, --last, --prefix, and --glob-archives, and --consider-checkpoints can only be used on repository targets.')
@@ -117,14 +117,14 @@ class Archives(abc.MutableMapping):
         return self.list(sort_by=args.sort_by.split(','), consider_checkpoints=args.consider_checkpoints, glob=args.glob_archives, first=args.first, last=args.last)
 
     def set_raw_dict(self, d):
-        """set the dict we get from the msgpack unpacker"""
+        """Set the dict we get from the msgpack unpacker."""
         for k, v in d.items():
             assert isinstance(k, bytes)
             assert isinstance(v, dict) and b'id' in v and b'time' in v
             self._archives[k] = v
 
     def get_raw_dict(self):
-        """get the dict we can give to the msgpack packer"""
+        """Get the dict we can give to the msgpack packer."""
         return self._archives
 
 

+ 14 - 14
src/borg/helpers/misc.py

@@ -41,21 +41,21 @@ def default_period_func(pattern):
 def quarterly_13weekly_period_func(a):
     (year, week, _) = to_localtime(a.ts).isocalendar()
     if week <= 13:
-        # Weeks containing Jan 4th to Mar 28th (leap year) or 29th- 91 (13*7)
+        # Weeks containing Jan 4th to Mar 28th (leap year) or 29th, 91 (13*7)
         # days later.
         return (year, 1)
     elif 14 <= week <= 26:
-        # Weeks containing Apr 4th (leap year) or 5th to Jun 27th or 28th- 91
+        # Weeks containing Apr 4th (leap year) or 5th to Jun 27th or 28th, 91
         # days later.
         return (year, 2)
     elif 27 <= week <= 39:
-        # Weeks containing Jul 4th (leap year) or 5th to Sep 26th or 27th-
+        # Weeks containing Jul 4th (leap year) or 5th to Sep 26th or 27th,
         # at least 91 days later.
         return (year, 3)
     else:
-        # Everything else, Oct 3rd (leap year) or 4th onward, will always
-        # include week of Dec 26th (leap year) or Dec 27th, may also include
-        # up to possibly Jan 3rd of next year.
+        # Everything else: Oct 3rd (leap year) or 4th onward; will always
+        # include the week of Dec 26th (leap year) or Dec 27th and may also include
+        # up to Jan 3rd of next year.
         return (year, 4)
 
 
@@ -155,9 +155,9 @@ def sysinfo():
 
 def log_multi(*msgs, level=logging.INFO, logger=logger):
     """
-    log multiple lines of text, each line by a separate logging call for cosmetic reasons
+    Log multiple lines of text, each line via a separate logging call for cosmetic reasons.
 
-    each positional argument may be a single or multiple lines (separated by newlines) of text.
+    Each positional argument may be a single or multiple lines (separated by newlines) of text.
     """
     lines = []
     for msg in msgs:
@@ -171,14 +171,14 @@ def normalize_chunker_params(cp):
     if isinstance(cp, list):
         cp = tuple(cp)
     if len(cp) == 4 and isinstance(cp[0], int):
-        # this is a borg < 1.2 chunker_params tuple, no chunker algo specified, but we only had buzhash:
+        # This is a Borg < 1.2 chunker_params tuple: no chunker algorithm specified, but we only had buzhash.
         cp = (CH_BUZHASH, ) + cp
     assert cp[0] in (CH_BUZHASH, CH_FIXED)
     return cp
 
 
 class ChunkIteratorFileWrapper:
-    """File-like wrapper for chunk iterators"""
+    """File-like wrapper for chunk iterators."""
 
     def __init__(self, chunk_iterator, read_callback=None):
         """
@@ -245,13 +245,13 @@ def chunkit(it, size):
 
 
 def consume(iterator, n=None):
-    """Advance the iterator n-steps ahead. If n is none, consume entirely."""
+    """Advance the iterator n steps ahead. If n is None, consume entirely."""
     # Use functions that consume iterators at C speed.
     if n is None:
-        # feed the entire iterator into a zero-length deque
+        # Feed the entire iterator into a zero-length deque.
         deque(iterator, maxlen=0)
     else:
-        # advance to the empty slice starting at position n
+        # Advance to the empty slice starting at position n.
         next(islice(iterator, n, n), None)
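
This is the classic itertools "consume" recipe; for example:

    it = iter(range(10))
    consume(it, 3)           # skip 0, 1, 2
    print(next(it))          # 3
    consume(it)              # exhaust the rest
    print(next(it, 'done'))  # done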
 
 
@@ -280,7 +280,7 @@ class ErrorIgnoringTextIOWrapper(io.TextIOWrapper):
 
 
 def iter_separated(fd, sep=None, read_size=4096):
-    """Iter over chunks of open file ``fd`` delimited by ``sep``. Doesn't trim."""
+    """Iterate over chunks of the open file ``fd`` delimited by ``sep``. Does not trim."""
     buf = fd.read(read_size)
     is_str = isinstance(buf, str)
     part = '' if is_str else b''

+ 2 - 2
src/borg/helpers/msgpack.py

@@ -33,11 +33,11 @@ version = mp_version
 
 
 class PackException(Exception):
-    """Exception while msgpack packing"""
+    """Exception while msgpack packing."""
 
 
 class UnpackException(Exception):
-    """Exception while msgpack unpacking"""
+    """Exception while msgpack unpacking."""
 
 
 class Packer(mp_Packer):

+ 5 - 5
src/borg/helpers/parseformat.py

@@ -42,26 +42,26 @@ def hex_to_bin(hex, length=None):
 
 
 def safe_decode(s, coding='utf-8', errors='surrogateescape'):
-    """decode bytes to str, with round-tripping "invalid" bytes"""
+    """Decode bytes to str, with round-tripping of "invalid" bytes."""
     if s is None:
         return None
     return s.decode(coding, errors)
 
 
 def safe_encode(s, coding='utf-8', errors='surrogateescape'):
-    """encode str to bytes, with round-tripping "invalid" bytes"""
+    """Encode str to bytes, with round-tripping of "invalid" bytes."""
     if s is None:
         return None
     return s.encode(coding, errors)
 
 
 def remove_surrogates(s, errors='replace'):
-    """Replace surrogates generated by fsdecode with '?'"""
+    """Replace surrogates generated by fsdecode with '?'."""
     return s.encode('utf-8', errors).decode('utf-8')
 
 
 def eval_escapes(s):
-    """Evaluate literal escape sequences in a string (eg `\\n` -> `\n`)."""
+    """Evaluate literal escape sequences in a string (e.g., `\\n` -> `\n`)."""
     return s.encode('ascii', 'backslashreplace').decode('unicode-escape')
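
The surrogateescape round-tripping can be demonstrated with a byte sequence that is not valid UTF-8:

    raw = b'caf\xe9'              # Latin-1 bytes, invalid as UTF-8
    s = safe_decode(raw)          # 'caf\udce9': the bad byte becomes a surrogate
    assert safe_encode(s) == raw  # and round-trips losslessly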
 
 
@@ -73,7 +73,7 @@ def decode_dict(d, keys, encoding='utf-8', errors='surrogateescape'):
 
 
 def positive_int_validator(value):
-    """argparse type for positive integers"""
+    """Argparse type for positive integers."""
     int_value = int(value)
     if int_value <= 0:
         raise argparse.ArgumentTypeError('A positive integer is required: %s' % value)

+ 2 - 2
src/borg/helpers/process.py

@@ -53,9 +53,9 @@ def _daemonize():
 
 
 def daemonize():
-    """Detach process from controlling terminal and run in background
+    """Detach process from controlling terminal and run in background.
 
-    Returns: old and new get_process_id tuples
+    Returns: old and new get_process_id tuples.
     """
     with _daemonize() as (old_id, new_id):
         return old_id, new_id

+ 6 - 6
src/borg/helpers/progress.py

@@ -12,7 +12,7 @@ from .parseformat import ellipsis_truncate
 
 def justify_to_terminal_size(message):
     terminal_space = get_terminal_size(fallback=(-1, -1))[0]
-    # justify only if we are outputting to a terminal
+    # Justify only if we are outputting to a terminal.
     if terminal_space != -1:
         return message.ljust(terminal_space)
     return message
@@ -110,12 +110,12 @@ class ProgressIndicatorPercent(ProgressIndicatorBase):
 
     def __init__(self, total=0, step=5, start=0, msg="%3.0f%%", msgid=None):
         """
-        Percentage-based progress indicator
+        Percentage-based progress indicator.
 
-        :param total: total amount of items
-        :param step: step size in percent
-        :param start: at which percent value to start
-        :param msg: output message, must contain one %f placeholder for the percentage
+        :param total: Total number of items.
+        :param step: Step size in percent.
+        :param start: At which percentage value to start.
+        :param msg: Output message; must contain one %f placeholder for the percentage.
         """
         self.counter = 0  # 0 .. (total-1)
         self.total = total

+ 14 - 16
src/borg/helpers/time.py

@@ -6,17 +6,17 @@ from ..constants import ISO_FORMAT, ISO_FORMAT_NO_USECS
 
 
 def to_localtime(ts):
-    """Convert datetime object from UTC to local time zone"""
+    """Convert a datetime object from UTC to the local time zone."""
     return datetime(*time.localtime((ts - datetime(1970, 1, 1, tzinfo=timezone.utc)).total_seconds())[:6])
 
 
 def utcnow():
-    """Returns a naive datetime instance representing the time in the UTC timezone"""
+    """Return a naive datetime instance representing the time in the UTC time zone."""
     return datetime.now(timezone.utc).replace(tzinfo=None)
 
 
 def parse_timestamp(timestamp, tzinfo=timezone.utc):
-    """Parse a ISO 8601 timestamp string"""
+    """Parse an ISO 8601 timestamp string."""
     fmt = ISO_FORMAT if '.' in timestamp else ISO_FORMAT_NO_USECS
     dt = datetime.strptime(timestamp, fmt)
     if tzinfo is not None:
@@ -25,13 +25,13 @@ def parse_timestamp(timestamp, tzinfo=timezone.utc):
 
 
 def timestamp(s):
-    """Convert a --timestamp=s argument to a datetime object"""
+    """Convert a --timestamp=s argument to a datetime object."""
     try:
-        # is it pointing to a file / directory?
+        # Is it pointing to a file/directory?
         ts = safe_s(os.stat(s).st_mtime)
         return datetime.fromtimestamp(ts, tz=timezone.utc)
     except OSError:
-        # didn't work, try parsing as timestamp. UTC, no TZ, no microsecs support.
+        # Didn't work; try parsing as a timestamp. UTC, no time zone, no microseconds support.
         for format in ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S+00:00',
                        '%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S',
                        '%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M',
@@ -48,24 +48,24 @@ def timestamp(s):
 # As they are crap anyway (valid filesystem timestamps always refer to the past up to
 # the present, but never to the future), nothing is lost if we just clamp them to the
 # maximum value we can support.
-# As long as people are using borg on 32bit platforms to access borg archives, we must
-# keep this value True. But we can expect that we can stop supporting 32bit platforms
+# As long as people are using Borg on 32-bit platforms to access Borg archives, we must
+# keep this value True. But we can expect that we can stop supporting 32-bit platforms
 # well before coming close to the year 2038, so this will never be a practical problem.
 SUPPORT_32BIT_PLATFORMS = True  # set this to False before y2038.
 
 if SUPPORT_32BIT_PLATFORMS:
     # second timestamps will fit into a signed int32 (platform time_t limit).
     # nanosecond timestamps thus will naturally fit into a signed int64.
-    # subtract last 48h to avoid any issues that could be caused by tz calculations.
-    # this is in the year 2038, so it is also less than y9999 (which is a datetime internal limit).
+    # Subtract the last 48 h to avoid any issues that could be caused by time zone calculations.
+    # This is in the year 2038, so it is also less than y9999 (which is a datetime internal limit).
     # msgpack can pack up to uint64.
     MAX_S = 2**31-1 - 48*3600
     MAX_NS = MAX_S * 1000000000
 else:
     # nanosecond timestamps will fit into a signed int64.
-    # subtract last 48h to avoid any issues that could be caused by tz calculations.
-    # this is in the year 2262, so it is also less than y9999 (which is a datetime internal limit).
-    # round down to 1e9 multiple, so MAX_NS corresponds precisely to a integer MAX_S.
+    # Subtract the last 48 h to avoid any issues that could be caused by time zone calculations.
+    # This is in the year 2262, so it is also less than y9999 (which is a datetime internal limit).
+    # Round down to a 1e9 multiple so MAX_NS corresponds precisely to an integer MAX_S.
     # msgpack can pack up to uint64.
     MAX_NS = (2**63-1 - 48*3600*1000000000) // 1000000000 * 1000000000
     MAX_S = MAX_NS // 1000000000
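
The concrete limits these formulas produce (a worked check):

    print(2**31 - 1 - 48*3600)   # 2147310847 s -> 2038-01-17 (32-bit case)
    max_ns = (2**63 - 1 - 48*3600*1000000000) // 1000000000 * 1000000000
    print(max_ns // 1000000000)  # 9223199236 s -> year 2262 (64-bit case)
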
@@ -95,9 +95,7 @@ def safe_timestamp(item_timestamp_ns):
 
 
 def format_time(ts: datetime, format_spec=''):
-    """
-    Convert *ts* to a human-friendly format with textual weekday.
-    """
+    """Convert *ts* to a human-friendly format with textual weekday."""
     return ts.strftime('%a, %Y-%m-%d %H:%M:%S' if format_spec == '' else format_spec)
 
 

+ 28 - 28
src/borg/helpers/yes.py

@@ -17,34 +17,34 @@ def yes(msg=None, false_msg=None, true_msg=None, default_msg=None,
         falsish=FALSISH, truish=TRUISH, defaultish=DEFAULTISH,
         default=False, retry=True, env_var_override=None, ofile=None, input=input, prompt=True,
         msgid=None):
-    """Output <msg> (usually a question) and let user input an answer.
-    Qualifies the answer according to falsish, truish and defaultish as True, False or <default>.
-    If it didn't qualify and retry is False (no retries wanted), return the default [which
-    defaults to False]. If retry is True let user retry answering until answer is qualified.
+    """Output <msg> (usually a question) and let the user input an answer.
+    Qualifies the answer according to falsish, truish, and defaultish as True, False, or <default>.
+    If it does not qualify and retry is False (no retries wanted), return the default [which
+    defaults to False]. If retry is True, let the user retry answering until the answer is qualified.
 
-    If env_var_override is given and this var is present in the environment, do not ask
-    the user, but just use the env var contents as answer as if it was typed in.
-    Otherwise read input from stdin and proceed as normal.
-    If EOF is received instead an input or an invalid input without retry possibility,
+    If env_var_override is given and this variable is present in the environment, do not ask
+    the user, but use the environment variable's contents as the answer as if it were typed in.
+    Otherwise, read input from stdin and proceed as normal.
+    If EOF is received instead of input, or an invalid input occurs without the possibility to retry,
     return default.
 
-    :param msg: introducing message to output on ofile, no \n is added [None]
-    :param retry_msg: retry message to output on ofile, no \n is added [None]
-    :param false_msg: message to output before returning False [None]
-    :param true_msg: message to output before returning True [None]
-    :param default_msg: message to output before returning a <default> [None]
-    :param invalid_msg: message to output after a invalid answer was given [None]
-    :param env_msg: message to output when using input from env_var_override ['{} (from {})'],
-           needs to have 2 placeholders for answer and env var name
-    :param falsish: sequence of answers qualifying as False
-    :param truish: sequence of answers qualifying as True
-    :param defaultish: sequence of answers qualifying as <default>
-    :param default: default return value (defaultish answer was given or no-answer condition) [False]
-    :param retry: if True and input is incorrect, retry. Otherwise return default. [True]
-    :param env_var_override: environment variable name [None]
-    :param ofile: output stream [sys.stderr]
-    :param input: input function [input from builtins]
-    :return: boolean answer value, True or False
+    :param msg: introductory message to output on ofile; no \n is added. [None]
+    :param retry_msg: retry message to output on ofile; no \n is added. [None]
+    :param false_msg: message to output before returning False. [None]
+    :param true_msg: message to output before returning True. [None]
+    :param default_msg: message to output before returning the default value. [None]
+    :param invalid_msg: message to output after an invalid answer was given. [None]
+    :param env_msg: message to output when using input from env_var_override ['{} (from {})'];
+           needs to have two placeholders for the answer and the environment variable name.
+    :param falsish: sequence of answers qualifying as False.
+    :param truish: sequence of answers qualifying as True.
+    :param defaultish: sequence of answers qualifying as <default>.
+    :param default: default return value (defaultish answer was given or no-answer condition). [False]
+    :param retry: If True and input is incorrect, retry; otherwise return default. [True]
+    :param env_var_override: environment variable name. [None]
+    :param ofile: output stream. [sys.stderr]
+    :param input: input function. [builtins.input]
+    :return: boolean answer value, True or False.
     """
     def output(msg, msg_type, is_prompt=False, **kwargs):
         json_output = getattr(logging.getLogger('borg'), 'json', False)
@@ -62,8 +62,8 @@ def yes(msg=None, false_msg=None, true_msg=None, default_msg=None,
                 print(msg, file=ofile)
 
     msgid = msgid or env_var_override
-    # note: we do not assign sys.stderr as default above, so it is
-    # really evaluated NOW,  not at function definition time.
+    # Note: We do not assign sys.stderr as the default above, so it is
+    # evaluated now, not at function definition time.
     if ofile is None:
         ofile = sys.stderr
     if default not in (True, False):
@@ -84,7 +84,7 @@ def yes(msg=None, false_msg=None, true_msg=None, default_msg=None,
                 if answer == ERROR:  # for testing purposes
                     raise UnicodeDecodeError("?", b"?", 0, 1, "?")  # args don't matter
             except EOFError:
-                # avoid defaultish[0], defaultish could be empty
+                # Avoid defaultish[0]; defaultish could be empty.
                 answer = truish[0] if default else falsish[0]
             except UnicodeDecodeError:
                 answer = ERROR
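
A minimal usage sketch for yes() (module path per this diff; the environment variable name is a made-up example)::

    from borg.helpers.yes import yes

    if yes(msg='Delete the repository? ',
           false_msg='Aborting.',
           default=False, retry=False,
           env_var_override='EXAMPLE_I_KNOW_WHAT_I_AM_DOING'):
        ...  # proceed with deletion

With EXAMPLE_I_KNOW_WHAT_I_AM_DOING=YES set in the environment, the question is answered non-interactively (assuming 'YES' is among the truish answers).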

+ 17 - 17
src/borg/locking.py

@@ -24,10 +24,10 @@ class TimeoutTimer:
         """
         Initialize a timer.
 
-        :param timeout: time out interval [s] or None (never timeout, wait forever) [default]
-        :param sleep: sleep interval [s] (>= 0: do sleep call, <0: don't call sleep)
-                      or None (autocompute: use 10% of timeout [but not more than 60s],
-                      or 1s for "never timeout" mode)
+        :param timeout: timeout interval [s] or None (never time out, wait forever). [default]
+        :param sleep: sleep interval [s] (>= 0: do sleep; < 0: do not call sleep),
+                      or None (auto-compute: use 10% of timeout, but not more than 60 s;
+                      or 1 s for "never timeout" mode).
         """
         if timeout is not None and timeout < 0:
             raise ValueError("timeout must be >= 0")
@@ -98,9 +98,9 @@ class NotMyLock(LockErrorT):
 
 
 class ExclusiveLock:
-    """An exclusive Lock based on mkdir fs operation being atomic.
+    """An exclusive lock based on the mkdir filesystem operation being atomic.
 
-    If possible, try to use the contextmanager here like::
+    If possible, try to use the context manager here like::
 
         with ExclusiveLock(...) as lock:
             ...
@@ -212,7 +212,7 @@ class ExclusiveLock:
 
                 if not self.kill_stale_locks:
                     if not self.stale_warning_printed:
-                        # Log this at warning level to hint the user at the ability
+                        # Log this at warning level to hint to the user that stale locks can be removed automatically.
                         logger.warning("Found stale lock %s, but not deleting because self.kill_stale_locks = False.", name)
                         self.stale_warning_printed = True
                     return False
@@ -246,7 +246,7 @@ class ExclusiveLock:
             os.rmdir(self.path)
 
     def migrate_lock(self, old_id, new_id):
-        """migrate the lock ownership from old_id to new_id"""
+        """Migrate the lock ownership from old_id to new_id."""
         assert self.id == old_id
         new_unique_name = os.path.join(self.path, "%s.%d-%x" % new_id)
         if self.is_locked() and self.by_me():
@@ -319,8 +319,8 @@ class LockRoster:
         if op == ADD:
             elements.add(self.id)
         elif op == REMOVE:
-            # note: we ignore it if the element is already not present anymore.
-            # this has been frequently seen in teardowns involving Repository.__del__ and Repository.__exit__.
+            # Note: We ignore it if the element is already not present anymore.
+            # This has been frequently seen in teardowns involving Repository.__del__ and Repository.__exit__.
             elements.discard(self.id)
         elif op == REMOVE2:
             # needed for callers that do not want to ignore.
@@ -331,7 +331,7 @@ class LockRoster:
         self.save(roster)
 
     def migrate_lock(self, key, old_id, new_id):
-        """migrate the lock ownership from old_id to new_id"""
+        """Migrate the lock ownership from old_id to new_id."""
         assert self.id == old_id
         # need to temporarily switch off stale lock killing as we want to
         # rather migrate than kill them (at least the one made by old_id).
@@ -352,12 +352,12 @@ class LockRoster:
 
 class Lock:
     """
-    A Lock for a resource that can be accessed in a shared or exclusive way.
+    A lock for a resource that can be accessed in a shared or exclusive way.
     Typically, write access to a resource needs an exclusive lock (1 writer,
     no one is allowed reading) and read access to a resource needs a shared
     lock (multiple readers are allowed).
 
-    If possible, try to use the contextmanager here like::
+    If possible, try to use the context manager here like::
 
         with Lock(...) as lock:
             ...
@@ -371,10 +371,10 @@ class Lock:
         self.sleep = sleep
         self.timeout = timeout
         self.id = id or platform.get_process_id()
-        # globally keeping track of shared and exclusive lockers:
+        # Globally keep track of shared and exclusive lockers:
         self._roster = LockRoster(path + '.roster', id=id)
-        # an exclusive lock, used for:
-        # - holding while doing roster queries / updates
+        # An exclusive lock, used for:
+        # - holding while doing roster queries/updates
         # - holding while the Lock itself is exclusive
         self._lock = ExclusiveLock(path + '.exclusive', id=id, timeout=timeout)
 
@@ -415,7 +415,7 @@ class Lock:
                 if remove is not None:
                     self._roster.modify(remove, ADD)
             except:
-                # avoid orphan lock when an exception happens here, e.g. Ctrl-C!
+                # Avoid an orphan lock when an exception happens here (e.g., Ctrl-C)!
                 self._lock.release()
                 raise
             else:
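
The recommended context-manager form, as a sketch (the 'exclusive' keyword is an assumption based on the borg 1.x Lock API)::

    from borg.locking import Lock

    with Lock('/path/to/resource', exclusive=True, timeout=1.0):
        ...  # mutate the resource; the lock is released even if this raises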

+ 29 - 29
src/borg/logger.py

@@ -1,33 +1,33 @@
-"""logging facilities
+"""Logging facilities.
 
-The way to use this is as follows:
+How to use:
 
-* each module declares its own logger, using:
+- Each module declares its own logger, using:
 
     from .logger import create_logger
     logger = create_logger()
 
-* then each module uses logger.info/warning/debug/etc according to the
+- Then each module uses logger.info/warning/debug/etc. according to the
   level it believes is appropriate:
 
     logger.debug('debugging info for developers or power users')
     logger.info('normal, informational output')
-    logger.warning('warn about a non-fatal error or sth else')
+    logger.warning('warn about a non-fatal error or something else')
     logger.error('a fatal error')
 
-  ... and so on. see the `logging documentation
+  See the `logging documentation
   <https://docs.python.org/3/howto/logging.html#when-to-use-logging>`_
-  for more information
+  for more information.
 
-* console interaction happens on stderr, that includes interactive
-  reporting functions like `help`, `info` and `list`
+- Console interaction happens on stderr; that includes interactive
+  reporting functions like `help`, `info`, and `list`.
 
-* ...except ``input()`` is special, because we can't control the
-  stream it is using, unfortunately. we assume that it won't clutter
-  stdout, because interaction would be broken then anyways
+- ...except ``input()`` is special, because we cannot control the
+  stream it uses. We assume that it will not clutter stdout, because
+  interaction would be broken otherwise.
 
-* what is output on INFO level is additionally controlled by commandline
-  flags
+- What is output at the INFO level is additionally controlled by command-line
+  flags.
 """
 
 import inspect
@@ -60,16 +60,16 @@ def _log_warning(message, category, filename, lineno, file=None, line=None):
 
 
 def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', level='info', is_serve=False, json=False):
-    """setup logging module according to the arguments provided
+    """Set up the logging module according to the provided arguments.
 
-    if conf_fname is given (or the config file name can be determined via
-    the env_var, if given): load this logging configuration.
+    If conf_fname is given (or the config file name can be determined via
+    env_var, if given), load that logging configuration.
 
-    otherwise, set up a stream handler logger on stderr (by default, if no
+    Otherwise, set up a stream handler logger on stderr (by default, if no
     stream is provided).
 
-    if is_serve == True, we configure a special log format as expected by
-    the borg client log message interceptor.
+    If is_serve is True, configure a special log format as expected by
+    the Borg client log message interceptor.
     """
     global configured
     err_msg = None
@@ -123,9 +123,9 @@ def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', lev
 
 
 def find_parent_module():
-    """find the name of the first module calling this module
+    """Find the name of the first module calling this module.
 
-    if we cannot find it, we return the current module's name
+    If it cannot be found, return the current module's name
     (__name__) instead.
     """
     try:
@@ -142,18 +142,18 @@ def find_parent_module():
 
 
 def create_logger(name=None):
-    """lazily create a Logger object with the proper path, which is returned by
-    find_parent_module() by default, or is provided via the commandline
+    """Lazily create a Logger object with the proper path, which is returned by
+    find_parent_module() by default, or is provided via the command-line.
 
-    this is really a shortcut for:
+    This is really a shortcut for:
 
         logger = logging.getLogger(__name__)
 
-    we use it to avoid errors and provide a more standard API.
+    We use it to avoid errors and provide a more standard API.
 
-    We must create the logger lazily, because this is usually called from
-    module level (and thus executed at import time - BEFORE setup_logging()
-    was called). By doing it lazily we can do the setup first, we just have to
+    We must create the logger lazily because this is usually called from
+    module level (and thus executed at import time, before setup_logging()
+    is called). By doing it lazily, we can do the setup first; we just have to
     be careful not to call any logger methods before the setup_logging() call.
     If you try, you'll get an exception.
     """

+ 2 - 2
src/borg/lrucache.py

@@ -40,8 +40,8 @@ class LRUCache:
         return value
 
     def upd(self, key, value):
-        # special use only: update the value for an existing key without having to dispose it first
-        # this method complements __setitem__ which should be used for the normal use case.
+        # Special use only: update the value for an existing key without having to dispose it first.
+        # This method complements __setitem__, which should be used for the normal use case.
         assert key in self._cache, "Unexpected attempt to update a non-existing item."
         self._cache[key] = value
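
The distinction in miniature (the capacity/dispose constructor arguments are assumptions based on the borg 1.x LRUCache)::

    from borg.lrucache import LRUCache

    cache = LRUCache(capacity=2, dispose=lambda value: None)
    cache['a'] = 1      # normal use: __setitem__ for new keys
    cache.upd('a', 2)   # special use: replace the value of an existing key
    assert cache['a'] == 2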
 

+ 5 - 5
src/borg/nanorst.py

@@ -50,10 +50,10 @@ def process_directive(directive, arguments, out, state_hook):
 
 def rst_to_text(text, state_hook=None, references=None):
     """
-    Convert rST to a more human text form.
+    Convert reStructuredText (rST) to a more human-readable text form.
 
     This is a very loose conversion. No advanced rST features are supported.
-    The generated output directly depends on the input (e.g. indentation of
+    The generated output directly depends on the input (e.g., indentation of
     admonitions).
     """
     state_hook = state_hook or (lambda old_state, new_state, out: None)
@@ -90,7 +90,7 @@ def rst_to_text(text, state_hook=None, references=None):
                     text.read(1)
                     continue
                 if text.peek(-1).isspace() and char == ':' and text.peek(5) == 'ref:`':
-                    # translate reference
+                    # Translate reference
                     text.read(5)
                     ref = ''
                     while True:
@@ -108,7 +108,7 @@ def rst_to_text(text, state_hook=None, references=None):
                         raise ValueError("Undefined reference in Archiver help: %r — please add reference "
                                          "substitution to 'rst_plain_text_references'" % ref)
                     continue
-                if char == ':' and text.peek(2) == ':\n':  # End of line code block
+                if char == ':' and text.peek(2) == ':\n':  # End-of-line code block
                     text.read(2)
                     state_hook(state, 'code-block', out)
                     state = 'code-block'
@@ -204,7 +204,7 @@ def rst_to_terminal(rst, references=None, destination=sys.stdout):
     Convert *rst* to a lazy string.
 
     If *destination* is a file-like object connected to a terminal,
-    enrich text with suitable ANSI escapes. Otherwise return plain text.
+    enrich the text with suitable ANSI escapes. Otherwise, return plain text.
     """
     if is_terminal(destination):
         rst_state_hook = ansi_escapes
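
A usage sketch for the converter (the reference name and its substitution are made up)::

    from borg.nanorst import rst_to_text

    plain = rst_to_text('See :ref:`borg_check` for details.',
                        references={'borg_check': 'borg check'})

An undefined reference raises the ValueError shown above.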

+ 10 - 10
src/borg/patterns.py

@@ -20,7 +20,7 @@ def parse_patternfile_line(line, roots, ie_commands, fallback):
     elif ie_command.cmd is IECommand.PatternStyle:
         fallback = ie_command.val
     else:
-        # it is some kind of include/exclude command
+        # It is some kind of include/exclude command.
         ie_commands.append(ie_command)
     return fallback
 
@@ -51,7 +51,7 @@ class ArgparsePatternFileAction(argparse.Action):
 
     def __call__(self, parser, args, values, option_string=None):
         """Load and parse patterns from a file.
-        Lines empty or starting with '#' after stripping whitespace on both line ends are ignored.
+        Empty lines or lines starting with '#' (after stripping whitespace at both ends) are ignored.
         """
         filename = values[0]
         try:
@@ -81,7 +81,7 @@ class PatternMatcher:
         # Value to return from match function when none of the patterns match.
         self.fallback = fallback
 
-        # optimizations
+        # Optimizations
         self._path_full_patterns = {}  # full path -> return value
 
         # indicates whether the last match() call ended on a pattern for which
@@ -89,13 +89,13 @@ class PatternMatcher:
         # False when calling match().
         self.recurse_dir = None
 
-        # whether to recurse into directories when no match is found
+        # Whether to recurse into directories when no match is found
         # TODO: allow modification as a config option?
         self.recurse_dir_default = True
 
         self.include_patterns = []
 
-        # TODO: move this info to parse_inclexcl_command and store in PatternBase subclass?
+        # TODO: Move this info to parse_inclexcl_command and store it in a PatternBase subclass?
         self.is_include_cmd = {
             IECommand.Exclude: False,
             IECommand.ExcludeNoRecurse: False,
@@ -151,28 +151,28 @@ class PatternMatcher:
 
         """
         path = normalize_path(path).lstrip(os.path.sep)
-        # do a fast lookup for full path matches (note: we do not count such matches):
+        # Do a fast lookup for full path matches (note: we do not count such matches):
         non_existent = object()
         value = self._path_full_patterns.get(path, non_existent)
 
         if value is not non_existent:
-            # we have a full path match!
+            # We have a full path match!
             self.recurse_dir = command_recurses_dir(value)
             return self.is_include_cmd[value]
 
-        # this is the slow way, if we have many patterns in self._items:
+        # This is the slow path if we have many patterns in self._items:
         for (pattern, cmd) in self._items:
             if pattern.match(path, normalize=False):
                 self.recurse_dir = pattern.recurse_dir
                 return self.is_include_cmd[cmd]
 
-        # by default we will recurse if there is no match
+        # By default, we will recurse if there is no match.
         self.recurse_dir = self.recurse_dir_default
         return self.fallback
 
 
 def normalize_path(path):
-    """normalize paths for MacOS (but do nothing on other platforms)"""
+    """Normalize paths for macOS (no-op on other platforms)."""
     # HFS+ converts paths to a canonical form, so users shouldn't be required to enter an exact match.
     # Windows and Unix filesystems allow different forms, so users always have to enter an exact match.
     return unicodedata.normalize('NFD', path) if sys.platform == 'darwin' else path
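
The NFD behavior, illustrated with plain unicodedata (independent of borg)::

    import unicodedata

    precomposed = 'caf\u00e9'   # 'café' as one precomposed code point (NFC)
    decomposed = 'cafe\u0301'   # 'cafe' plus a combining acute accent (NFD)
    assert precomposed != decomposed
    assert unicodedata.normalize('NFD', precomposed) == decomposed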

+ 5 - 5
src/borg/platform/__init__.py

@@ -17,7 +17,7 @@ OS_API_VERSION = API_VERSION
 
 if not is_win32:
     from .posix import process_alive, local_pid_alive
-    # posix swidth implementation works for: linux, freebsd, darwin, openindiana, cygwin
+    # POSIX swidth implementation works for: Linux, FreeBSD, Darwin, OpenIndiana, Cygwin
     from .posix import swidth
     from .posix import get_errno
     from .posix import uid2user, user2uid, gid2group, group2gid, getosusername
@@ -26,17 +26,17 @@ else:
     from .windows import process_alive, local_pid_alive
     from .windows import uid2user, user2uid, gid2group, group2gid, getosusername
 
-if is_linux:  # pragma: linux only
+if is_linux:  # pragma: Linux only
     from .linux import API_VERSION as OS_API_VERSION
     from .linux import listxattr, getxattr, setxattr
     from .linux import acl_get, acl_set
     from .linux import set_flags, get_flags
     from .linux import SyncFile
-elif is_freebsd:  # pragma: freebsd only
+elif is_freebsd:  # pragma: FreeBSD only
     from .freebsd import API_VERSION as OS_API_VERSION
     from .freebsd import listxattr, getxattr, setxattr
     from .freebsd import acl_get, acl_set
-elif is_darwin:  # pragma: darwin only
+elif is_darwin:  # pragma: Darwin only
     from .darwin import API_VERSION as OS_API_VERSION
     from .darwin import listxattr, getxattr, setxattr
     from .darwin import acl_get, acl_set
@@ -45,7 +45,7 @@ elif is_darwin:  # pragma: darwin only
 
 def get_birthtime_ns(st, path, fd=None):
     if hasattr(st, "st_birthtime_ns"):
-        # added in Python 3.12 but not always available.
+        # Added in Python 3.12 but not always available.
         return st.st_birthtime_ns
     elif is_darwin and is_darwin_feature_64_bit_inode:
         return _get_birthtime_ns(fd or path, follow_symlinks=False)
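
Callers do not dispatch on the platform themselves; they import the composed API, e.g. (a sketch)::

    from borg.platform import swidth  # the right implementation was chosen at import time above

    assert swidth('abc') == 3  # plain ASCII is one cell per character on every implementation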

+ 21 - 21
src/borg/platform/base.py

@@ -7,14 +7,14 @@ from borg.helpers import safe_unlink
 from borg.platformflags import is_win32
 
 """
-platform base module
+Platform base module
 ====================
 
 Contains platform API implementations based on what Python itself provides. More specific
 APIs are stubs in this module.
 
-When functions in this module use platform APIs themselves they access the public
-platform API: that way platform APIs provided by the platform-specific support module
+When functions in this module use platform APIs themselves, they access the public
+platform API; that way, platform APIs provided by the platform-specific support module
 are correctly composed into the base functionality.
 """
 
@@ -45,7 +45,7 @@ def getxattr(path, name, *, follow_symlinks=False):
     *follow_symlinks* indicates whether symlinks should be followed
     and only applies when *path* is not an open file descriptor.
     """
-    # as this base dummy implementation returns [] from listxattr,
+    # As this base dummy implementation returns [] from listxattr,
     # it must raise here for any given name:
     raise OSError(ENOATTR, os.strerror(ENOATTR), path)
 
@@ -55,7 +55,7 @@ def setxattr(path, name, value, *, follow_symlinks=False):
     Write xattr on *path*.
 
     *path* can either be a path (bytes) or an open file descriptor (int).
-    *name* is the name of the xattr to read (bytes).
+    *name* is the name of the xattr to write (bytes).
     *value* is the value to write (bytes).
     *follow_symlinks* indicates whether symlinks should be followed
     and only applies when *path* is not an open file descriptor.
@@ -64,18 +64,18 @@ def setxattr(path, name, value, *, follow_symlinks=False):
 
 def acl_get(path, item, st, numeric_ids=False, fd=None):
     """
-    Saves ACL Entries
+    Save ACL entries.
 
-    If `numeric_ids` is True the user/group field is not preserved only uid/gid
+    If `numeric_ids` is True, only the numeric uid/gid is preserved, not the user/group names.
     """
 
 
 def acl_set(path, item, numeric_ids=False, fd=None):
     """
-    Restore ACL Entries
+    Restore ACL entries.
 
-    If `numeric_ids` is True the stored uid/gid is used instead
-    of the user/group names
+    If `numeric_ids` is True, the stored uid/gid is used instead
+    of the user/group names.
     """
 
 
@@ -96,7 +96,7 @@ def get_flags(path, st, fd=None):
 
 def sync_dir(path):
     if is_win32:
-        # Opening directories is not supported on windows.
+        # Opening directories is not supported on Windows.
         # TODO: do we need to handle this in some other way?
         return
     fd = os.open(path, os.O_RDONLY)
@@ -143,7 +143,7 @@ class SyncFile:
 
     Calling SyncFile(path) for an existing path will raise FileExistsError, see comment in __init__.
 
-    TODO: Use F_FULLSYNC on OSX.
+    TODO: Use F_FULLSYNC on macOS.
     TODO: A Windows implementation should use CreateFile with FILE_FLAG_WRITE_THROUGH.
     """
 
@@ -208,7 +208,7 @@ class SaveFile:
     atomically and won't become corrupted, even on power failures or
     crashes (for caveats see SyncFile).
 
-    SaveFile can safely by used in parallel (e.g. by multiple processes) to write
+    SaveFile can safely be used in parallel (e.g. by multiple processes) to write
     to the same target path. Whatever writer finishes last (executes the os.replace
     last) "wins" and has successfully written its content to the target path.
     Internally used temporary files are created in the target directory and are
@@ -249,9 +249,9 @@ class SaveFile:
 
 
 def swidth(s):
-    """terminal output width of string <s>
+    """Terminal output width of string <s>.
 
-    For western scripts, this is just len(s), but for cjk glyphs, 2 cells are used.
+    For Western scripts, this is just len(s), but for CJK glyphs, 2 cells are used.
     """
     return len(s)
 
@@ -277,17 +277,17 @@ def getfqdn(name=''):
     return name
 
 
-# for performance reasons, only determine hostname / fqdn / hostid once.
-# XXX this sometimes requires live internet access for issuing a DNS query in the background.
+# For performance reasons, only determine hostname / FQDN / host ID once.
+# XXX This sometimes requires live internet access for issuing a DNS query in the background.
 hostname = socket.gethostname()
 fqdn = getfqdn(hostname)
-# some people put the fqdn into /etc/hostname (which is wrong, should be the short hostname)
-# fix this (do the same as "hostname --short" cli command does internally):
+# Some people put the FQDN into /etc/hostname (which is wrong; it should be the short hostname).
+# Fix this (do the same as the "hostname --short" CLI command does internally):
 hostname = hostname.split('.')[0]
 
-# uuid.getnode() is problematic in some environments (e.g. OpenVZ, see #3968) where the virtual MAC address
+# uuid.getnode() is problematic in some environments (e.g., OpenVZ, see #3968) where the virtual MAC address
 # is all-zero. uuid.getnode falls back to returning a random value in that case, which is not what we want.
-# thus, we offer BORG_HOST_ID where a user can set an own, unique id for each of his hosts.
+# Thus, we offer BORG_HOST_ID so that users can set their own unique ID for each of their hosts.
 hostid = os.environ.get('BORG_HOST_ID')
 if not hostid:
     hostid = f'{fqdn}@{uuid.getnode()}'
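
Because hostname, fqdn, and hostid are computed once at import time, an override has to be in place before borg is first imported (a sketch; normally one would export BORG_HOST_ID in the shell instead)::

    import os

    os.environ['BORG_HOST_ID'] = 'myhost-unique-id'  # any unique string
    import borg.platform.base                        # now picks up the override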

+ 9 - 9
src/borg/platform/xattr.py

@@ -7,7 +7,7 @@ from ..helpers import Buffer
 try:
     ENOATTR = errno.ENOATTR
 except AttributeError:
-    # on some platforms, ENOATTR is missing, use ENODATA there
+    # On some platforms, ENOATTR is missing; use ENODATA there.
     ENOATTR = errno.ENODATA
 
 
@@ -15,14 +15,14 @@ buffer = Buffer(bytearray, limit=2**24)
 
 
 def split_string0(buf):
-    """split a list of zero-terminated strings into python not-zero-terminated bytes"""
+    """Split a list of zero-terminated strings into Python bytes (without terminating zeros)."""
     if isinstance(buf, bytearray):
         buf = bytes(buf)  # use a bytes object, so we return a list of bytes objects
     return buf.split(b'\0')[:-1]
 
 
 def split_lstring(buf):
-    """split a list of length-prefixed strings into python not-length-prefixed bytes"""
+    """Split a list of length-prefixed strings into Python bytes (without length prefixes)."""
     result = []
     mv = memoryview(buf)
     while mv:
@@ -33,7 +33,7 @@ def split_lstring(buf):
 
 
 class BufferTooSmallError(Exception):
-    """the buffer given to a xattr function was too small for the result."""
+    """The buffer given to an xattr function was too small for the result."""
 
 
 def _check(rv, path=None, detect_buffer_too_small=False):
@@ -41,8 +41,8 @@ def _check(rv, path=None, detect_buffer_too_small=False):
     if rv < 0:
         e = get_errno()
         if detect_buffer_too_small and e == errno.ERANGE:
-            # listxattr and getxattr signal with ERANGE that they need a bigger result buffer.
-            # setxattr signals this way that e.g. a xattr key name is too long / inacceptable.
+            # listxattr and getxattr indicate with ERANGE that they need a bigger result buffer.
+            # setxattr uses ERANGE to indicate that, e.g., an xattr key name is too long or unacceptable.
             raise BufferTooSmallError
         else:
             try:
@@ -53,9 +53,9 @@ def _check(rv, path=None, detect_buffer_too_small=False):
                 path = '<FD %d>' % path
             raise OSError(e, msg, path)
     if detect_buffer_too_small and rv >= len(buffer):
-        # freebsd does not error with ERANGE if the buffer is too small,
-        # it just fills the buffer, truncates and returns.
-        # so, we play safe and just assume that result is truncated if
+        # FreeBSD does not error with ERANGE if the buffer is too small;
+        # it just fills the buffer, truncates, and returns.
+        # Therefore, we play it safe and assume the result is truncated if
         # it happens to be a full buffer.
         raise BufferTooSmallError
     return rv
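
What the two splitters do, shown on hand-made buffers (the one-byte length prefix is an assumption matching the FreeBSD extattr convention)::

    from borg.platform.xattr import split_string0, split_lstring

    assert split_string0(b'user.a\0user.b\0') == [b'user.a', b'user.b']
    assert split_lstring(b'\x06user.a\x06user.b') == [b'user.a', b'user.b']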

+ 2 - 2
src/borg/platformflags.py

@@ -1,7 +1,7 @@
 """
-Flags for Platform-specific APIs.
+Flags for platform-specific APIs.
 
-Use these Flags instead of sys.platform.startswith('<OS>') or try/except.
+Use these flags instead of sys.platform.startswith('<OS>') or try/except.
 """
 
 import sys

+ 7 - 7
src/borg/remote.py

@@ -47,11 +47,11 @@ RATELIMIT_PERIOD = 0.1
 
 
 def os_write(fd, data):
-    """os.write wrapper so we do not lose data for partial writes."""
-    # TODO: this issue is fixed in cygwin since at least 2.8.0, remove this
-    #       wrapper / workaround when this version is considered ancient.
-    # This is happening frequently on cygwin due to its small pipe buffer size of only 64kiB
-    # and also due to its different blocking pipe behaviour compared to Linux/*BSD.
+    """Wrapper around os.write to avoid data loss on partial writes."""
+    # TODO: This issue is fixed in Cygwin since at least 2.8.0; remove this
+    #       wrapper/workaround when this version is considered ancient.
+    # This happens frequently on Cygwin due to its small pipe buffer size of only 64 KiB
+    # and also due to its different blocking pipe behavior compared to Linux/*BSD.
     # Neither Linux nor *BSD ever do partial writes on blocking pipes, unless interrupted by a
     # signal, in which case serve() would terminate.
     amount = remaining = len(data)
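
The shape of such a wrapper, as a standalone sketch (write_all is a hypothetical name; the real loop is elided from this hunk)::

    import os

    def write_all(fd, data):
        """Loop until all of data is written, handling partial writes."""
        amount = remaining = len(data)
        while remaining:
            count = os.write(fd, data[amount - remaining:])
            remaining -= count
        return amount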
@@ -66,7 +66,7 @@ def os_write(fd, data):
 
 
 class ConnectionClosed(Error):
-    """Connection closed by remote host"""
+    """Connection closed by remote host."""
     exit_mcode = 80
 
 
@@ -81,7 +81,7 @@ class PathNotAllowed(Error):
 
 
 class InvalidRPCMethod(Error):
-    """RPC method {} is not valid"""
+    """RPC method {} is not valid."""
     exit_mcode = 82
 
 

+ 8 - 8
src/borg/repository.py

@@ -51,7 +51,7 @@ FreeSpace = partial(defaultdict, int)
 
 class Repository:
     """
-    Filesystem based transactional key value store
+    Filesystem-based transactional key-value store.
 
     Transactionality is achieved by using a log (aka journal) to record changes. The log is a series of numbered files
     called segments. Each segment is a series of log entries. The segment number together with the offset of each
@@ -84,9 +84,9 @@ class Repository:
     such obsolete entries is called sparse, while a segment containing no such entries is called compact.
 
     Sparse segments can be compacted and thereby disk space freed. This destroys the transaction for which the
-    superseded entries where current.
+    superseded entries were current.
 
-    On disk layout:
+    On-disk layout:
 
     dir/README
     dir/config
@@ -97,16 +97,16 @@ class Repository:
     File system interaction
     -----------------------
 
-    LoggedIO generally tries to rely on common behaviours across transactional file systems.
+    LoggedIO generally tries to rely on common behaviors across transactional file systems.
 
     Segments that are deleted are truncated first, which avoids problems if the FS needs to
     allocate space to delete the dirent of the segment. This mostly affects CoW file systems,
     traditional journaling file systems have a fairly good grip on this problem.
 
     Note that deletion, i.e. unlink(2), is atomic on every file system that uses inode reference
-    counts, which includes pretty much all of them. To remove a dirent the inodes refcount has
-    to be decreased, but you can't decrease the refcount before removing the dirent nor can you
-    decrease the refcount after removing the dirent. File systems solve this with a lock,
+    counts, which includes pretty much all of them. To remove a dirent, the inode's reference count
+    has to be decreased, but you cannot decrease the reference count before removing the dirent,
+    nor after removing it. File systems solve this with a lock,
     and by ensuring it all stays within the same FS transaction.
 
     Truncation is generally not atomic in itself, and combining truncate(2) and unlink(2) is of
@@ -115,7 +115,7 @@ class Repository:
     this is of course way more complex).
 
     LoggedIO gracefully handles truncate/unlink splits as long as the truncate resulted in
-    a zero length file. Zero length segments are considered to not exist, while LoggedIO.cleanup()
+    a zero-length file. Zero-length segments are considered to not exist, while LoggedIO.cleanup()
     will still get rid of them.
     """
 

+ 4 - 4
src/borg/selftest.py

@@ -2,16 +2,16 @@
 #       See borg.selftest for details. If you add/remove test methods, update SELFTEST_COUNT
 
 """
-Self testing module
+Self-testing module
 ===================
 
 The selftest() function runs a small test suite of relatively fast tests that are meant to discover issues
 with the way Borg was compiled or packaged and also bugs in Borg itself.
 
-These tests are a subset of the borg/testsuite and are run with Pythons built-in unittest, hence none of
+These tests are a subset of the borg/testsuite and are run with Python's built-in unittest, hence none of
 the tests used for this can or should be ported to py.test currently.
 
-To assert that self test discovery works correctly the number of tests is kept in the SELFTEST_COUNT
+To assert that self-test discovery works correctly, the number of tests is kept in the SELFTEST_COUNT
 variable. SELFTEST_COUNT must be updated if new tests are added or removed to or from any of the tests
 used here.
 """
@@ -68,7 +68,7 @@ def selftest(logger):
     for test_case in SELFTEST_CASES:
         module = sys.modules[test_case.__module__]
         # a normal borg user does not have pytest installed, we must not require it in the test modules used here.
-        # note: this only detects the usual toplevel import
+        # Note: this only detects the usual top-level import
         assert 'pytest' not in dir(module), "pytest must not be imported in %s" % module.__name__
         test_suite.addTest(defaultTestLoader.loadTestsFromTestCase(test_case))
     test_suite.run(result)

+ 2 - 2
src/borg/shellpattern.py

@@ -10,14 +10,14 @@ def translate(pat, match_end=r"\Z"):
     any path separator. Wrap meta-characters in brackets for a literal match (i.e. "[?]" to match the literal character
     "?").
 
-    Using match_end=regex one can give a regular expression that is used to match after the regex that is generated from
+    Using match_end=regex, one can provide a regular expression that is used to match after the regex that is generated from
     the pattern. The default is to match the end of the string.
 
     This function is derived from the "fnmatch" module distributed with the Python standard library.
 
     Copyright (C) 2001-2016 Python Software Foundation. All rights reserved.
 
-    TODO: support {alt1,alt2} shell-style alternatives
+    TODO: support {alt1,alt2} shell-style alternatives.
 
     """
     sep = os.path.sep
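
For example (only behavior stated in the docstring is asserted; the generated regex text itself is an implementation detail)::

    import re
    from borg.shellpattern import translate

    rx = re.compile(translate('home/*/.cache'))
    assert rx.match('home/user/.cache')            # '*' matches within one level ...
    assert not rx.match('home/user/sub/.cache')    # ... but never across a path separator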

+ 48 - 47
src/borg/upgrader.py

@@ -23,15 +23,15 @@ class AtticRepositoryUpgrader(Repository):
         super().__init__(*args, **kw)
 
     def upgrade(self, dryrun=True, inplace=False, progress=False):
-        """convert an attic repository to a borg repository
+        """Convert an Attic repository to a Borg repository.
 
-        those are the files that need to be upgraded here, from most
+        These are the files that need to be upgraded here, from most
         important to least important: segments, key files, and various
-        caches, the latter being optional, as they will be rebuilt if
+        caches, the latter being optional, as they will be rebuilt if
         missing.
 
-        we nevertheless do the order in reverse, as we prefer to do
-        the fast stuff first, to improve interactivity.
+        We nevertheless process them in reverse order, as we prefer to do
+        the fast stuff first to improve interactivity.
         """
         with self:
             backup = None
@@ -70,13 +70,14 @@ class AtticRepositoryUpgrader(Repository):
 
     @staticmethod
     def convert_segments(segments, dryrun=True, inplace=False, progress=False):
-        """convert repository segments from attic to borg
+        """Convert repository segments from Attic to Borg.
 
-        replacement pattern is `s/ATTICSEG/BORG_SEG/` in files in
+        Replacement pattern is `s/ATTICSEG/BORG_SEG/` in files in
         `$ATTIC_REPO/data/**`.
 
-        luckily the magic string length didn't change so we can just
-        replace the 8 first bytes of all regular files in there."""
+        Luckily the magic string length did not change, so we can just
+        replace the first 8 bytes of all regular files in there.
+        """
         logger.info("converting %d segments..." % len(segments))
         segment_count = len(segments)
         pi = ProgressIndicatorPercent(total=segment_count, msg="Converting segments %3.0f%%", msgid='upgrade.convert_segments')
@@ -94,55 +95,57 @@ class AtticRepositoryUpgrader(Repository):
     def header_replace(filename, old_magic, new_magic, inplace=True):
         with open(filename, 'r+b') as segment:
             segment.seek(0)
-            # only write if necessary
+            # Only write if necessary.
             if segment.read(len(old_magic)) == old_magic:
                 if inplace:
                     segment.seek(0)
                     segment.write(new_magic)
                 else:
-                    # rename the hardlink and rewrite the file. this works
-                    # because the file is still open. so even though the file
+                    # Rename the hardlink and rewrite the file. This works
+                    # because the file is still open. Even though the file
                     # is renamed, we can still read it until it is closed.
                     os.rename(filename, filename + '.tmp')
                     with open(filename, 'wb') as new_segment:
                         new_segment.write(new_magic)
                         new_segment.write(segment.read())
-                    # the little dance with the .tmp file is necessary
-                    # because Windows won't allow overwriting an open file.
+                    # The little dance with the .tmp file is necessary
+                    # because Windows will not allow overwriting an open file.
                     os.unlink(filename + '.tmp')
 
     def find_attic_keyfile(self):
-        """find the attic keyfiles
+        """Find the Attic key files.
 
-        the keyfiles are loaded by `KeyfileKey.find_key_file()`. that
+        The key files are loaded by `KeyfileKey.find_key_file()`. That
         finds the keys with the right identifier for the repo.
 
-        this is expected to look into $HOME/.attic/keys or
+        This is expected to look into $HOME/.attic/keys or
         $ATTIC_KEYS_DIR for key files matching the given Borg
         repository.
 
-        it is expected to raise an exception (KeyfileNotFoundError) if
-        no key is found. whether that exception is from Borg or Attic
+        It is expected to raise an exception (KeyfileNotFoundError) if
+        no key is found. Whether that exception is from Borg or Attic
         is unclear.
 
-        this is split in a separate function in case we want to use
-        the attic code here directly, instead of our local
-        implementation."""
+        This is split into a separate function in case we want to use
+        the Attic code here directly, instead of our local
+        implementation.
+        """
         return AtticKeyfileKey.find_key_file(self)
 
     @staticmethod
     def convert_keyfiles(keyfile, dryrun):
-        """convert key files from attic to borg
+        """Convert key files from Attic to Borg.
 
-        replacement pattern is `s/ATTIC KEY/BORG_KEY/` in
+        Replacement pattern is `s/ATTIC KEY/BORG_KEY/` in
         `get_keys_dir()`, that is `$ATTIC_KEYS_DIR` or
         `$HOME/.attic/keys`, and moved to `$BORG_KEYS_DIR` or
         `$HOME/.config/borg/keys`.
 
-        no need to decrypt to convert. we need to rewrite the whole
-        key file because magic string length changed, but that's not a
-        problem because the keyfiles are small (compared to, say,
-        all the segments)."""
+        No need to decrypt to convert. We need to rewrite the whole
+        key file because the magic string length changed, but that is not a
+        problem because the key files are small (compared to, say,
+        all the segments).
+        """
         logger.info("converting keyfile %s" % keyfile)
         with open(keyfile) as f:
             data = f.read()
@@ -154,16 +157,16 @@ class AtticRepositoryUpgrader(Repository):
                 f.write(data)
 
     def convert_repo_index(self, dryrun, inplace):
-        """convert some repo files
+        """Convert some repo files.
 
-        those are all hash indexes, so we need to
+        These are all hash indexes, so we need to
         `s/ATTICIDX/BORG_IDX/` in a few locations:
 
         * the repository index (in `$ATTIC_REPO/index.%d`, where `%d`
           is the `Repository.get_index_transaction_id()`), which we
-          should probably update, with a lock, see
-          `Repository.open()`, which i'm not sure we should use
-          because it may write data on `Repository.close()`...
+          should probably update with a lock (see
+          `Repository.open()`), although we might avoid it because it may
+          write data on `Repository.close()`.
         """
         transaction_id = self.get_index_transaction_id()
         if transaction_id is None:
@@ -175,16 +178,16 @@ class AtticRepositoryUpgrader(Repository):
                 AtticRepositoryUpgrader.header_replace(index, b'ATTICIDX', b'BORG_IDX', inplace=inplace)
 
     def convert_cache(self, dryrun):
-        """convert caches from attic to borg
+        """Convert caches from Attic to Borg.
 
-        those are all hash indexes, so we need to
+        These are all hash indexes, so we need to
         `s/ATTICIDX/BORG_IDX/` in a few locations:
 
         * the `files` and `chunks` cache (in `$ATTIC_CACHE_DIR` or
           `$HOME/.cache/attic/<repoid>/`), which we could just drop,
-          but if we'd want to convert, we could open it with the
-          `Cache.open()`, edit in place and then `Cache.close()` to
-          make sure we have locking right
+          but if we wanted to convert it, we could open it with
+          `Cache.open()`, edit in place, and then `Cache.close()` to
+          make sure we have locking right.
         """
         # copy of attic's get_cache_dir()
         attic_cache_dir = os.environ.get('ATTIC_CACHE_DIR',
@@ -194,19 +197,17 @@ class AtticRepositoryUpgrader(Repository):
         borg_cache_dir = os.path.join(get_cache_dir(), self.id_str)
 
         def copy_cache_file(path):
-            """copy the given attic cache path into the borg directory
+            """Copy the given Attic cache path into the Borg directory.
 
-            does nothing if dryrun is True. also expects
+            Does nothing if dryrun is True. Also expects
             attic_cache_dir and borg_cache_dir to be set in the parent
-            scope, to the directories path including the repository
+            scope, set to the directory paths including the repository
             identifier.
 
-            :params path: the basename of the cache file to copy
-            (example: "files" or "chunks") as a string
-
-            :returns: the borg file that was created or None if no
-            Attic cache file was found.
-
+            :param path: the basename of the cache file to copy
+                (example: "files" or "chunks") as a string
+            :returns: the Borg file that was created, or None if no
+                Attic cache file was found.
             """
             attic_file = os.path.join(attic_cache_dir, path)
             if os.path.exists(attic_file):

+ 2 - 2
src/borg/version.py

@@ -13,7 +13,7 @@ def parse_version(version):
     For final versions the last element is a -1.
     For prerelease versions the last two elements are a smaller negative number and the number of e.g. the beta.
 
-    This version format is part of the remote protocol, don‘t change in breaking ways.
+    This version format is part of the remote protocol; don't change it in breaking ways.
     """
     version_re = r"""
         (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)   # version, e.g. 1.2.33
@@ -34,7 +34,7 @@ def parse_version(version):
 
 
 def format_version(version):
-    """a reverse for parse_version (obviously without the dropped information)"""
+    """A reverse for parse_version (obviously without the dropped information)."""
     f = []
     it = iter(version)
     while True:
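
The documented invariants, spelled out (a sketch asserting only what the docstrings above state)::

    from borg.version import parse_version, format_version

    v = parse_version('1.2.33')
    assert v[-1] == -1                    # final versions end with -1
    assert format_version(v) == '1.2.33'  # round-trips for final versions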

+ 13 - 14
src/borg/xattr.py

@@ -39,8 +39,7 @@ if sys.platform.startswith('linux'):
 
 
 def is_enabled(path=None):
-    """Determine if xattr is enabled on the filesystem
-    """
+    """Determine if xattr is enabled on the filesystem."""
     with tempfile.NamedTemporaryFile(dir=path, prefix='borg-tmp') as f:
         fd = f.fileno()
         name, value = b'user.name', b'value'
@@ -66,7 +65,7 @@ def get_all(path, follow_symlinks=False):
     and only applies when *path* is not an open file descriptor.
 
     The returned mapping maps xattr names (bytes) to values (bytes or None).
-    None indicates, as a xattr value, an empty value, i.e. a value of length zero.
+    None indicates, as an xattr value, an empty value, i.e. a value of length zero.
     """
     if isinstance(path, str):
         path = os.fsencode(path)
@@ -75,18 +74,18 @@ def get_all(path, follow_symlinks=False):
         names = listxattr(path, follow_symlinks=follow_symlinks)
         for name in names:
             try:
-                # xattr name is a bytes object, we directly use it.
-                # if we get an empty xattr value (b''), we store None into the result dict -
-                # borg always did it like that...
+                # xattr name is a bytes object; we directly use it.
+                # If we get an empty xattr value (b''), we store None into the result dict;
+                # Borg has always done it like that.
                 result[name] = getxattr(path, name, follow_symlinks=follow_symlinks) or None
             except OSError as e:
-                # note: platform.xattr._check has already made a nice exception e with errno, msg, path/fd
-                if e.errno in (ENOATTR, ):  # errors we just ignore silently
-                    # ENOATTR: a race has happened: xattr names were deleted after list.
+                # Note: platform.xattr._check has already made a nice exception e with errno, msg, path/fd
+                if e.errno in (ENOATTR, ):  # errors we ignore silently
+                    # ENOATTR: a race has happened: xattr names were deleted after listing.
                     pass
                 else:  # all others: warn, skip this single xattr name, continue processing other xattrs
                     # EPERM: we were not permitted to read this attribute
-                    # EINVAL: maybe xattr name is invalid or other issue, #6988
+                    # EINVAL: maybe the xattr name is invalid, or some other issue occurred (#6988)
                     logger.warning('when getting extended attribute %s: %s', name.decode(errors='replace'), str(e))
     except OSError as e:
         if e.errno in (errno.ENOTSUP, errno.EPERM):
@@ -105,8 +104,8 @@ def set_all(path, xattrs, follow_symlinks=False):
     *path* can either be a path (str or bytes) or an open file descriptor (int).
     *follow_symlinks* indicates whether symlinks should be followed
     and only applies when *path* is not an open file descriptor.
-    *xattrs* is mapping maps xattr names (bytes) to values (bytes or None).
-    None indicates, as a xattr value, an empty value, i.e. a value of length zero.
+    *xattrs* is a mapping that maps xattr names (bytes) to values (bytes or None).
+    None indicates, as an xattr value, an empty value, i.e. a value of length zero.
 
     Return warning status (True means a non-fatal exception has happened and was dealt with).
     """
@@ -124,8 +123,8 @@ def set_all(path, xattrs, follow_symlinks=False):
             if e.errno == errno.E2BIG:
                 err_str = 'too big for this filesystem (%s)' % str(e)
             elif e.errno == errno.ENOSPC:
-                # ext4 reports ENOSPC when trying to set an xattr with >4kiB while ext4 can only support 4kiB xattrs
-                # (in this case, this is NOT a "disk full" error, just a ext4 limitation).
+                # ext4 reports ENOSPC when trying to set an xattr with >4 KiB while ext4 can only support 4 KiB xattrs
+                # (in this case, this is NOT a "disk full" error, just an ext4 limitation).
                 err_str = 'fs full or xattr too big? [xattr len = %d] (%s)' % (len(v), str(e))
             else:
                 # generic handler
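
A usage sketch for the pair (paths are made up; None stands for an empty xattr value in both directions)::

    from borg.xattr import get_all, set_all

    xattrs = get_all('/tmp/source')         # e.g. {b'user.comment': b'hi', b'user.empty': None}
    warning = set_all('/tmp/copy', xattrs)  # True means a non-fatal problem was handled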