
Merge pull request #8914 from ThomasWaldmann/pathlib

refactor: use pathlib.Path
TW 1 week ago
parent
commit
a44e259b89

+ 5 - 5
src/borg/archiver/compact_cmd.py

@@ -1,5 +1,5 @@
 import argparse
-import os
+from pathlib import Path
 
 from ._common import with_repository
 from ..archive import Archive
@@ -83,8 +83,8 @@ class ArchiveGarbageCollector:
         """
         logger.info("Cleaning up files cache...")
 
-        cache_dir = os.path.join(get_cache_dir(), self.repository.id_str)
-        if not os.path.exists(cache_dir):
+        cache_dir = Path(get_cache_dir()) / self.repository.id_str
+        if not cache_dir.exists():
             logger.debug("Cache directory does not exist, skipping files cache cleanup")
             return
 
@@ -104,9 +104,9 @@ class ArchiveGarbageCollector:
         unused_files_cache_names = files_cache_names - used_files_cache_names
 
         for cache_filename in unused_files_cache_names:
-            cache_path = os.path.join(cache_dir, cache_filename)
+            cache_path = cache_dir / cache_filename
             try:
-                os.unlink(cache_path)
+                cache_path.unlink()
             except (FileNotFoundError, PermissionError) as e:
                 logger.warning(f"Could not access cache file: {e}")
         logger.info(f"Removed {len(unused_files_cache_names)} unused files cache files.")

+ 26 - 26
src/borg/cache.py

@@ -5,6 +5,7 @@ import shutil
 import stat
 from collections import namedtuple
 from datetime import datetime, timezone, timedelta
+from pathlib import Path
 from time import perf_counter
 
 from borgstore.backends.errors import PermissionDenied
@@ -63,7 +64,7 @@ def discover_files_cache_names(path, files_cache_name="files"):
     :param files_cache_name: base name of the files cache files
     :return: list of files cache file names
     """
-    return [fn for fn in os.listdir(path) if fn.startswith(files_cache_name + ".")]
+    return [p.name for p in path.iterdir() if p.name.startswith(files_cache_name + ".")]
 
 
 # chunks is a list of ChunkListEntry
@@ -92,34 +93,33 @@ class SecurityManager:
 
     def __init__(self, repository):
         self.repository = repository
-        self.dir = get_security_dir(repository.id_str, legacy=(repository.version == 1))
-        self.cache_dir = cache_dir(repository)
-        self.key_type_file = os.path.join(self.dir, "key-type")
-        self.location_file = os.path.join(self.dir, "location")
-        self.manifest_ts_file = os.path.join(self.dir, "manifest-timestamp")
+        self.dir = Path(get_security_dir(repository.id_str, legacy=(repository.version == 1)))
+        self.key_type_file = self.dir / "key-type"
+        self.location_file = self.dir / "location"
+        self.manifest_ts_file = self.dir / "manifest-timestamp"
 
     @staticmethod
     def destroy(repository, path=None):
         """destroy the security dir for ``repository`` or at ``path``"""
         path = path or get_security_dir(repository.id_str, legacy=(repository.version == 1))
-        if os.path.exists(path):
+        if Path(path).exists():
             shutil.rmtree(path)
 
     def known(self):
-        return all(os.path.exists(f) for f in (self.key_type_file, self.location_file, self.manifest_ts_file))
+        return all(f.exists() for f in (self.key_type_file, self.location_file, self.manifest_ts_file))
 
     def key_matches(self, key):
         if not self.known():
             return False
         try:
-            with open(self.key_type_file) as fd:
+            with self.key_type_file.open() as fd:
                 type = fd.read()
                 return type == str(key.TYPE)
         except OSError as exc:
             logger.warning("Could not read/parse key type file: %s", exc)
 
     def save(self, manifest, key):
-        logger.debug("security: saving state for %s to %s", self.repository.id_str, self.dir)
+        logger.debug("security: saving state for %s to %s", self.repository.id_str, str(self.dir))
         current_location = self.repository._location.canonical_path()
         logger.debug("security: current location   %s", current_location)
         logger.debug("security: key type           %s", str(key.TYPE))
@@ -134,7 +134,7 @@ class SecurityManager:
     def assert_location_matches(self):
         # Warn user before sending data to a relocated repository
         try:
-            with open(self.location_file) as fd:
+            with self.location_file.open() as fd:
                 previous_location = fd.read()
             logger.debug("security: read previous location %r", previous_location)
         except FileNotFoundError:
@@ -167,7 +167,7 @@ class SecurityManager:
 
     def assert_no_manifest_replay(self, manifest, key):
         try:
-            with open(self.manifest_ts_file) as fd:
+            with self.manifest_ts_file.open() as fd:
                 timestamp = fd.read()
             logger.debug("security: read manifest timestamp %r", timestamp)
         except FileNotFoundError:
@@ -235,7 +235,7 @@ def assert_secure(repository, manifest):
 
 
 def cache_dir(repository, path=None):
-    return path or os.path.join(get_cache_dir(), repository.id_str)
+    return Path(path) if path else Path(get_cache_dir()) / repository.id_str
 
 
 class CacheConfig:
@@ -243,7 +243,7 @@ class CacheConfig:
         self.repository = repository
         self.path = cache_dir(repository, path)
         logger.debug("Using %s as cache", self.path)
-        self.config_path = os.path.join(self.path, "config")
+        self.config_path = self.path / "config"
 
     def __enter__(self):
         self.open()
@@ -253,7 +253,7 @@ class CacheConfig:
         self.close()
 
     def exists(self):
-        return os.path.exists(self.config_path)
+        return self.config_path.exists()
 
     def create(self):
         assert not self.exists()
@@ -272,7 +272,7 @@ class CacheConfig:
 
     def load(self):
         self._config = configparser.ConfigParser(interpolation=None)
-        with open(self.config_path) as fd:
+        with self.config_path.open() as fd:
             self._config.read_file(fd)
         self._check_upgrade(self.config_path)
         self.id = self._config.get("cache", "repository")
@@ -361,10 +361,10 @@ class Cache:
     @staticmethod
     def destroy(repository, path=None):
         """destroy the cache for ``repository`` or at ``path``"""
-        path = path or os.path.join(get_cache_dir(), repository.id_str)
-        config = os.path.join(path, "config")
-        if os.path.exists(config):
-            os.remove(config)  # kill config first
+        path = cache_dir(repository, path)
+        config = path / "config"
+        if config.exists():
+            config.unlink()  # kill config first
             shutil.rmtree(path)
 
     def __new__(
@@ -540,7 +540,7 @@ class FilesCacheMixin:
         msg = None
         try:
             with IntegrityCheckedFile(
-                path=os.path.join(self.path, self.files_cache_name()),
+                path=str(self.path / self.files_cache_name()),
                 write=False,
                 integrity_data=self.cache_config.integrity.get(self.files_cache_name()),
             ) as fd:
@@ -583,7 +583,7 @@ class FilesCacheMixin:
         ttl = int(os.environ.get("BORG_FILES_CACHE_TTL", 2))
         files_cache_logger.debug("FILES-CACHE-SAVE: starting...")
         # TODO: use something like SaveFile here, but that didn't work due to SyncFile missing .seek().
-        with IntegrityCheckedFile(path=os.path.join(self.path, self.files_cache_name()), write=True) as fd:
+        with IntegrityCheckedFile(path=str(self.path / self.files_cache_name()), write=True) as fd:
             entries = 0
             age_discarded = 0
             race_discarded = 0
@@ -983,7 +983,7 @@ class AdHocWithFilesCache(FilesCacheMixin, ChunksMixin):
         self.cache_config = CacheConfig(self.repository, self.path)
 
         # Warn user before sending data to a never seen before unencrypted repository
-        if not os.path.exists(self.path):
+        if not self.path.exists():
             self.security_manager.assert_access_unknown(warn_if_unencrypted, manifest, self.key)
             self.create()
 
@@ -1009,13 +1009,13 @@ class AdHocWithFilesCache(FilesCacheMixin, ChunksMixin):
 
     def create(self):
         """Create a new empty cache at `self.path`"""
-        os.makedirs(self.path)
-        with open(os.path.join(self.path, "README"), "w") as fd:
+        self.path.mkdir(parents=True, exist_ok=True)
+        with open(self.path / "README", "w") as fd:
             fd.write(CACHE_README)
         self.cache_config.create()
 
     def open(self):
-        if not os.path.isdir(self.path):
+        if not self.path.is_dir():
             raise Exception("%s Does not look like a Borg cache" % self.path)
         self.cache_config.open()
         self.cache_config.load()
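
Aside (illustration, not part of the diff): discover_files_cache_names() now iterates with path.iterdir(), so callers must pass a Path rather than a str. A self-contained sketch of the new filtering, using a temporary directory and made-up cache file names:

    import tempfile
    from pathlib import Path

    with tempfile.TemporaryDirectory() as d:
        base = Path(d)
        for name in ("files.aaa", "files.bbb", "chunks"):
            (base / name).touch()
        # same filter as discover_files_cache_names(base, "files")
        found = [p.name for p in base.iterdir() if p.name.startswith("files.")]
        assert sorted(found) == ["files.aaa", "files.bbb"]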

+ 7 - 7
src/borg/crypto/file_integrity.py

@@ -1,9 +1,9 @@
 import hashlib
 import io
 import json
-import os
 from hmac import compare_digest
 from collections.abc import Callable
+from pathlib import Path
 
 from ..helpers import IntegrityError
 from ..logger import create_logger
@@ -162,7 +162,7 @@ class IntegrityCheckedFile(FileLikeWrapper):
         # Changing the name however imbues a change of context that is not permissible.
         # While Borg does not use anything except ASCII in these file names, it's important to use
         # the same encoding everywhere for portability. Using os.fsencode() would be wrong.
-        filename = os.path.basename(filename or self.path)
+        filename = Path(filename or self.path).name
         self.hasher.update(("%10d" % len(filename)).encode())
         self.hasher.update(filename.encode())
 
@@ -219,9 +219,9 @@ class IntegrityCheckedFile(FileLikeWrapper):
 class DetachedIntegrityCheckedFile(IntegrityCheckedFile):
     def __init__(self, path, write, filename=None, override_fd=None):
         super().__init__(path, write, filename, override_fd)
-        filename = filename or os.path.basename(path)
-        output_dir = os.path.dirname(path)
-        self.output_integrity_file = self.integrity_file_path(os.path.join(output_dir, filename))
+        path_obj = Path(path)
+        filename = filename or path_obj.name
+        self.output_integrity_file = self.integrity_file_path(path_obj.parent / filename)
 
     def load_integrity_data(self, path, integrity_data):
         assert not integrity_data, "Cannot pass explicit integrity_data to DetachedIntegrityCheckedFile"
@@ -229,7 +229,7 @@ class DetachedIntegrityCheckedFile(IntegrityCheckedFile):
 
     @staticmethod
     def integrity_file_path(path):
-        return path + ".integrity"
+        return Path(str(path) + ".integrity")
 
     @classmethod
     def read_integrity_file(cls, path):
@@ -243,5 +243,5 @@ class DetachedIntegrityCheckedFile(IntegrityCheckedFile):
             raise FileIntegrityError(path)
 
     def store_integrity_data(self, data: str):
-        with open(self.output_integrity_file, "w") as fd:
+        with self.output_integrity_file.open("w") as fd:
             fd.write(data)
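
Aside (illustration, not part of the diff): integrity_file_path() appends ".integrity" via string concatenation rather than Path.with_suffix(), presumably because with_suffix() replaces an existing suffix instead of appending to it:

    from pathlib import Path

    p = Path("cache/files.1")
    assert str(p.with_suffix(".integrity")) == "cache/files.integrity"    # '.1' is lost
    assert str(Path(str(p) + ".integrity")) == "cache/files.1.integrity"  # appended, as intended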

+ 9 - 8
src/borg/crypto/key.py

@@ -3,6 +3,7 @@ import hmac
 import os
 import textwrap
 from hashlib import sha256, pbkdf2_hmac
+from pathlib import Path
 from typing import Literal, ClassVar
 from collections.abc import Callable
 
@@ -642,11 +643,11 @@ class FlexiKey:
 
     def _find_key_in_keys_dir(self):
         id = self.repository.id
-        keys_dir = get_keys_dir()
-        for name in os.listdir(keys_dir):
-            filename = os.path.join(keys_dir, name)
+        keys_path = Path(get_keys_dir())
+        for entry in keys_path.iterdir():
+            filename = keys_path / entry.name
             try:
-                return self.sanity_check(filename, id)
+                return self.sanity_check(str(filename), id)
             except (KeyfileInvalidError, KeyfileMismatchError):
                 pass
 
@@ -668,12 +669,12 @@ class FlexiKey:
 
     def _get_new_target_in_keys_dir(self, args):
         filename = args.location.to_key_filename()
-        path = filename
+        path = Path(filename)
         i = 1
-        while os.path.exists(path):
+        while path.exists():
             i += 1
-            path = filename + ".%d" % i
-        return path
+            path = Path(filename + ".%d" % i)
+        return str(path)
 
     def load(self, target, passphrase):
         if self.STORAGE == KeyBlobStorage.KEYFILE:
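
Aside (illustration, not part of the diff): the probing loop in _get_new_target_in_keys_dir(), extracted as a standalone sketch; it returns the first of wanted, wanted.2, wanted.3, ... that does not exist yet:

    from pathlib import Path

    def next_free_name(wanted: str) -> str:
        path, i = Path(wanted), 1
        while path.exists():
            i += 1
            path = Path(f"{wanted}.{i}")  # wanted.2, wanted.3, ...
        return str(path)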

+ 33 - 31
src/borg/fslocking.py

@@ -3,6 +3,7 @@ import json
 import os
 import tempfile
 import time
+from pathlib import Path
 
 from . import platform
 from .helpers import Error, ErrorWithTraceback
@@ -119,9 +120,9 @@ class ExclusiveLock:
     def __init__(self, path, timeout=None, sleep=None, id=None):
         self.timeout = timeout
         self.sleep = sleep
-        self.path = os.path.abspath(path)
+        self.path = Path(path).absolute()
         self.id = id or platform.get_process_id()
-        self.unique_name = os.path.join(self.path, "%s.%d-%x" % self.id)
+        self.unique_name = self.path / ("%s.%d-%x" % self.id)
         self.kill_stale_locks = True
         self.stale_warning_printed = False
 
@@ -132,34 +133,34 @@ class ExclusiveLock:
         self.release()
 
     def __repr__(self):
-        return f"<{self.__class__.__name__}: {self.unique_name!r}>"
+        return f"<{self.__class__.__name__}: {str(self.unique_name)!r}>"
 
     def acquire(self, timeout=None, sleep=None):
         if timeout is None:
             timeout = self.timeout
         if sleep is None:
             sleep = self.sleep
-        parent_path, base_name = os.path.split(self.path)
-        unique_base_name = os.path.basename(self.unique_name)
+        parent_path, base_name = str(self.path.parent), self.path.name
+        unique_base_name = self.unique_name.name
         temp_path = None
         try:
             temp_path = tempfile.mkdtemp(".tmp", base_name + ".", parent_path)
-            temp_unique_name = os.path.join(temp_path, unique_base_name)
-            with open(temp_unique_name, "wb"):
+            temp_unique_name = Path(temp_path) / unique_base_name
+            with temp_unique_name.open("wb"):
                 pass
         except OSError as err:
-            raise LockFailed(self.path, str(err)) from None
+            raise LockFailed(str(self.path), str(err)) from None
         else:
             timer = TimeoutTimer(timeout, sleep).start()
             while True:
                 try:
-                    os.replace(temp_path, self.path)
+                    Path(temp_path).replace(str(self.path))
                 except OSError:  # already locked
                     if self.by_me():
                         return self
                     self.kill_stale_lock()
                     if timer.timed_out_or_sleep():
-                        raise LockTimeout(self.path) from None
+                        raise LockTimeout(str(self.path)) from None
                 else:
                     temp_path = None  # see finally:-block below
                     return self
@@ -178,13 +179,13 @@ class ExclusiveLock:
 
     def release(self):
         if not self.is_locked():
-            raise NotLocked(self.path)
+            raise NotLocked(str(self.path))
         if not self.by_me():
-            raise NotMyLock(self.path)
-        os.unlink(self.unique_name)
+            raise NotMyLock(str(self.path))
+        self.unique_name.unlink()
         for retry in range(42):
             try:
-                os.rmdir(self.path)
+                self.path.rmdir()
             except OSError as err:
                 if err.errno in (errno.EACCES,):
                     # windows behaving strangely? -> just try again.
@@ -198,14 +199,14 @@ class ExclusiveLock:
             return
 
     def is_locked(self):
-        return os.path.exists(self.path)
+        return self.path.exists()
 
     def by_me(self):
-        return os.path.exists(self.unique_name)
+        return self.unique_name.exists()
 
     def kill_stale_lock(self):
         try:
-            names = os.listdir(self.path)
+            names = [p.name for p in self.path.iterdir()]
         except FileNotFoundError:  # another process did our job in the meantime.
             return False
         except PermissionError:  # win32 might throw this.
@@ -219,7 +220,7 @@ class ExclusiveLock:
                     thread = int(thread_str, 16)
                 except ValueError:
                     # Malformed lock name? Or just some new format we don't understand?
-                    logger.error("Found malformed lock %s in %s. Please check/fix manually.", name, self.path)
+                    logger.error("Found malformed lock %s in %s. Please check/fix manually.", name, str(self.path))
                     return False
 
                 if platform.process_alive(host, pid, thread):
@@ -235,7 +236,7 @@ class ExclusiveLock:
                     return False
 
                 try:
-                    os.unlink(os.path.join(self.path, name))
+                    (self.path / name).unlink()
                     logger.warning("Killed stale lock %s.", name)
                 except OSError as err:
                     if not self.stale_warning_printed:
@@ -245,7 +246,7 @@ class ExclusiveLock:
                     return False
 
         try:
-            os.rmdir(self.path)
+            self.path.rmdir()
         except OSError as err:
             if err.errno in (errno.ENOTEMPTY, errno.EEXIST, errno.ENOENT):
                 # Directory is not empty or doesn't exist any more = we lost the race to somebody else--which is ok.
@@ -258,18 +259,18 @@ class ExclusiveLock:
 
     def break_lock(self):
         if self.is_locked():
-            for name in os.listdir(self.path):
-                os.unlink(os.path.join(self.path, name))
-            os.rmdir(self.path)
+            for path_obj in self.path.iterdir():
+                path_obj.unlink()
+            self.path.rmdir()
 
     def migrate_lock(self, old_id, new_id):
         """migrate the lock ownership from old_id to new_id"""
         assert self.id == old_id
-        new_unique_name = os.path.join(self.path, "%s.%d-%x" % new_id)
+        new_unique_name = self.path / ("%s.%d-%x" % new_id)
         if self.is_locked() and self.by_me():
-            with open(new_unique_name, "wb"):
+            with new_unique_name.open("wb"):
                 pass
-            os.unlink(self.unique_name)
+            self.unique_name.unlink()
         self.id, self.unique_name = new_id, new_unique_name
 
 
@@ -282,13 +283,14 @@ class LockRoster:
     """
 
     def __init__(self, path, id=None):
+        assert isinstance(path, Path)
         self.path = path
         self.id = id or platform.get_process_id()
         self.kill_stale_locks = True
 
     def load(self):
         try:
-            with open(self.path) as f:
+            with self.path.open() as f:
                 data = json.load(f)
 
             # Just nuke the stale locks early on load
@@ -313,12 +315,12 @@ class LockRoster:
         return data
 
     def save(self, data):
-        with open(self.path, "w") as f:
+        with self.path.open("w") as f:
             json.dump(data, f)
 
     def remove(self):
         try:
-            os.unlink(self.path)
+            self.path.unlink()
         except FileNotFoundError:
             pass
 
@@ -392,11 +394,11 @@ class Lock:
         self.timeout = timeout
         self.id = id or platform.get_process_id()
         # globally keeping track of shared and exclusive lockers:
-        self._roster = LockRoster(path + ".roster", id=id)
+        self._roster = LockRoster(Path(path + ".roster"), id=id)
         # an exclusive lock, used for:
         # - holding while doing roster queries / updates
         # - holding while the Lock itself is exclusive
-        self._lock = ExclusiveLock(path + ".exclusive", id=id, timeout=timeout)
+        self._lock = ExclusiveLock(str(Path(path + ".exclusive")), id=id, timeout=timeout)
 
     def __enter__(self):
         return self.acquire()
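
Aside (illustration, not part of the diff): acquire() keeps the old os.replace() trick via Path.replace(): renaming a staged directory onto the lock path only succeeds while the lock path does not exist yet (a held lock dir is never empty), so the first renamer wins atomically. A POSIX-only sketch:

    import tempfile
    from pathlib import Path

    with tempfile.TemporaryDirectory() as d:
        lock = Path(d) / "lock"
        stage1, stage2 = Path(d) / "s1", Path(d) / "s2"
        for stage in (stage1, stage2):
            stage.mkdir()
            (stage / "owner").touch()  # each candidate carries its unique file
        stage1.replace(lock)           # first rename wins the lock
        try:
            stage2.replace(lock)       # fails: target exists and is not empty
            raise AssertionError("lock was stolen")
        except OSError:
            pass                       # "already locked" path in acquire()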

+ 23 - 20
src/borg/helpers/fs.py

@@ -7,6 +7,7 @@ import stat
 import subprocess
 import sys
 import textwrap
+from pathlib import Path
 
 import platformdirs
 
@@ -34,7 +35,7 @@ def ensure_dir(path, mode=stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO, pretty_dea
     An exception otherwise. If a deadly exception happened it is reraised.
     """
     try:
-        os.makedirs(path, mode=mode, exist_ok=True)
+        Path(path).mkdir(mode=mode, parents=True, exist_ok=True)
     except OSError as e:
         if pretty_deadly:
             raise Error(str(e))
@@ -52,12 +53,12 @@ def get_base_dir(*, legacy=False):
     """
     if legacy:
         base_dir = os.environ.get("BORG_BASE_DIR") or os.environ.get("HOME")
-        # os.path.expanduser() behaves differently for '~' and '~someuser' as
+        # Path.expanduser() behaves differently for '~' and '~someuser' as
         # parameters: when called with an explicit username, the possibly set
         # environment variable HOME is no longer respected. So we have to check if
         # it is set and only expand the user's home directory if HOME is unset.
         if not base_dir:
-            base_dir = os.path.expanduser("~%s" % os.environ.get("USER", ""))
+            base_dir = str(Path(f"~{os.environ.get('USER', '')}").expanduser())
     else:
         # we only care for BORG_BASE_DIR here, as it can be used to override the base dir
         # and not use any more or less platform specific way to determine the base dir.
@@ -68,7 +69,7 @@ def get_base_dir(*, legacy=False):
 def join_base_dir(*paths, **kw):
     legacy = kw.get("legacy", True)
     base_dir = get_base_dir(legacy=legacy)
-    return None if base_dir is None else os.path.join(base_dir, *paths)
+    return None if base_dir is None else str(Path(base_dir).joinpath(*paths))
 
 
 def get_keys_dir(*, legacy=False, create=True):
@@ -76,7 +77,7 @@ def get_keys_dir(*, legacy=False, create=True):
     keys_dir = os.environ.get("BORG_KEYS_DIR")
     if keys_dir is None:
         # note: do not just give this as default to the environment.get(), see issue #5979.
-        keys_dir = os.path.join(get_config_dir(legacy=legacy), "keys")
+        keys_dir = str(Path(get_config_dir(legacy=legacy)) / "keys")
     if create:
         ensure_dir(keys_dir)
     return keys_dir
@@ -88,9 +89,9 @@ def get_security_dir(repository_id=None, *, legacy=False, create=True):
     if security_dir is None:
         get_dir = get_config_dir if legacy else get_data_dir
         # note: do not just give this as default to the environment.get(), see issue #5979.
-        security_dir = os.path.join(get_dir(legacy=legacy), "security")
+        security_dir = str(Path(get_dir(legacy=legacy)) / "security")
     if repository_id:
-        security_dir = os.path.join(security_dir, repository_id)
+        security_dir = str(Path(security_dir) / repository_id)
     if create:
         ensure_dir(security_dir)
     return security_dir
@@ -119,7 +120,7 @@ def get_runtime_dir(*, legacy=False, create=True):
 
 
 def get_socket_filename():
-    return os.path.join(get_runtime_dir(), "borg.sock")
+    return str(Path(get_runtime_dir()) / "borg.sock")
 
 
 def get_cache_dir(*, legacy=False, create=True):
@@ -132,15 +133,15 @@ def get_cache_dir(*, legacy=False, create=True):
         if not os.environ.get("BORG_BASE_DIR"):
             cache_home = os.environ.get("XDG_CACHE_HOME", cache_home)
         # Use BORG_CACHE_DIR if set, otherwise assemble final path from cache home path
-        cache_dir = os.environ.get("BORG_CACHE_DIR", os.path.join(cache_home, "borg"))
+        cache_dir = os.environ.get("BORG_CACHE_DIR", str(Path(cache_home) / "borg"))
     else:
         cache_dir = os.environ.get(
             "BORG_CACHE_DIR", join_base_dir(".cache", "borg", legacy=legacy) or platformdirs.user_cache_dir("borg")
         )
     if create:
         ensure_dir(cache_dir)
-        cache_tag_fn = os.path.join(cache_dir, CACHE_TAG_NAME)
-        if not os.path.exists(cache_tag_fn):
+        cache_tag_fn = Path(cache_dir) / CACHE_TAG_NAME
+        if not cache_tag_fn.exists():
             cache_tag_contents = (
                 CACHE_TAG_CONTENTS
                 + textwrap.dedent(
@@ -168,7 +169,7 @@ def get_config_dir(*, legacy=False, create=True):
         if not os.environ.get("BORG_BASE_DIR"):
             config_home = os.environ.get("XDG_CONFIG_HOME", config_home)
         # Use BORG_CONFIG_DIR if set, otherwise assemble final path from config home path
-        config_dir = os.environ.get("BORG_CONFIG_DIR", os.path.join(config_home, "borg"))
+        config_dir = os.environ.get("BORG_CONFIG_DIR", str(Path(config_home) / "borg"))
     else:
         config_dir = os.environ.get(
             "BORG_CONFIG_DIR", join_base_dir(".config", "borg", legacy=legacy) or platformdirs.user_config_dir("borg")
@@ -191,7 +192,7 @@ def dir_is_cachedir(path=None, dir_fd=None):
         if dir_fd is not None:
             tag_fd = os.open(CACHE_TAG_NAME, os.O_RDONLY, dir_fd=dir_fd)
         else:
-            tag_fd = os.open(os.path.join(path, CACHE_TAG_NAME), os.O_RDONLY)
+            tag_fd = os.open(str(Path(path) / CACHE_TAG_NAME), os.O_RDONLY)
         return os.read(tag_fd, len(CACHE_TAG_CONTENTS)) == CACHE_TAG_CONTENTS
     except (FileNotFoundError, OSError):
         return False
@@ -228,8 +229,8 @@ def dir_is_tagged(path=None, exclude_caches=None, exclude_if_present=None, dir_f
             tag_names.append(CACHE_TAG_NAME)
         if exclude_if_present is not None:
             for tag in exclude_if_present:
-                tag_path = os.path.join(path, tag)
-                if os.path.exists(tag_path):
+                tag_path = Path(path) / tag
+                if tag_path.exists():
                     tag_names.append(tag)
 
     return tag_names
@@ -417,13 +418,14 @@ def secure_erase(path, *, avoid_collateral_damage):
     If avoid_collateral_damage is False, we always secure erase.
     If there are hardlinks pointing to the same inode as <path>, they will contain random garbage afterwards.
     """
-    with open(path, "r+b") as fd:
+    path_obj = Path(path)
+    with path_obj.open("r+b") as fd:
         st = os.stat(fd.fileno())
         if not (st.st_nlink > 1 and avoid_collateral_damage):
             fd.write(os.urandom(st.st_size))
             fd.flush()
             os.fsync(fd.fileno())
-    os.unlink(path)
+    path_obj.unlink()
 
 
 def safe_unlink(path):
@@ -438,14 +440,15 @@ def safe_unlink(path):
     recover. Refer to the "File system interaction" section
     in legacyrepository.py for further explanations.
     """
+    path_obj = Path(path)
     try:
-        os.unlink(path)
+        path_obj.unlink()
     except OSError as unlink_err:
         if unlink_err.errno != errno.ENOSPC:
             # not free space related, give up here.
             raise
         # we ran out of space while trying to delete the file.
-        st = os.stat(path)
+        st = path_obj.stat()
         if st.st_nlink > 1:
             # rather give up here than cause collateral damage to the other hardlink.
             raise
@@ -459,7 +462,7 @@ def safe_unlink(path):
             raise unlink_err
         else:
             # successfully truncated the file, try again deleting it:
-            os.unlink(path)
+            path_obj.unlink()
 
 
 def dash_open(path, mode):
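
Aside (illustration, not part of the diff): the comment in get_base_dir() still applies with pathlib. On POSIX, '~' honors $HOME but '~user' goes straight to the password database, which is why the code checks HOME first:

    import os
    from pathlib import Path

    os.environ["HOME"] = "/fake/home"  # made-up value
    print(Path("~").expanduser())      # /fake/home   ($HOME wins)
    print(Path("~root").expanduser())  # e.g. /root   ($HOME ignored)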

+ 0 - 1
src/borg/helpers/misc.py

@@ -1,7 +1,6 @@
 import logging
 import io
 import os
-import os.path
 import platform  # python stdlib import - if this fails, check that cwd != src/borg/
 import sys
 from collections import deque

+ 3 - 0
src/borg/helpers/parseformat.py

@@ -10,6 +10,7 @@ import re
 import shlex
 import stat
 import uuid
+from pathlib import Path
 from typing import ClassVar, Any, TYPE_CHECKING, Literal
 from collections import OrderedDict
 from datetime import datetime, timezone
@@ -1163,6 +1164,8 @@ class BorgJsonEncoder(json.JSONEncoder):
             return o.info()
         if isinstance(o, (AdHocWithFilesCache,)):
             return {"path": o.path}
+        if isinstance(o, Path):
+            return str(o)
         if callable(getattr(o, "to_json", None)):
             return o.to_json()
         return super().default(o)
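
Aside (illustration, not part of the diff): the new isinstance(o, Path) branch is needed because json.dumps() has no default serialization for Path. A minimal sketch of the same pattern (the encoder name is hypothetical):

    import json
    from pathlib import Path

    class SketchEncoder(json.JSONEncoder):
        def default(self, o):
            if isinstance(o, Path):
                return str(o)           # serialize Path as plain string
            return super().default(o)

    print(json.dumps({"path": Path("/tmp/x")}, cls=SketchEncoder))  # {"path": "/tmp/x"}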

+ 0 - 1
src/borg/helpers/process.py

@@ -1,6 +1,5 @@
 import contextlib
 import os
-import os.path
 import shlex
 import signal
 import subprocess

+ 8 - 7
src/borg/logger.py

@@ -58,8 +58,9 @@ import queue
 import sys
 import time
 import warnings
+from pathlib import Path
 
-logging_debugging_path: str | None = None  # if set, write borg.logger debugging log to path/borg-*.log
+logging_debugging_path: Path | None = None  # if set, write borg.logger debugging log to that path/borg-*.log
 
 configured = False
 borg_serve_log_queue: queue.SimpleQueue = queue.SimpleQueue()
@@ -167,12 +168,12 @@ def setup_logging(
         conf_fname = os.environ.get(env_var, conf_fname)
     if conf_fname:
         try:
-            conf_fname = os.path.abspath(conf_fname)
+            conf_path = Path(conf_fname).absolute()
             # we open the conf file here to be able to give a reasonable
             # error message in case of failure (if we give the filename to
             # fileConfig(), it silently ignores unreadable files and gives
             # unhelpful error msgs like "No section: 'formatters'"):
-            with open(conf_fname) as f:
+            with conf_path.open() as f:
                 logging.config.fileConfig(f)
             configured = True
             logger = logging.getLogger(__name__)
@@ -195,8 +196,8 @@ def setup_logging(
 
     if logging_debugging_path is not None:
         # add an addtl. root handler for debugging purposes
-        log_fname = os.path.join(logging_debugging_path, f"borg-{'serve' if is_serve else 'client'}-root.log")
-        handler2 = logging.StreamHandler(open(log_fname, "a"))
+        log_path = logging_debugging_path / (f"borg-{'serve' if is_serve else 'client'}-root.log")
+        handler2 = logging.StreamHandler(log_path.open("a"))
         handler2.setFormatter(formatter)
         logger.addHandler(handler2)
         logger.warning(f"--- {func} ---")  # only handler2 shall get this
@@ -213,8 +214,8 @@ def setup_logging(
 
     if logging_debugging_path is not None:
         # add an addtl. progress handler for debugging purposes
-        log_fname = os.path.join(logging_debugging_path, f"borg-{'serve' if is_serve else 'client'}-progress.log")
-        bop_handler2 = logging.StreamHandler(open(log_fname, "a"))
+        log_path = logging_debugging_path / (f"borg-{'serve' if is_serve else 'client'}-progress.log")
+        bop_handler2 = logging.StreamHandler(log_path.open("a"))
         bop_handler2.setFormatter(bop_formatter)
         bop_logger.addHandler(bop_handler2)
         json_dict = dict(

+ 7 - 5
src/borg/platform/base.py

@@ -2,6 +2,7 @@ import errno
 import os
 import socket
 import uuid
+from pathlib import Path
 
 from ..helpers import safe_unlink
 from ..platformflags import is_win32
@@ -102,7 +103,7 @@ def sync_dir(path):
         # Opening directories is not supported on windows.
         # TODO: do we need to handle this in some other way?
         return
-    fd = os.open(path, os.O_RDONLY)
+    fd = os.open(str(path), os.O_RDONLY)
     try:
         os.fsync(fd)
     except OSError as os_error:
@@ -164,7 +165,7 @@ class SyncFile:
         mode = "xb" if binary else "x"  # x -> raise FileExists exception in open() if file exists already
         self.path = path
         if fd is None:
-            self.f = open(path, mode=mode)  # python file object
+            self.f = open(str(path), mode=mode)  # python file object
         else:
             self.f = os.fdopen(fd, mode=mode)
         self.fd = self.f.fileno()  # OS-level fd
@@ -197,7 +198,7 @@ class SyncFile:
 
         dirname = None
         try:
-            dirname = os.path.dirname(self.path)
+            dirname = Path(self.path).parent
             self.sync()
         finally:
             self.f.close()
@@ -225,8 +226,9 @@ class SaveFile:
     def __init__(self, path, binary=False):
         self.binary = binary
         self.path = path
-        self.dir = os.path.dirname(path)
-        self.tmp_prefix = os.path.basename(path) + "-"
+        path_obj = Path(path)
+        self.dir = str(path_obj.parent)
+        self.tmp_prefix = path_obj.name + "-"
         self.tmp_fd = None  # OS-level fd
         self.tmp_fname = None  # full path/filename corresponding to self.tmp_fd
         self.f = None  # python-file-like SyncFile

+ 4 - 3
src/borg/testsuite/archiver/compact_cmd_test.py

@@ -1,4 +1,5 @@
-import os
+from pathlib import Path
+
 import pytest
 
 from ...constants import *  # NOQA
@@ -104,8 +105,8 @@ def test_compact_files_cache_cleanup(archivers, request):
         pytest.fail("Could not find repository ID in info output")
 
     # Check cache directory for files cache files
-    cache_dir = os.path.join(get_cache_dir(), repo_id)
-    if not os.path.exists(cache_dir):
+    cache_dir = Path(get_cache_dir()) / repo_id
+    if not cache_dir.exists():
         pytest.skip("Cache directory does not exist, skipping test")
 
     # Get initial files cache files

+ 2 - 1
src/borg/testsuite/fslocking_test.py

@@ -1,5 +1,6 @@
 import random
 import time
+from pathlib import Path
 from threading import Thread, Lock as ThreadingLock
 from traceback import format_exc
 
@@ -306,7 +307,7 @@ class TestLock:
 
 @pytest.fixture()
 def rosterpath(tmpdir):
-    return str(tmpdir.join("roster"))
+    return Path(tmpdir) / "roster"
 
 
 class TestLockRoster:

+ 3 - 2
src/borg/testsuite/helpers/fs_test.py

@@ -2,6 +2,7 @@ import errno
 import os
 import sys
 from contextlib import contextmanager
+from pathlib import Path
 
 import pytest
 
@@ -257,10 +258,10 @@ def test_safe_unlink_is_safe_ENOSPC(tmpdir, monkeypatch):
     hard_link = tmpdir / "hardlink"
     os.link(str(victim), str(hard_link))  # hard_link.mklinkto is not implemented on win32
 
-    def os_unlink(_):
+    def Path_unlink(_):
         raise OSError(errno.ENOSPC, "Pretend that we ran out of space")
 
-    monkeypatch.setattr(os, "unlink", os_unlink)
+    monkeypatch.setattr(Path, "unlink", Path_unlink)
 
     with pytest.raises(OSError):
         safe_unlink(hard_link)
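
Aside (illustration, not part of the diff): since safe_unlink() now calls Path.unlink() instead of os.unlink(), the ENOSPC fault has to be injected on the Path class; patching the method on the class intercepts every instance call. A pytest-style sketch with a hypothetical test name:

    import errno

    import pytest
    from pathlib import Path

    def test_unlink_pretends_enospc(monkeypatch, tmp_path):
        victim = tmp_path / "victim"
        victim.touch()

        def fake_unlink(self, missing_ok=False):  # matches Path.unlink's signature
            raise OSError(errno.ENOSPC, "Pretend that we ran out of space")

        monkeypatch.setattr(Path, "unlink", fake_unlink)
        with pytest.raises(OSError):
            victim.unlink()  # routed through fake_unlink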

+ 12 - 11
src/borg/testsuite/shell_completions_test.py

@@ -1,14 +1,15 @@
-import os
 import subprocess
+from pathlib import Path
+
 import pytest
 
-SHELL_COMPLETIONS_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "..", "scripts", "shell_completions")
+SHELL_COMPLETIONS_DIR = Path(__file__).parent / ".." / ".." / ".." / "scripts" / "shell_completions"
 
 
 def test_bash_completion_is_valid():
     """Test that the bash completion file is valid bash syntax."""
-    bash_completion_file = os.path.join(SHELL_COMPLETIONS_DIR, "bash", "borg")
-    assert os.path.isfile(bash_completion_file)
+    bash_completion_file = SHELL_COMPLETIONS_DIR / "bash" / "borg"
+    assert bash_completion_file.is_file()
 
     # Check if bash is available
     try:
@@ -17,14 +18,14 @@ def test_bash_completion_is_valid():
         pytest.skip("bash not available")
 
     # Test if the bash completion file can be sourced without errors
-    result = subprocess.run(["bash", "-n", bash_completion_file], capture_output=True)
+    result = subprocess.run(["bash", "-n", str(bash_completion_file)], capture_output=True)
     assert result.returncode == 0, f"Bash completion file has syntax errors: {result.stderr.decode()}"
 
 
 def test_fish_completion_is_valid():
     """Test that the fish completion file is valid fish syntax."""
-    fish_completion_file = os.path.join(SHELL_COMPLETIONS_DIR, "fish", "borg.fish")
-    assert os.path.isfile(fish_completion_file)
+    fish_completion_file = SHELL_COMPLETIONS_DIR / "fish" / "borg.fish"
+    assert fish_completion_file.is_file()
 
     # Check if fish is available
     try:
@@ -33,14 +34,14 @@ def test_fish_completion_is_valid():
         pytest.skip("fish not available")
 
     # Test if the fish completion file can be sourced without errors
-    result = subprocess.run(["fish", "-c", f"source {fish_completion_file}"], capture_output=True)
+    result = subprocess.run(["fish", "-c", f"source {str(fish_completion_file)}"], capture_output=True)
     assert result.returncode == 0, f"Fish completion file has syntax errors: {result.stderr.decode()}"
 
 
 def test_zsh_completion_is_valid():
     """Test that the zsh completion file is valid zsh syntax."""
-    zsh_completion_file = os.path.join(SHELL_COMPLETIONS_DIR, "zsh", "_borg")
-    assert os.path.isfile(zsh_completion_file)
+    zsh_completion_file = SHELL_COMPLETIONS_DIR / "zsh" / "_borg"
+    assert zsh_completion_file.is_file()
 
     # Check if zsh is available
     try:
@@ -49,5 +50,5 @@ def test_zsh_completion_is_valid():
         pytest.skip("zsh not available")
 
     # Test if the zsh completion file can be sourced without errors
-    result = subprocess.run(["zsh", "-n", zsh_completion_file], capture_output=True)
+    result = subprocess.run(["zsh", "-n", str(zsh_completion_file)], capture_output=True)
     assert result.returncode == 0, f"Zsh completion file has syntax errors: {result.stderr.decode()}"