Merge pull request #8379 from ThomasWaldmann/backup-series

backup series, fixes #7930
TW 8 months ago
commit 29c7ce4e1f

+ 6 - 6
docs/deployment/image-backup.rst

@@ -9,8 +9,8 @@ technique makes sure only the modified parts of the file are stored. Borg also h
 optional simple sparse file support for extract.
 
 It is of utmost importance to pin down the disk you want to back up.
-You need to use the SERIAL for that. 
-Use: 
+You need to use the SERIAL for that.
+Use:
 
 .. code-block:: bash
 
@@ -26,10 +26,10 @@ Use:
     echo "Disk Identifier: $DISK_ID"
 
     # Use the following line to perform a borg backup for the full disk:
-    # borg create --read-special {now} "$DISK_ID"
+    # borg create --read-special disk-backup "$DISK_ID"
 
     # Use the following to perform a borg backup for all partitions of the disk
-    # borg create --read-special {now} "${PARTITIONS[@]}"
+    # borg create --read-special partitions-backup "${PARTITIONS[@]}"
 
     # Example output:
     # Partitions of /dev/nvme1n1:
@@ -37,8 +37,8 @@ Use:
     # /dev/nvme1n1p2
     # /dev/nvme1n1p3
     # Disk Identifier: /dev/nvme1n1
-    # borg create --read-special {now} /dev/nvme1n1
-    # borg create --read-special {now} /dev/nvme1n1p1 /dev/nvme1n1p2 /dev/nvme1n1p3
+    # borg create --read-special disk-backup /dev/nvme1n1
+    # borg create --read-special partitions-backup /dev/nvme1n1p1 /dev/nvme1n1p2 /dev/nvme1n1p3
 
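For context, the pinning step this file documents can be scripted end to end. A minimal sketch, assuming a udev-style ``/dev/disk/by-id`` layout; the serial number and the ``nvme-`` prefix are illustrative and must be adapted to your system::

    # resolve the stable /dev/disk/by-id symlink for the disk with the given serial
    SERIAL="S4EWNX0M802309"    # hypothetical serial number, see: ls /dev/disk/by-id/
    DISK_ID="$(readlink -f /dev/disk/by-id/nvme-*_"${SERIAL}")"
    echo "Disk Identifier: $DISK_ID"
    # back up the whole disk into the "disk-backup" series
    borg create --read-special disk-backup "$DISK_ID"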

+ 6 - 6
docs/faq.rst

@@ -318,16 +318,16 @@ How do I configure different prune policies for different directories?
 
 
 Say you want to prune ``/var/log`` faster than the rest of
 ``/``. How do we implement that? The answer is to back up to different
-archive *names* and then implement different prune policies for
-different prefixes. For example, you could have a script that does::
+archive *series* and then implement different prune policies for the
+different series. For example, you could have a script that does::
 
-    borg create --exclude var/log main-$(date +%Y-%m-%d) /
-    borg create logs-$(date +%Y-%m-%d) /var/log
+    borg create --exclude var/log main /
+    borg create logs /var/log
 
 Then you would have two different prune calls with different policies::
 
-    borg prune --verbose --list -d 30 -a 'sh:main-*'
-    borg prune --verbose --list -d 7  -a 'sh:logs-*'
+    borg prune --verbose --list -d 30 main
+    borg prune --verbose --list -d 7  logs
 
 This will keep 7 days of logs and 30 days of everything else.
 
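Put together as one script, the two series and their prune policies could look like this minimal sketch (assuming ``BORG_REPO`` and ``BORG_PASSPHRASE`` are exported in the environment)::

    #!/bin/bash
    set -e
    # one archive series per prune policy
    borg create --exclude var/log main /
    borg create logs /var/log
    # prune each series according to its own retention rules
    borg prune --list -d 30 main
    borg prune --list -d 7  logs
    # compact actually frees the disk space of pruned archives
    borg compact -v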

+ 20 - 20
docs/quickstart.rst

@@ -167,7 +167,7 @@ backed up and that the ``prune`` command keeps and deletes the correct backups.
         --exclude 'home/*/.cache/*'     \
         --exclude 'var/tmp/*'           \
                                         \
-        '{hostname}-{now}'              \
+        '{hostname}'                    \
         /etc                            \
         /home                           \
         /root                           \
@@ -178,16 +178,16 @@ backed up and that the ``prune`` command keeps and deletes the correct backups.
     info "Pruning repository"
 
     # Use the `prune` subcommand to maintain 7 daily, 4 weekly and 6 monthly
-    # archives of THIS machine. The '{hostname}-*' globbing is very important to
-    # limit prune's operation to this machine's archives and not apply to
-    # other machines' archives also:
-
-    borg prune                              \
-        --list                              \
-        --match-archives 'sh:{hostname}-*'  \
-        --show-rc                           \
-        --keep-daily    7                   \
-        --keep-weekly   4                   \
+    # archives of THIS machine. The '{hostname}' matching is very important to
+    # limit prune's operation to archives with exactly that name and not apply
+    # to archives with other names also:
+
+    borg prune               \
+        '{hostname}'         \
+        --list               \
+        --show-rc            \
+        --keep-daily    7    \
+        --keep-weekly   4    \
         --keep-monthly  6
 
     prune_exit=$?
@@ -196,7 +196,7 @@ backed up and that the ``prune`` command keeps and deletes the correct backups.
 
 
     info "Compacting repository"
 
-    borg compact
+    borg compact -v
 
     compact_exit=$?
 
@@ -501,11 +501,11 @@ Example with **borg mount**:
 
 
     # open a new, separate terminal (this terminal will be blocked until umount)
 
-    # now we find out the archive names we have in the repo:
+    # now we find out the archive ID of the archive we want to mount:
     borg repo-list
 
-    # mount one archive from a borg repo:
-    borg mount -a myserver-system-2019-08-11 /mnt/borg
+    # mount one archive giving its archive ID prefix:
+    borg mount -a aid:d34db33f /mnt/borg
 
     # alternatively, mount all archives from a borg repo (slower):
     borg mount /mnt/borg
@@ -527,17 +527,17 @@ Example with **borg extract**:
     mkdir borg_restore
     cd borg_restore
 
-    # now we find out the archive names we have in the repo:
+    # now we find out the archive ID of the archive we want to extract:
     borg repo-list
 
-    # we could find out the archive contents, esp. the path layout:
-    borg list myserver-system-2019-08-11
+    # find out what the paths stored in the archive look like:
+    borg list aid:d34db33f
 
     # we extract only some specific path (note: no leading / !):
-    borg extract myserver-system-2019-08-11 path/to/extract
+    borg extract aid:d34db33f path/to/extract
 
     # alternatively, we could fully extract the archive:
-    borg extract myserver-system-2019-08-11
+    borg extract aid:d34db33f
 
     # now move the files to the correct place...
 
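A complete restore round trip using an archive ID could then look like the following sketch; ``aid:d34db33f`` is the placeholder ID prefix from the examples above::

    # pick the ID of the archive to restore from the repo listing
    borg repo-list
    # mount it, copy out what is needed, then unmount
    mkdir -p /mnt/borg
    borg mount -a aid:d34db33f /mnt/borg
    cp -a /mnt/borg/path/to/restore ~/restored
    borg umount /mnt/borg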

+ 16 - 18
docs/quickstart_example.rst.inc

@@ -3,13 +3,13 @@
     $ borg -r /path/to/repo repo-create --encryption=repokey-aes-ocb
 
 2. Back up the ``~/src`` and ``~/Documents`` directories into an archive called
-   *Monday*::
+   *docs*::
 
-    $ borg -r /path/to/repo create Monday ~/src ~/Documents
+    $ borg -r /path/to/repo create docs ~/src ~/Documents
 
-3. The next day create a new archive called *Tuesday*::
+3. The next day create a new archive using the same archive name::
 
-    $ borg -r /path/to/repo create --stats Tuesday ~/src ~/Documents
+    $ borg -r /path/to/repo create --stats docs ~/src ~/Documents
 
    This backup will be a lot quicker and a lot smaller since only new, never
    before seen data is stored. The ``--stats`` option causes Borg to
@@ -17,7 +17,7 @@
    size (the amount of unique data not shared with other archives)::
 
     Repository: /path/to/repo
-    Archive name: Tuesday
+    Archive name: docs
     Archive fingerprint: bcd1b53f9b4991b7afc2b339f851b7ffe3c6d030688936fe4552eccc1877718d
     Time (start): Sat, 2022-06-25 20:21:43
     Time (end):   Sat, 2022-06-25 20:21:43
@@ -30,32 +30,30 @@
 4. List all archives in the repository::
 
     $ borg -r /path/to/repo repo-list
-    Monday                               Sat, 2022-06-25 20:21:14 [b80e24d2...b179f298]
-    Tuesday                              Sat, 2022-06-25 20:21:43 [bcd1b53f...1877718d]
+    docs                                 Sat, 2022-06-25 20:21:14 [b80e24d2...b179f298]
+    docs                                 Sat, 2022-06-25 20:21:43 [bcd1b53f...1877718d]
 
-5. List the contents of the *Monday* archive::
+5. List the contents of the first archive::
 
-    $ borg -r /path/to/repo list Monday
+    $ borg -r /path/to/repo list aid:b80e24d2
     drwxr-xr-x user   group          0 Mon, 2016-02-15 18:22:30 home/user/Documents
     -rw-r--r-- user   group       7961 Mon, 2016-02-15 18:22:30 home/user/Documents/Important.doc
     ...
 
-6. Restore the *Monday* archive by extracting the files relative to the current directory::
+6. Restore the first archive by extracting the files relative to the current directory::
 
-    $ borg -r /path/to/repo extract Monday
+    $ borg -r /path/to/repo extract aid:b80e24d2
 
-7. Delete the *Monday* archive (please note that this does **not** free repo disk space)::
+7. Delete the first archive (please note that this does **not** free repo disk space)::
 
-    $ borg -r /path/to/repo delete -a Monday
+    $ borg -r /path/to/repo delete aid:b80e24d2
 
-   Please note the ``-a`` option here (short for ``--match-archives``) which enables you
-   to give a pattern to delete multiple archives, like ``-a 'sh:oldcrap-*'``.
-   You can also combine this with ``--first``, ``--last`` and ``--sort-by``.
-   Be careful, always first use with ``--dry-run`` and ``--list``!
+   Be careful if you use an archive NAME (and not an archive ID): it might match multiple archives!
+   Always first use with ``--dry-run`` and ``--list``!
 
 8. Recover disk space by compacting the segment files in the repo::
 
-    $ borg -r /path/to/repo compact
+    $ borg -r /path/to/repo compact -v
 
 .. Note::
     Borg is quiet by default (it defaults to WARNING log level).
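Since step 7 stresses that ``delete`` alone does not free space, the effect of step 8 can be verified directly (a sketch)::

    $ du -sh /path/to/repo     # size unchanged right after delete
    $ borg -r /path/to/repo compact -v
    $ du -sh /path/to/repo     # smaller now, unused chunks were removed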

+ 8 - 12
docs/usage/create.rst

@@ -23,18 +23,18 @@ Examples
     # /home/<one directory>/.thumbnails is excluded, not /home/*/*/.thumbnails etc.)
     $ borg create my-files /home --exclude 'sh:home/*/.thumbnails'
 
-    # Backup the root filesystem into an archive named "root-YYYY-MM-DD"
+    # Back up the root filesystem into an archive named "root-archive"
     # use zlib compression (good, but slow) - default is lz4 (fast, low compression ratio)
-    $ borg create -C zlib,6 --one-file-system root-{now:%Y-%m-%d} /
+    $ borg create -C zlib,6 --one-file-system root-archive /
 
-    # Backup into an archive name like FQDN-root-TIMESTAMP
-    $ borg create '{fqdn}-root-{now}' /
+    # Back up into an archive name like FQDN-root
+    $ borg create '{fqdn}-root' /
 
     # Back up a remote host locally ("pull" style) using sshfs
     $ mkdir sshfs-mount
     $ sshfs root@example.com:/ sshfs-mount
     $ cd sshfs-mount
-    $ borg create example.com-root-{now:%Y-%m-%d} .
+    $ borg create example.com-root .
     $ cd ..
     $ fusermount -u sshfs-mount
 
@@ -63,17 +63,13 @@ Examples
     # Only compress compressible data with lzma,N (N = 0..9)
     $ borg create --compression auto,lzma,N arch ~
 
-    # Use short hostname, user name and current time in archive name
-    $ borg create '{hostname}-{user}-{now}' ~
-    # Similar, use the same datetime format that is default as of borg 1.1
-    $ borg create '{hostname}-{user}-{now:%Y-%m-%dT%H:%M:%S}' ~
-    # As above, but add nanoseconds
-    $ borg create '{hostname}-{user}-{now:%Y-%m-%dT%H:%M:%S.%f}' ~
+    # Use short hostname and user name as archive name
+    $ borg create '{hostname}-{user}' ~
 
     # Backing up relative paths by moving into the correct directory first
     $ cd /home/user/Documents
     # The root directory of the archive will be "projectA"
-    $ borg create 'daily-projectA-{now:%Y-%m-%d}' projectA
+    $ borg create 'daily-projectA' projectA
 
     # Use external command to determine files to archive
     # Use --paths-from-stdin with find to back up only files less than 1MB in size
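The ``--paths-from-stdin`` variant referenced in the last comment could look like the following sketch; it assumes ``--paths-delimiter '\0'`` matches the NUL separators written by ``find -print0``::

    $ find . -type f -size -1M -print0 | borg create --paths-from-stdin --paths-delimiter '\0' small-files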

+ 5 - 2
docs/usage/delete.rst

@@ -4,11 +4,14 @@ Examples
 ~~~~~~~~
 ::
 
-    # delete a single backup archive:
-    $ borg delete -a Monday
+    # delete all backup archives named "kenny-files":
+    $ borg delete -a kenny-files
     # actually free disk space:
     $ borg compact
 
+    # delete a specific backup archive using its unique archive ID prefix:
+    $ borg delete aid:d34db33f
+
     # delete all archives whose names begin with the machine's hostname followed by "-"
     $ borg delete -a 'sh:{hostname}-*'
 
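Because a name or pattern can match many archives, a dry run is cheap insurance before the real deletion (a sketch)::

    # preview which archives would be deleted, without changing the repo
    $ borg delete --dry-run --list -a 'sh:{hostname}-*'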

+ 2 - 3
docs/usage/help.rst.inc

@@ -342,9 +342,8 @@ If literal curly braces need to be used, double them for escaping::
 
 
 Examples::
 
-    borg create /path/to/repo::{hostname}-{user}-{utcnow} ...
-    borg create /path/to/repo::{hostname}-{now:%Y-%m-%d_%H:%M:%S%z} ...
-    borg prune -a 'sh:{hostname}-*' ...
+    borg create '{hostname}-{user}' ...
+    borg prune '{hostname}-{user}' ...
 
 .. note::
     systemd uses a difficult, non-standard syntax for command lines in unit files (refer to
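As an illustration of the brace-doubling rule mentioned above, this creates an archive whose name contains literal braces (a sketch; the name is made up)::

    # resulting archive name: weekly-{special}-<hostname>
    borg create 'weekly-{{special}}-{hostname}' ~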

+ 3 - 3
docs/usage/info.rst

@@ -4,8 +4,8 @@ Examples
 ~~~~~~~~
 ::
 
-    $ borg info Tuesday2022-06-25T20:51:39
-    Archive name: Tuesday2022-06-25T20:51:39
+    $ borg info aid:f7dea078
+    Archive name: source-backup
     Archive fingerprint: f7dea0788dfc026cc2be1c0f5b94beb4e4084eb3402fc40c38d8719b1bf2d943
     Comment:
     Hostname: mba2020
@@ -13,7 +13,7 @@ Examples
     Time (start): Sat, 2022-06-25 20:51:40
     Time (end): Sat, 2022-06-25 20:51:40
     Duration: 0.03 seconds
-    Command line: /Users/tw/w/borg-env/bin/borg -r path/to/repo create --stats 'Tuesday{now}' src --progress
+    Command line: /usr/bin/borg -r path/to/repo create source-backup src
     Utilization of maximum supported archive size: 0%
     Number of files: 244
     Original size: 13.80 MB

+ 14 - 4
docs/usage/prune.rst

@@ -7,24 +7,34 @@ Be careful, prune is a potentially dangerous command, it will remove backup
 archives.
 
 The default of prune is to apply to **all archives in the repository** unless
-you restrict its operation to a subset of the archives using ``-a`` / ``--match-archives``.
+you restrict its operation to a subset of the archives.
+
+The recommended way to name archives (with ``borg create``) is to use the
+identical archive name within a series of archives. Then you can simply give
+that name to prune as well, so it operates just on that series of archives.
+
+Alternatively, you can use ``-a`` / ``--match-archives`` to match on the
+archive names and select some of them.
 When using ``-a``, be careful to choose a good pattern - e.g. do not use a
 prefix "foo" if you do not also want to match "foobar".
 
 It is strongly recommended to always run ``prune -v --list --dry-run ...``
 first so you will see what it would do without it actually doing anything.
 
+Don't forget to run ``borg compact -v`` after prune to actually free disk space.
+
 ::
 
     # Keep 7 end of day and 4 additional end of week archives.
     # Do a dry-run without actually deleting anything.
     $ borg prune -v --list --dry-run --keep-daily=7 --keep-weekly=4
 
-    # Same as above but only apply to archive names starting with the hostname
+    # Similar to the above, but only apply to the archive series named '{hostname}':
+    $ borg prune -v --list --keep-daily=7 --keep-weekly=4 '{hostname}'
+
+    # Similar to the above, but apply to archive names starting with the hostname
     # of the machine followed by a "-" character:
     $ borg prune -v --list --keep-daily=7 --keep-weekly=4 -a 'sh:{hostname}-*'
-    # actually free disk space:
-    $ borg compact
 
     # Keep 7 end of day, 4 additional end of week archives,
     # and an end of month archive for every month:
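A safe sequence for one series, following the recommendations above (a sketch; the series name ``docs`` is illustrative)::

    # preview first, then prune for real, then free the space
    $ borg prune -v --list --dry-run --keep-daily=7 --keep-weekly=4 docs
    $ borg prune -v --list --keep-daily=7 --keep-weekly=4 docs
    $ borg compact -v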

+ 2 - 2
docs/usage/tar.rst

@@ -34,9 +34,9 @@ Outputs a script that copies all archives from repo1 to repo2:
 
 
 ::
 
-    for A T in `borg list --format='{archive} {time:%Y-%m-%dT%H:%M:%S}{NL}'`
+    borg repo-list --format='{archive} {id} {time:%Y-%m-%dT%H:%M:%S}{NL}' | while read N I T
     do
-      echo "borg -r repo1 export-tar --tar-format=BORG $A - | borg -r repo2 import-tar --timestamp=$T $A -"
+      echo "borg -r repo1 export-tar --tar-format=BORG aid:$I - | borg -r repo2 import-tar --timestamp=$T $N -"
     done
 
 Kept:

+ 37 - 49
src/borg/archive.py

@@ -442,6 +442,7 @@ class Archive:
         self,
         manifest,
         name,
+        *,
         cache=None,
         create=False,
         numeric_ids=False,
@@ -458,6 +459,7 @@ class Archive:
         log_json=False,
         iec=False,
     ):
+        name_is_id = isinstance(name, bytes)
         self.cwd = os.getcwd()
         assert isinstance(manifest, Manifest)
         self.manifest = manifest
@@ -493,10 +495,12 @@ class Archive:
         self.create = create
         if self.create:
             self.items_buffer = CacheChunkBuffer(self.cache, self.key, self.stats)
-            if manifest.archives.exists(name):
-                raise self.AlreadyExists(name)
         else:
-            info = self.manifest.archives.get(name)
+            if name_is_id:
+                # we also go over the manifest here to avoid quick&dirty deleted archives
+                info = self.manifest.archives.get_by_id(name)
+            else:
+                info = self.manifest.archives.get(name)
             if info is None:
                 raise self.DoesNotExist(name)
             self.load(info.id)
@@ -611,8 +615,6 @@ Duration: {0.duration}
 
 
     def save(self, name=None, comment=None, timestamp=None, stats=None, additional_metadata=None):
         name = name or self.name
-        if self.manifest.archives.exists(name):
-            raise self.AlreadyExists(name)
         self.items_buffer.flush(flush=True)
         item_ptrs = archive_put_items(
             self.items_buffer.chunks, repo_objs=self.repo_objs, cache=self.cache, stats=self.stats
@@ -956,18 +958,16 @@ Duration: {0.duration}
         self.id = new_id
 
     def rename(self, name):
-        if self.manifest.archives.exists(name):
-            raise self.AlreadyExists(name)
-        oldname = self.name
+        old_id = self.id
         self.name = name
         self.set_meta("name", name)
-        self.manifest.archives.delete(oldname)
+        self.manifest.archives.delete_by_id(old_id)
 
     def delete(self):
         # quick and dirty: we just nuke the archive from the archives list - that will
         # potentially orphan all chunks previously referenced by the archive, except the ones also
         # referenced by other archives. In the end, "borg compact" will clean up and free space.
-        self.manifest.archives.delete(self.name)
+        self.manifest.archives.delete_by_id(self.id)
 
     @staticmethod
     def compare_archives_iter(
@@ -1818,23 +1818,13 @@ class ArchiveChecker:
                 archive = self.key.unpack_archive(data)
                 archive = ArchiveItem(internal_dict=archive)
                 name = archive.name
-                logger.info(f"Found archive {name}, id {bin_to_hex(chunk_id)}.")
-                if self.manifest.archives.exists_name_and_id(name, chunk_id):
+                archive_id, archive_id_hex = chunk_id, bin_to_hex(chunk_id)
+                logger.info(f"Found archive {name} {archive_id_hex}.")
+                if self.manifest.archives.exists_name_and_id(name, archive_id):
                     logger.info("We already have an archives directory entry for this.")
-                elif not self.manifest.archives.exists(name):
-                    # no archives list entry yet and name is not taken yet, create an entry
-                    logger.warning(f"Creating archives directory entry for {name}.")
-                    self.manifest.archives.create(name, chunk_id, archive.time)
                 else:
-                    # we don't have an entry yet, but the name is taken by something else
-                    i = 1
-                    while True:
-                        new_name = "%s.%d" % (name, i)
-                        if not self.manifest.archives.exists(new_name):
-                            break
-                        i += 1
-                    logger.warning(f"Creating archives directory entry using {new_name}.")
-                    self.manifest.archives.create(new_name, chunk_id, archive.time)
+                    logger.warning(f"Creating archives directory entry for {name} {archive_id_hex}.")
+                    self.manifest.archives.create(name, archive_id, archive.time)
         pi.finish()
         logger.info("Rebuilding missing archives directory entries completed.")
 
@@ -2046,28 +2036,28 @@ class ArchiveChecker:
         with cache_if_remote(self.repository) as repository:
             for i, info in enumerate(archive_infos):
                 pi.show(i)
-                logger.info(f"Analyzing archive {info.name} ({i + 1}/{num_archives})")
-                archive_id = info.id
+                archive_id, archive_id_hex = info.id, bin_to_hex(info.id)
+                logger.info(f"Analyzing archive {info.name} {archive_id_hex} ({i + 1}/{num_archives})")
                 if archive_id not in self.chunks:
-                    logger.error("Archive metadata block %s is missing!", bin_to_hex(archive_id))
+                    logger.error(f"Archive metadata block {archive_id_hex} is missing!")
                     self.error_found = True
                     if self.repair:
-                        logger.error(f"Deleting broken archive {info.name}.")
-                        self.manifest.archives.delete(info.name)
+                        logger.error(f"Deleting broken archive {info.name} {archive_id_hex}.")
+                        self.manifest.archives.delete_by_id(archive_id)
                     else:
-                        logger.error(f"Would delete broken archive {info.name}.")
+                        logger.error(f"Would delete broken archive {info.name} {archive_id_hex}.")
                     continue
                 cdata = self.repository.get(archive_id)
                 try:
                     _, data = self.repo_objs.parse(archive_id, cdata, ro_type=ROBJ_ARCHIVE_META)
                 except IntegrityError as integrity_error:
-                    logger.error("Archive metadata block %s is corrupted: %s", bin_to_hex(archive_id), integrity_error)
+                    logger.error(f"Archive metadata block {archive_id_hex} is corrupted: {integrity_error}")
                     self.error_found = True
                     if self.repair:
-                        logger.error(f"Deleting broken archive {info.name}.")
-                        self.manifest.archives.delete(info.name)
+                        logger.error(f"Deleting broken archive {info.name} {archive_id_hex}.")
+                        self.manifest.archives.delete_by_id(archive_id)
                     else:
-                        logger.error(f"Would delete broken archive {info.name}.")
+                        logger.error(f"Would delete broken archive {info.name} {archive_id_hex}.")
                     continue
                 archive = self.key.unpack_archive(data)
                 archive = ArchiveItem(internal_dict=archive)
@@ -2090,7 +2080,9 @@ class ArchiveChecker:
                     logger.debug(f"archive id new: {bin_to_hex(new_archive_id)}")
                     cdata = self.repo_objs.format(new_archive_id, {}, data, ro_type=ROBJ_ARCHIVE_META)
                     add_reference(new_archive_id, len(data), cdata)
-                    self.manifest.archives.create(info.name, new_archive_id, info.ts, overwrite=True)
+                    self.manifest.archives.create(info.name, new_archive_id, info.ts)
+                    if archive_id != new_archive_id:
+                        self.manifest.archives.delete_by_id(archive_id)
             pi.finish()
 
     def finish(self):
@@ -2148,18 +2140,16 @@ class ArchiveRecreater:
         self.progress = progress
         self.print_file_status = file_status_printer or (lambda *args: None)
 
-    def recreate(self, archive_name, comment=None, target_name=None):
-        assert not self.is_temporary_archive(archive_name)
-        archive = self.open_archive(archive_name)
+    def recreate(self, archive_id, target_name, delete_original, comment=None):
+        archive = self.open_archive(archive_id)
         target = self.create_target(archive, target_name)
         if self.exclude_if_present or self.exclude_caches:
             self.matcher_add_tagged_dirs(archive)
-        if self.matcher.empty() and not target.recreate_rechunkify and comment is None and target_name is None:
+        if self.matcher.empty() and not target.recreate_rechunkify and comment is None:
             # nothing to do
             return False
         self.process_items(archive, target)
-        replace_original = target_name is None
-        self.save(archive, target, comment, replace_original=replace_original)
+        self.save(archive, target, comment, delete_original=delete_original)
         return True
 
 
     def process_items(self, archive, target):
     def process_items(self, archive, target):
@@ -2216,7 +2206,7 @@ class ArchiveRecreater:
             for chunk in chunk_iterator:
                 yield Chunk(chunk, size=len(chunk), allocation=CH_DATA)
 
-    def save(self, archive, target, comment=None, replace_original=True):
+    def save(self, archive, target, comment=None, delete_original=True):
         if self.dry_run:
             return
         if comment is None:
@@ -2242,9 +2232,8 @@ class ArchiveRecreater:
             }
 
         target.save(comment=comment, timestamp=self.timestamp, additional_metadata=additional_metadata)
-        if replace_original:
+        if delete_original:
             archive.delete()
-            target.rename(archive.name)
         if self.stats:
             target.start = _start
             target.end = archive_ts_now()
@@ -2277,9 +2266,8 @@ class ArchiveRecreater:
         matcher.add(tag_files, IECommand.Include)
         matcher.add(tagged_dirs, IECommand.ExcludeNoRecurse)
 
-    def create_target(self, archive, target_name=None):
+    def create_target(self, archive, target_name):
         """Create target archive."""
-        target_name = target_name or archive.name + ".recreate"
         target = self.create_target_archive(target_name)
         # If the archives use the same chunker params, then don't rechunkify
         source_chunker_params = tuple(archive.metadata.get("chunker_params", []))
@@ -2308,5 +2296,5 @@ class ArchiveRecreater:
         )
         return target
 
-    def open_archive(self, name, **kwargs):
-        return Archive(self.manifest, name, cache=self.cache, **kwargs)
+    def open_archive(self, archive_id, **kwargs):
+        return Archive(self.manifest, archive_id, cache=self.cache, **kwargs)

+ 2 - 1
src/borg/archiver/_common.py

@@ -257,9 +257,10 @@ def with_archive(method):
     def wrapper(self, args, repository, manifest, **kwargs):
         archive_name = getattr(args, "name", None)
         assert archive_name is not None
+        archive_info = manifest.archives.get_one(archive_name)
         archive = Archive(
             manifest,
-            archive_name,
+            archive_info.id,
             numeric_ids=getattr(args, "numeric_ids", False),
             noflags=getattr(args, "noflags", False),
             noacls=getattr(args, "noacls", False),

+ 3 - 1
src/borg/archiver/create_cmd.py

@@ -575,7 +575,9 @@ class CreateMixIn:
         The archive will consume almost no disk space for files or parts of files that
         have already been stored in other archives.
 
-        The archive name needs to be unique.
+        The archive name does NOT need to be unique; you can and should use the same
+        name for a series of archives. The unique archive identifier is its ID (hash),
+        and you can abbreviate the ID as long as it is unique.
 
         In the archive name, you may use the following placeholders:
         {now}, {utcnow}, {fqdn}, {hostname}, {user} and some others.
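From the CLI, the series behavior described in this docstring looks like the following sketch (repo path and archive name are illustrative)::

    $ borg -r /path/to/repo create docs ~/Documents
    $ borg -r /path/to/repo create docs ~/Documents   # same name again: new archive in the same series
    $ borg -r /path/to/repo repo-list                 # two "docs" archives, distinguished by their IDs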

+ 4 - 2
src/borg/archiver/debug_cmd.py

@@ -32,7 +32,8 @@ class DebugMixIn:
     def do_debug_dump_archive_items(self, args, repository, manifest):
         """dump (decrypted, decompressed) archive items metadata (not: data)"""
         repo_objs = manifest.repo_objs
-        archive = Archive(manifest, args.name)
+        archive_info = manifest.archives.get_one(args.name)
+        archive = Archive(manifest, archive_info.id)
         for i, item_id in enumerate(archive.metadata.items):
             _, data = repo_objs.parse(item_id, repository.get(item_id), ro_type=ROBJ_ARCHIVE_STREAM)
             filename = "%06d_%s.items" % (i, bin_to_hex(item_id))
@@ -44,9 +45,10 @@ class DebugMixIn:
     @with_repository(compatibility=Manifest.NO_OPERATION_CHECK)
     def do_debug_dump_archive(self, args, repository, manifest):
         """dump decoded archive metadata (not: data)"""
+        archive_info = manifest.archives.get_one(args.name)
         repo_objs = manifest.repo_objs
         try:
-            archive_meta_orig = manifest.archives.get(args.name, raw=True)
+            archive_meta_orig = manifest.archives.get_by_id(archive_info.id, raw=True)
         except KeyError:
             raise Archive.DoesNotExist(args.name)
 

+ 17 - 8
src/borg/archiver/delete_cmd.py

@@ -3,7 +3,7 @@ import logging
 
 
 from ._common import with_repository
 from ..constants import *  # NOQA
-from ..helpers import format_archive, CommandError
+from ..helpers import format_archive, CommandError, bin_to_hex, archivename_validator
 from ..manifest import Manifest
 
 from ..logger import create_logger
@@ -18,10 +18,14 @@ class DeleteMixIn:
         self.output_list = args.output_list
         dry_run = args.dry_run
         manifest = Manifest.load(repository, (Manifest.Operation.DELETE,))
-        archive_names = tuple(x.name for x in manifest.archives.list_considering(args))
-        if not archive_names:
+        if args.name:
+            archive_infos = [manifest.archives.get_one(args.name)]
+        else:
+            archive_infos = manifest.archives.list_considering(args)
+        count = len(archive_infos)
+        if count == 0:
             return
-        if args.match_archives is None and args.first == 0 and args.last == 0:
+        if not args.name and args.match_archives is None and args.first == 0 and args.last == 0:
             raise CommandError(
                 "Aborting: if you really want to delete all archives, please use -a 'sh:*' "
                 "or just delete the whole repository (might be much faster)."
@@ -29,18 +33,20 @@ class DeleteMixIn:
 
 
         deleted = False
         logger_list = logging.getLogger("borg.output.list")
-        for i, archive_name in enumerate(archive_names, 1):
+        for i, archive_info in enumerate(archive_infos, 1):
+            name, id, hex_id = archive_info.name, archive_info.id, bin_to_hex(archive_info.id)
             try:
                 # this does NOT use Archive.delete, so this code hopefully even works in cases a corrupt archive
                 # would make the code in class Archive crash, so the user can at least get rid of such archives.
-                current_archive = manifest.archives.delete(archive_name)
+                if not dry_run:
+                    manifest.archives.delete_by_id(id)
             except KeyError:
-                self.print_warning(f"Archive {archive_name} not found ({i}/{len(archive_names)}).")
+                self.print_warning(f"Archive {name} {hex_id} not found ({i}/{count}).")
             else:
                 deleted = True
                 if self.output_list:
                     msg = "Would delete: {} ({}/{})" if dry_run else "Deleted archive: {} ({}/{})"
-                    logger_list.info(msg.format(format_archive(current_archive), i, len(archive_names)))
+                    logger_list.info(msg.format(format_archive(archive_info), i, count))
         if dry_run:
             logger.info("Finished dry-run.")
         elif deleted:
@@ -82,3 +88,6 @@
             "--list", dest="output_list", action="store_true", help="output verbose list of archives"
         )
         define_archive_filters_group(subparser)
+        subparser.add_argument(
+            "name", metavar="NAME", nargs="?", type=archivename_validator, help="specify the archive name"
+        )

+ 11 - 5
src/borg/archiver/info_cmd.py

@@ -5,7 +5,7 @@ from datetime import timedelta
 from ._common import with_repository
 from ..archive import Archive
 from ..constants import *  # NOQA
-from ..helpers import format_timedelta, json_print, basic_json_data
+from ..helpers import format_timedelta, json_print, basic_json_data, archivename_validator
 from ..manifest import Manifest
 
 from ..logger import create_logger
@@ -18,12 +18,15 @@ class InfoMixIn:
     def do_info(self, args, repository, manifest, cache):
         """Show archive details such as disk space used"""
 
-        archive_names = tuple(x.name for x in manifest.archives.list_considering(args))
+        if args.name:
+            archive_infos = [manifest.archives.get_one(args.name)]
+        else:
+            archive_infos = manifest.archives.list_considering(args)
 
         output_data = []
 
-        for i, archive_name in enumerate(archive_names, 1):
-            archive = Archive(manifest, archive_name, cache=cache, iec=args.iec)
+        for i, archive_info in enumerate(archive_infos, 1):
+            archive = Archive(manifest, archive_info.id, cache=cache, iec=args.iec)
             info = archive.info()
             if args.json:
                 output_data.append(info)
@@ -48,7 +51,7 @@ class InfoMixIn:
                     .strip()
                     .format(**info)
                 )
-            if not args.json and len(archive_names) - i:
+            if not args.json and len(archive_infos) - i:
                 print()
 
         if args.json:
@@ -83,3 +86,6 @@ class InfoMixIn:
         subparser.set_defaults(func=self.do_info)
         subparser.add_argument("--json", action="store_true", help="format output as JSON")
         define_archive_filters_group(subparser)
+        subparser.add_argument(
+            "name", metavar="NAME", nargs="?", type=archivename_validator, help="specify the archive name"
+        )

+ 3 - 1
src/borg/archiver/list_cmd.py

@@ -27,8 +27,10 @@ class ListMixIn:
         else:
             format = os.environ.get("BORG_LIST_FORMAT", "{mode} {user:6} {group:6} {size:8} {mtime} {path}{extra}{NL}")
 
+        archive_info = manifest.archives.get_one(args.name)
+
         def _list_inner(cache):
-            archive = Archive(manifest, args.name, cache=cache)
+            archive = Archive(manifest, archive_info.id, cache=cache)
             formatter = ItemFormatter(archive, format)
             for item in archive.iter_items(lambda item: matcher.match(item.path)):
                 sys.stdout.write(formatter.format_item(item, args.json_lines, sort=True))

+ 15 - 8
src/borg/archiver/prune_cmd.py

@@ -10,6 +10,7 @@ from ..archive import Archive
 from ..cache import Cache
 from ..constants import *  # NOQA
 from ..helpers import ArchiveFormatter, interval, sig_int, ProgressIndicatorPercent, CommandError, Error
+from ..helpers import archivename_validator
 from ..manifest import Manifest
 
 from ..logger import create_logger
@@ -90,7 +91,9 @@ class PruneMixIn:
             format = os.environ.get("BORG_PRUNE_FORMAT", "{archive:<36} {time} [{id}]")
         formatter = ArchiveFormatter(format, repository, manifest, manifest.key, iec=args.iec)
 
-        archives = manifest.archives.list(match=args.match_archives, sort_by=["ts"], reverse=True)
+        match = args.name if args.name else args.match_archives
+        archives = manifest.archives.list(match=match, sort_by=["ts"], reverse=True)
+
         keep = []
         # collect the rule responsible for the keeping of each archive in this dict
         # keys are archive ids, values are a tuple
@@ -125,7 +128,7 @@ class PruneMixIn:
                     else:
                         archives_deleted += 1
                         log_message = "Pruning archive (%d/%d):" % (archives_deleted, to_delete_len)
-                        archive = Archive(manifest, archive.name, cache)
+                        archive = Archive(manifest, archive.id, cache=cache)
                         archive.delete()
                         uncommitted_deletes += 1
                 else:
@@ -160,17 +163,18 @@ class PruneMixIn:
         `GFS <https://en.wikipedia.org/wiki/Backup_rotation_scheme#Grandfather-father-son>`_
         (Grandfather-father-son) backup rotation scheme.
 
-        If you use --match-archives (-a), then only archives that match the pattern are
-        considered for deletion and only those archives count towards the totals
-        specified by the rules.
+        The recommended way to use prune is to give the archive series name to it via the
+        NAME argument (assuming you have the same name for all archives in a series).
+        Alternatively, you can also use --match-archives (-a); then only archives that
+        match the pattern are considered for deletion and only those archives count
+        towards the totals specified by the rules.
         Otherwise, *all* archives in the repository are candidates for deletion!
         There is no automatic distinction between archives representing different
         contents. These need to be distinguished by specifying matching globs.
 
-        If you have multiple sequences of archives with different data sets (e.g.
+        If you have multiple series of archives with different data sets (e.g.
         from different machines) in one shared repository, use one prune call per
-        data set that matches only the respective archives using the --match-archives
-        (-a) option.
+        series.
 
         The ``--keep-within`` option takes an argument of the form "<int><char>",
         where char is "H", "d", "w", "m", "y". For example, ``--keep-within 2d`` means
@@ -299,3 +303,6 @@ class PruneMixIn:
             help="number of yearly archives to keep",
         )
         define_archive_filters_group(subparser, sort_by=False, first_last=False)
+        subparser.add_argument(
+            "name", metavar="NAME", nargs="?", type=archivename_validator, help="specify the archive name"
+        )

+ 14 - 11
src/borg/archiver/recreate_cmd.py

@@ -5,7 +5,7 @@ from ._common import build_matcher
 from ..archive import ArchiveRecreater
 from ..constants import *  # NOQA
 from ..compress import CompressionSpec
-from ..helpers import archivename_validator, comment_validator, PathSpec, ChunkerParams, CommandError
+from ..helpers import archivename_validator, comment_validator, PathSpec, ChunkerParams, bin_to_hex
 from ..helpers import timestamp
 from ..manifest import Manifest
 
 
@@ -38,15 +38,19 @@ class RecreateMixIn:
             timestamp=args.timestamp,
         )
 
-        archive_names = tuple(archive.name for archive in manifest.archives.list_considering(args))
-        if args.target is not None and len(archive_names) != 1:
-            raise CommandError("--target: Need to specify single archive")
-        for name in archive_names:
-            if recreater.is_temporary_archive(name):
+        for archive_info in manifest.archives.list_considering(args):
+            if recreater.is_temporary_archive(archive_info.name):
                 continue
-            print("Processing", name)
-            if not recreater.recreate(name, args.comment, args.target):
-                logger.info("Skipped archive %s: Nothing to do. Archive was not processed.", name)
+            name, hex_id = archive_info.name, bin_to_hex(archive_info.id)
+            print(f"Processing {name} {hex_id}")
+            if args.target:
+                target = args.target
+                delete_original = False
+            else:
+                target = archive_info.name
+                delete_original = True
+            if not recreater.recreate(archive_info.id, target, delete_original, args.comment):
+                logger.info(f"Skipped archive {name} {hex_id}: Nothing to do.")
         if not args.dry_run:
             manifest.write()
 
 
@@ -135,8 +139,7 @@ class RecreateMixIn:
             default=None,
             type=archivename_validator,
             action=Highlander,
-            help="create a new archive with the name ARCHIVE, do not replace existing archive "
-            "(only applies for a single archive)",
+            help="create a new archive with the name ARCHIVE, do not replace existing archive",
         )
         archive_group.add_argument(
             "--comment",

+ 29 - 15
src/borg/archiver/transfer_cmd.py

@@ -7,7 +7,7 @@ from ..constants import *  # NOQA
 from ..crypto.key import uses_same_id_hash, uses_same_chunker_secret
 from ..helpers import Error
 from ..helpers import location_validator, Location, archivename_validator, comment_validator
-from ..helpers import format_file_size
+from ..helpers import format_file_size, bin_to_hex
 from ..manifest import Manifest
 
 from ..logger import create_logger
@@ -33,14 +33,15 @@ class TransferMixIn:
             )
 
         dry_run = args.dry_run
-        archive_names = tuple(x.name for x in other_manifest.archives.list_considering(args))
-        if not archive_names:
+        archive_infos = other_manifest.archives.list_considering(args)
+        count = len(archive_infos)
+        if count == 0:
             return
 
         an_errors = []
-        for archive_name in archive_names:
+        for archive_info in archive_infos:
             try:
-                archivename_validator(archive_name)
+                archivename_validator(archive_info.name)
             except argparse.ArgumentTypeError as err:
                 an_errors.append(str(err))
         if an_errors:
@@ -48,12 +49,12 @@ class TransferMixIn:
             raise Error("\n".join(an_errors))

         ac_errors = []
-        for archive_name in archive_names:
-            archive = Archive(other_manifest, archive_name)
+        for archive_info in archive_infos:
+            archive = Archive(other_manifest, archive_info.id)
             try:
                 comment_validator(archive.metadata.get("comment", ""))
             except argparse.ArgumentTypeError as err:
-                ac_errors.append(f"{archive_name}: {err}")
+                ac_errors.append(f"{archive_info.name}: {err}")
         if ac_errors:
             ac_errors.insert(0, "Invalid archive comments detected, please fix them before transfer:")
             raise Error("\n".join(ac_errors))
@@ -75,14 +76,27 @@ class TransferMixIn:

         upgrader = UpgraderCls(cache=cache)

-        for name in archive_names:
+        for archive_info in archive_infos:
+            name, id, ts = archive_info.name, archive_info.id, archive_info.ts
+            id_hex, ts_str = bin_to_hex(id), ts.isoformat()
             transfer_size = 0
             present_size = 0
-            if manifest.archives.exists(name) and not dry_run:
-                print(f"{name}: archive is already present in destination repo, skipping.")
+            # at least for borg 1.x -> borg2 transfers, we can not use the id to check for
+            # already transferred archives (due to upgrade of metadata stream, id will be
+            # different anyway). so we use archive name and timestamp.
+            # the name alone might be sufficient for borg 1.x -> 2 transfers, but isn't
+            # for 2 -> 2 transfers, because borg2 allows duplicate names ("series" feature).
+            # so, best is to check for both name/ts and name/id.
+            if not dry_run and manifest.archives.exists_name_and_ts(name, archive_info.ts):
+                # useful for borg 1.x -> 2 transfers, we have unique names in borg 1.x.
+                # also useful for borg 2 -> 2 transfers with metadata changes (id changes).
+                print(f"{name} {ts_str}: archive is already present in destination repo, skipping.")
+            elif not dry_run and manifest.archives.exists_name_and_id(name, id):
+                # useful for borg 2 -> 2 transfers without changes (id stays the same)
+                print(f"{name} {id_hex}: archive is already present in destination repo, skipping.")
             else:
                 if not dry_run:
-                    print(f"{name}: copying archive to destination repo...")
+                    print(f"{name} {ts_str} {id_hex}: copying archive to destination repo...")
                 other_archive = Archive(other_manifest, name)
                 archive = (
                     Archive(manifest, name, cache=cache, create=True, progress=args.progress) if not dry_run else None
@@ -162,15 +176,15 @@ class TransferMixIn:
                     additional_metadata = upgrader.upgrade_archive_metadata(metadata=other_archive.metadata)
                     archive.save(additional_metadata=additional_metadata)
                     print(
-                        f"{name}: finished. "
+                        f"{name} {ts_str} {id_hex}: finished. "
                         f"transfer_size: {format_file_size(transfer_size)} "
                         f"transfer_size: {format_file_size(transfer_size)} "
                         f"present_size: {format_file_size(present_size)}"
                         f"present_size: {format_file_size(present_size)}"
                     )
                     )
                 else:
                     print(
-                        f"{name}: completed"
+                        f"{name} {ts_str} {id_hex}: completed"
                         if transfer_size == 0
-                        else f"{name}: incomplete, "
+                        else f"{name} {ts_str} {id_hex}: incomplete, "
                         f"transfer_size: {format_file_size(transfer_size)} "
                         f"transfer_size: {format_file_size(transfer_size)} "
                         f"present_size: {format_file_size(present_size)}"
                         f"present_size: {format_file_size(present_size)}"
                     )
                     )
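The comment block above spells out why two skip checks are needed; the sketch below mirrors that logic with plain namedtuples (hypothetical stand-ins, not borg's classes):

.. code-block:: python

    from collections import namedtuple
    from datetime import datetime, timezone

    ArchiveInfo = namedtuple("ArchiveInfo", "name id ts")  # stand-in

    def already_transferred(dst_archives, info):
        # name+ts catches borg 1.x -> 2 re-runs (the metadata upgrade changes the id);
        # name+id catches 2 -> 2 re-runs (a series may hold duplicate names, so the
        # name alone is not sufficient).
        return any(
            a.name == info.name and (a.ts == info.ts or a.id == info.id)
            for a in dst_archives
        )

    ts1 = datetime(2024, 9, 1, tzinfo=timezone.utc)
    ts2 = datetime(2024, 9, 2, tzinfo=timezone.utc)
    dst = [ArchiveInfo("docs", b"\x01" * 32, ts1)]
    assert already_transferred(dst, ArchiveInfo("docs", b"\x02" * 32, ts1))      # id changed
    assert not already_transferred(dst, ArchiveInfo("docs", b"\x02" * 32, ts2))  # new archive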

+ 144 - 70
src/borg/manifest.py

@@ -15,7 +15,8 @@ from .constants import *  # NOQA
 from .helpers.datastruct import StableDict
 from .helpers.parseformat import bin_to_hex, hex_to_bin
 from .helpers.time import parse_timestamp, calculate_relative_offset, archive_ts_now
-from .helpers.errors import Error
+from .helpers.errors import Error, CommandError
+from .item import ArchiveItem
 from .patterns import get_regex_from_pattern
 from .repoobj import RepoObj

@@ -100,16 +101,74 @@ class Archives:
             manifest_archives = StableDict(self._get_raw_dict())
         return manifest_archives

-    def count(self):
-        # return the count of archives in the repo
+    def ids(self):
+        # yield the binary IDs of all archives
         if not self.legacy:
             try:
                 infos = list(self.repository.store_list("archives"))
             except ObjectNotFound:
                 infos = []
-            return len(infos)  # we do not check here if entries are valid
+            for info in infos:
+                info = ItemInfo(*info)  # RPC does not give us a NamedTuple
+                yield hex_to_bin(info.name)
         else:
-            return len(self._archives)
+            for archive_info in self._archives.values():
+                yield archive_info["id"]
+
+    def _get_archive_meta(self, id: bytes) -> dict:
+        # get all metadata directly from the ArchiveItem in the repo.
+        from .legacyrepository import LegacyRepository
+        from .repository import Repository
+
+        try:
+            cdata = self.repository.get(id)
+        except (Repository.ObjectNotFound, LegacyRepository.ObjectNotFound):
+            metadata = dict(
+                id=id,
+                name="archive-does-not-exist",
+                time="1970-01-01T00:00:00.000000",
+                # new:
+                exists=False,  # we have the pointer, but the repo does not have an archive item
+            )
+        else:
+            _, data = self.manifest.repo_objs.parse(id, cdata, ro_type=ROBJ_ARCHIVE_META)
+            archive_dict = self.manifest.key.unpack_archive(data)
+            archive_item = ArchiveItem(internal_dict=archive_dict)
+            if archive_item.version not in (1, 2):  # legacy: still need to read v1 archives
+                raise Exception("Unknown archive metadata version")
+            # callers expect a dict with dict["key"] access, not ArchiveItem.key access.
+            # also, we need to put the id in there.
+            metadata = dict(
+                id=id,
+                name=archive_item.name,
+                time=archive_item.time,
+                # new:
+                exists=True,  # repo has a valid archive item
+                username=archive_item.username,
+                hostname=archive_item.hostname,
+                size=archive_item.size,
+                nfiles=archive_item.nfiles,
+                comment=archive_item.comment,  # not always present?
+            )
+        return metadata
+
+    def _infos(self):
+        # yield the infos of all archives
+        for id in self.ids():
+            yield self._get_archive_meta(id)
+
+    def _info_tuples(self):
+        for info in self._infos():
+            yield ArchiveInfo(name=info["name"], id=info["id"], ts=parse_timestamp(info["time"]))
+
+    def count(self):
+        # return the count of archives in the repo
+        return len(list(self.ids()))
+
+    def names(self):
+        # yield the names of all archives
+        for archive_info in self._infos():
+            yield archive_info["name"]
     def exists(self, name):
         # check if an archive with this name exists
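The rewritten ``Archives`` class above replaces the old msgpack entries under ``archives/`` with a plain directory scheme: each entry is named by the hex archive id and carries no payload, and all metadata is read from the archive item itself. A toy model of that layout, under assumed names (``objects`` and ``directory`` are illustrations, not borg code):

.. code-block:: python

    objects = {}       # archive id (bytes) -> metadata dict, like archive items in the repo
    directory = set()  # hex ids, like the entries under archives/

    def ids():
        # like Archives.ids() above: the entry name IS the archive id
        for hex_id in directory:
            yield bytes.fromhex(hex_id)

    def get_archive_meta(archive_id):
        # like _get_archive_meta() above: a dangling entry yields exists=False
        meta = objects.get(archive_id)
        if meta is None:
            return {"id": archive_id, "name": "archive-does-not-exist", "exists": False}
        return {"id": archive_id, "exists": True, **meta}

    aid = b"\x01" * 32
    objects[aid] = {"name": "docs", "time": "2024-09-01T00:00:00.000000"}
    directory.add(aid.hex())
    assert [get_archive_meta(i)["name"] for i in ids()] == ["docs"]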
@@ -124,7 +183,7 @@ class Archives:
         assert isinstance(name, str)
         assert isinstance(id, bytes)
         if not self.legacy:
-            for _, archive_info in self._infos():
+            for archive_info in self._infos():
                 if archive_info["name"] == name and archive_info["id"] == id:
                 if archive_info["name"] == name and archive_info["id"] == id:
                     return True
                     return True
             else:
             else:
@@ -132,53 +191,37 @@ class Archives:
         else:
             raise NotImplementedError

-    def _infos(self):
-        # yield the infos of all archives: (store_key, archive_info)
-        from .helpers import msgpack
-
+    def exists_name_and_ts(self, name, ts):
+        # check if an archive with this name AND timestamp exists
+        assert isinstance(name, str)
+        assert isinstance(ts, datetime)
         if not self.legacy:
-            try:
-                infos = list(self.repository.store_list("archives"))
-            except ObjectNotFound:
-                infos = []
-            for info in infos:
-                info = ItemInfo(*info)  # RPC does not give us a NamedTuple
-                value = self.repository.store_load(f"archives/{info.name}")
-                _, value = self.manifest.repo_objs.parse(hex_to_bin(info.name), value, ro_type=ROBJ_MANIFEST)
-                archive_info = msgpack.unpackb(value)
-                yield info.name, archive_info
+            for archive_info in self._info_tuples():
+                if archive_info.name == name and archive_info.ts == ts:
+                    return True
+            else:
+                return False
         else:
-            for name in self._archives:
-                archive_info = dict(name=name, id=self._archives[name]["id"], time=self._archives[name]["time"])
-                yield None, archive_info
+            raise NotImplementedError
     def _lookup_name(self, name, raw=False):
         assert isinstance(name, str)
         assert not self.legacy
-        for store_key, archive_info in self._infos():
-            if archive_info["name"] == name:
+        for archive_info in self._infos():
+            if archive_info["exists"] and archive_info["name"] == name:
                 if not raw:
                     ts = parse_timestamp(archive_info["time"])
-                    return store_key, ArchiveInfo(name=name, id=archive_info["id"], ts=ts)
+                    return ArchiveInfo(name=archive_info["name"], id=archive_info["id"], ts=ts)
                 else:
-                    return store_key, archive_info
+                    return archive_info
         else:
             raise KeyError(name)

-    def names(self):
-        # yield the names of all archives
-        if not self.legacy:
-            for _, archive_info in self._infos():
-                yield archive_info["name"]
-        else:
-            yield from self._archives
-
     def get(self, name, raw=False):
         assert isinstance(name, str)
         if not self.legacy:
             try:
-                store_key, archive_info = self._lookup_name(name, raw=raw)
-                return archive_info
+                return self._lookup_name(name, raw=raw)
             except KeyError:
                 return None
         else:
@@ -191,6 +234,29 @@ class Archives:
             else:
                 return dict(name=name, id=values["id"], time=values["time"])

+    def get_by_id(self, id, raw=False):
+        assert isinstance(id, bytes)
+        if not self.legacy:
+            if id in self.ids():  # check directory
+                # looks like this archive id is in the archives directory, thus it is NOT deleted.
+                archive_info = self._get_archive_meta(id)
+                if archive_info["exists"]:
+                    if not raw:
+                        ts = parse_timestamp(archive_info["time"])
+                        archive_info = ArchiveInfo(name=archive_info["name"], id=archive_info["id"], ts=ts)
+                    return archive_info
+        else:
+            for name, values in self._archives.items():
+                if id == values["id"]:
+                    break
+            else:
+                return None
+            if not raw:
+                ts = parse_timestamp(values["time"])
+                return ArchiveInfo(name=name, id=values["id"], ts=ts)
+            else:
+                return dict(name=name, id=values["id"], time=values["time"])
+
     def create(self, name, id, ts, *, overwrite=False):
         assert isinstance(name, str)
         assert isinstance(id, bytes)
@@ -198,35 +264,18 @@ class Archives:
             ts = ts.isoformat(timespec="microseconds")
         assert isinstance(ts, str)
         if not self.legacy:
-            try:
-                store_key, _ = self._lookup_name(name)
-            except KeyError:
-                pass
-            else:
-                # looks like we already have an archive list entry with that name
-                if not overwrite:
-                    raise KeyError("archive already exists")
-                else:
-                    self.repository.store_delete(f"archives/{store_key}")
-            archive = dict(name=name, id=id, time=ts)
-            value = self.manifest.key.pack_metadata(archive)
-            id = self.manifest.repo_objs.id_hash(value)
-            key = bin_to_hex(id)
-            value = self.manifest.repo_objs.format(id, {}, value, ro_type=ROBJ_MANIFEST)
-            self.repository.store_store(f"archives/{key}", value)
+            # we only create a directory entry, its name points to the archive item:
+            self.repository.store_store(f"archives/{bin_to_hex(id)}", b"")
         else:
             if self.exists(name) and not overwrite:
                 raise KeyError("archive already exists")
             self._archives[name] = {"id": id, "time": ts}
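Note that the non-legacy branch of ``create()`` above no longer raises on a duplicate name: archives are keyed by id, so several archives may share one name and form a series. Continuing the toy directory model (hypothetical, not borg code):

.. code-block:: python

    directory = set()  # hex ids, like the entries under archives/

    def create(archive_id: bytes):
        # like the non-legacy create() above: add an empty directory entry
        # named by the id; duplicate archive *names* are not checked anymore
        directory.add(archive_id.hex())

    create(b"\x01" * 32)
    create(b"\x02" * 32)   # may carry the same name: one series, two archives
    assert len(directory) == 2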
-    def delete(self, name):
+    def delete_by_id(self, id):
         # delete an archive
-        assert isinstance(name, str)
-        if not self.legacy:
-            store_key, archive_info = self._lookup_name(name)
-            self.repository.store_delete(f"archives/{store_key}")
-        else:
-            self._archives.pop(name)
+        assert isinstance(id, bytes)
+        assert not self.legacy
+        self.repository.store_delete(f"archives/{bin_to_hex(id)}")
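``delete_by_id()`` removes only the directory entry; the archive item itself stays in the repository until compaction. That is what the reworked check test below relies on: ``borg check --repair --undelete-archives`` can re-create entries for archive items it still finds. A toy sketch of the round trip (assumed names, not borg code):

.. code-block:: python

    aid = b"\x01" * 32
    objects = {aid: {"name": "docs"}}  # archive items still present in the repo
    directory = {aid.hex()}            # entries under archives/

    def delete_by_id(archive_id: bytes):
        directory.discard(archive_id.hex())  # quick and dirty: drop only the pointer

    def undelete_archives():
        # roughly what check --repair --undelete-archives does
        for archive_id in objects:
            directory.add(archive_id.hex())

    delete_by_id(aid)
    assert not directory
    undelete_archives()
    assert directory == {aid.hex()}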
     def list(
         self,
@@ -262,22 +311,32 @@ class Archives:
         if isinstance(sort_by, (str, bytes)):
             raise TypeError("sort_by must be a sequence of str")

-        archives = [self.get(name) for name in self.names()]
-        regex = get_regex_from_pattern(match or "re:.*")
-        regex = re.compile(regex + match_end)
-        archives = [x for x in archives if regex.match(x.name) is not None]
+        archive_infos = self._info_tuples()
+        if match is None:
+            archive_infos = list(archive_infos)
+        elif match.startswith("aid:"):  # do a match on the archive ID (prefix)
+            wanted_id = match.removeprefix("aid:")
+            archive_infos = [x for x in archive_infos if bin_to_hex(x.id).startswith(wanted_id)]
+            if len(archive_infos) != 1:
+                raise CommandError("archive ID based match needs to match precisely one archive ID")
+        else:  #  do a match on the name
+            regex = get_regex_from_pattern(match)
+            regex = re.compile(regex + match_end)
+            archive_infos = [x for x in archive_infos if regex.match(x.name) is not None]
         if any([oldest, newest, older, newer]):
-            archives = filter_archives_by_date(archives, oldest=oldest, newest=newest, newer=newer, older=older)
+            archive_infos = filter_archives_by_date(
+                archive_infos, oldest=oldest, newest=newest, newer=newer, older=older
+            )
         for sortkey in reversed(sort_by):
-            archives.sort(key=attrgetter(sortkey))
+            archive_infos.sort(key=attrgetter(sortkey))
         if first:
-            archives = archives[:first]
+            archive_infos = archive_infos[:first]
         elif last:
-            archives = archives[max(len(archives) - last, 0) :]
+            archive_infos = archive_infos[max(len(archive_infos) - last, 0) :]
         if reverse:
-            archives.reverse()
-        return archives
+            archive_infos.reverse()
+        return archive_infos
     def list_considering(self, args):
         """
@@ -299,6 +358,21 @@ class Archives:
             newest=getattr(args, "newest", None),
         )

+    def get_one(self, match, *, match_end=r"\Z"):
+        """get exactly one archive matching <match>"""
+        assert match is not None
+        archive_infos = self._info_tuples()
+        if match.startswith("aid:"):  # do a match on the archive ID (prefix)
+            wanted_id = match.removeprefix("aid:")
+            archive_infos = [i for i in archive_infos if bin_to_hex(i.id).startswith(wanted_id)]
+        else:  # do a match on the name
+            regex = get_regex_from_pattern(match)
+            regex = re.compile(regex + match_end)
+            archive_infos = [i for i in archive_infos if regex.match(i.name) is not None]
+        if len(archive_infos) != 1:
+            raise CommandError(f"{match} needed to match precisely one archive, but matched {len(archive_infos)}.")
+        return archive_infos[0]
+
     def _set_raw_dict(self, d):
         """set the dict we get from the msgpack unpacker"""
         for k, v in d.items():
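``get_one()`` and the ``match`` handling in ``list()`` above accept either an ``aid:<hex-prefix>`` id match or a name pattern, and ``get_one()`` insists on exactly one hit. A standalone sketch of the matching rule, using ``re.fullmatch`` in place of borg's pattern engine:

.. code-block:: python

    import re

    archives = [("docs", "a1b2"), ("docs", "d4e5")]  # toy (name, hex id) pairs

    def get_one(match):
        if match.startswith("aid:"):  # match on an archive id prefix
            want = match.removeprefix("aid:")
            hits = [a for a in archives if a[1].startswith(want)]
        else:                         # match on the name
            hits = [a for a in archives if re.fullmatch(match, a[0])]
        if len(hits) != 1:
            raise ValueError(f"{match} must match precisely one archive, matched {len(hits)}")
        return hits[0]

    assert get_one("aid:a1") == ("docs", "a1b2")
    # get_one("docs") would raise: this series holds two archives named "docs"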

+ 1 - 0
src/borg/remote.py

@@ -155,6 +155,7 @@ class RepositoryServer:  # pragma: no cover
         "load_key",
         "load_key",
         "break_lock",
         "break_lock",
         "inject_exception",
         "inject_exception",
+        "get_manifest",  # borg2 LegacyRepository has this
     )

     _rpc_methods = (  # Repository

+ 10 - 26
src/borg/testsuite/archiver/check_cmd.py

@@ -267,36 +267,20 @@ def test_manifest_rebuild_corrupted_chunk(archivers, request):
     cmd(archiver, "check", exit_code=0)


-def test_manifest_rebuild_duplicate_archive(archivers, request):
+def test_check_undelete_archives(archivers, request):
     archiver = request.getfixturevalue(archivers)
-    check_cmd_setup(archiver)
-    archive, repository = open_archive(archiver.repository_path, "archive1")
-    repo_objs = archive.repo_objs
-    with repository:
-        manifest = repository.get_manifest()
-        corrupted_manifest = manifest[:123] + b"corrupted!" + manifest[123:]
-        repository.put_manifest(corrupted_manifest)
-        archive_dict = {
-            "command_line": "",
-            "item_ptrs": [],
-            "hostname": "foo",
-            "username": "bar",
-            "name": "archive1",
-            "time": "2016-12-15T18:49:51.849711",
-            "version": 2,
-        }
-        archive = repo_objs.key.pack_metadata(archive_dict)
-        archive_id = repo_objs.id_hash(archive)
-        repository.put(archive_id, repo_objs.format(archive_id, {}, archive, ro_type=ROBJ_ARCHIVE_META))
-    cmd(archiver, "check", exit_code=1)
-    # when undeleting archives, borg check will discover both the original archive1 as well as
-    # the fake archive1 we created above. for the fake one, a new archives directory entry
-    # named archive1.1 will be created because we request undeleting archives and there
-    # is no archives directory entry for the fake archive yet.
+    check_cmd_setup(archiver)  # creates archive1 and archive2
+    # borg delete does it rather quick and dirty: it only kills the archives directory entry
+    cmd(archiver, "delete", "archive1")
+    cmd(archiver, "delete", "archive2")
+    output = cmd(archiver, "repo-list")
+    assert "archive1" not in output
+    assert "archive2" not in output
+    # borg check will re-discover archive1 and archive2 and new archives directory entries
+    # will be created because we requested undeleting archives.
     cmd(archiver, "check", "--repair", "--undelete-archives", exit_code=0)
     cmd(archiver, "check", "--repair", "--undelete-archives", exit_code=0)
     output = cmd(archiver, "repo-list")
     output = cmd(archiver, "repo-list")
     assert "archive1" in output
     assert "archive1" in output
-    assert "archive1.1" in output
     assert "archive2" in output
     assert "archive2" in output



+ 0 - 13
src/borg/testsuite/archiver/recreate_cmd.py

@@ -5,7 +5,6 @@ from datetime import datetime
 import pytest

 from ...constants import *  # NOQA
-from ...helpers import CommandError
 from .. import changedir, are_hardlinks_supported
 from . import (
     _create_test_caches,
@@ -79,18 +78,6 @@ def test_recreate_hardlinked_tags(archivers, request):  # test for issue #4911
     # if issue #4911 is present, the recreate will crash with a KeyError for "input/file1"


-def test_recreate_target_rc(archivers, request):
-    archiver = request.getfixturevalue(archivers)
-    cmd(archiver, "repo-create", RK_ENCRYPTION)
-    if archiver.FORK_DEFAULT:
-        expected_ec = CommandError().exit_code
-        output = cmd(archiver, "recreate", "--target=asdf", exit_code=expected_ec)
-        assert "Need to specify single archive" in output
-    else:
-        with pytest.raises(CommandError):
-            cmd(archiver, "recreate", "--target=asdf")
-
-
 def test_recreate_target(archivers, request):
 def test_recreate_target(archivers, request):
     archiver = request.getfixturevalue(archivers)
     archiver = request.getfixturevalue(archivers)
     create_test_files(archiver.input_path)
     create_test_files(archiver.input_path)

+ 0 - 3
src/borg/testsuite/archiver/return_codes.py

@@ -1,4 +1,3 @@
-from ...archive import Archive
 from ...constants import *  # NOQA
 from ...constants import *  # NOQA
 from ...helpers import IncludePatternNeverMatchedWarning
 from ...helpers import IncludePatternNeverMatchedWarning
 from . import cmd_fixture, changedir  # NOQA
 from . import cmd_fixture, changedir  # NOQA
@@ -18,5 +17,3 @@ def test_return_codes(cmd_fixture, tmpdir):
         assert rc == EXIT_SUCCESS
         assert rc == EXIT_SUCCESS
     rc, out = cmd_fixture("--repo=%s" % repo, "extract", "archive", "does/not/match")
     rc, out = cmd_fixture("--repo=%s" % repo, "extract", "archive", "does/not/match")
     assert rc == IncludePatternNeverMatchedWarning().exit_code
     assert rc == IncludePatternNeverMatchedWarning().exit_code
-    rc, out = cmd_fixture("--repo=%s" % repo, "create", "archive", str(input))
-    assert rc == Archive.AlreadyExists().exit_code