@@ -6,12 +6,12 @@ import tempfile
 import time
 from collections import defaultdict
 from distutils.version import LooseVersion
+from zlib import adler32
 
 import llfuse
 import msgpack
 
 from .logger import create_logger
-from .lrucache import LRUCache
 logger = create_logger()
 
 from .archive import Archive
@@ -51,14 +51,20 @@ class ItemCache:
 class FuseOperations(llfuse.Operations):
     """Export archive as a fuse filesystem
     """
-
+    # mount options
    allow_damaged_files = False
+    versions = False
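+    # ("versions": merge all archives; each file becomes a directory of its versions)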
 
     def __init__(self, key, repository, manifest, archive, cached_repo):
         super().__init__()
-        self._inode_count = 0
-        self.key = key
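+        # the plain (uncached) repository is used for opening archives in _create_filesystem()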
+        self.repository_uncached = repository
         self.repository = cached_repo
+        self.archive = archive
+        self.manifest = manifest
+        self.key = key
+        self._inode_count = 0
         self.items = {}
         self.parent = {}
         self.contents = defaultdict(dict)
@@ -69,15 +75,23 @@ class FuseOperations(llfuse.Operations):
         data_cache_capacity = int(os.environ.get('BORG_MOUNT_DATA_CACHE_ENTRIES', os.cpu_count() or 1))
         logger.debug('mount data cache capacity: %d chunks', data_cache_capacity)
         self.data_cache = LRUCache(capacity=data_cache_capacity, dispose=lambda _: None)
+
+    def _create_filesystem(self):
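+        # called from mount() after option parsing, so self.versions is already set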
         self._create_dir(parent=1)  # first call, create root dir (inode == 1)
-        if archive:
-            self.process_archive(archive)
+        if self.archive:
+            self.process_archive(self.archive)
         else:
-            for name in manifest.archives:
-                # Create archive placeholder inode
-                archive_inode = self._create_dir(parent=1)
-                self.contents[1][os.fsencode(name)] = archive_inode
-                self.pending_archives[archive_inode] = Archive(repository, key, manifest, name)
+            for name in self.manifest.archives:
+                archive = Archive(self.repository_uncached, self.key, self.manifest, name)
+                if self.versions:
+                    # process archives immediately
+                    self.process_archive(archive)
+                else:
+                    # lazy load archives, create archive placeholder inode
+                    archive_inode = self._create_dir(parent=1)
+                    self.contents[1][os.fsencode(name)] = archive_inode
+                    self.pending_archives[archive_inode] = archive
 
     def mount(self, mountpoint, mount_options, foreground=False):
         """Mount filesystem on *mountpoint* with *mount_options*."""
@@ -89,6 +103,13 @@ class FuseOperations(llfuse.Operations):
             self.allow_damaged_files = True
         except ValueError:
             pass
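+        # consume our private option; the remaining options are passed on to llfuse.init()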
+        try:
+            options.remove('versions')
+            self.versions = True
+        except ValueError:
+            pass
+        self._create_filesystem()
         llfuse.init(self, mountpoint, options)
         if not foreground:
             daemonize()
@@ -122,11 +143,17 @@ class FuseOperations(llfuse.Operations):
         unpacker.feed(data)
         for item in unpacker:
             item = Item(internal_dict=item)
+            is_dir = stat.S_ISDIR(item.mode)
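+            # (computed once per item; also passed down to process_leaf() below)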
             try:
                 # This can happen if an archive was created with a command line like
                 # $ borg create ... dir1/file dir1
                 # In this case the code below will have created a default_dir inode for dir1 already.
-                inode = self._find_inode(safe_encode(item.path), prefix)
+                path = safe_encode(item.path)
+                if not is_dir:
+                    # not a directory -> no lookup needed
+                    raise KeyError
+                inode = self._find_inode(path, prefix)
             except KeyError:
                 pass
             else:
@@ -137,25 +164,49 @@ class FuseOperations(llfuse.Operations):
         num_segments = len(segments)
         parent = 1
         for i, segment in enumerate(segments, 1):
-            # Leaf segment?
             if i == num_segments:
-                if 'source' in item and stat.S_ISREG(item.mode):
-                    inode = self._find_inode(item.source, prefix)
-                    item = self.cache.get(inode)
-                    item.nlink = item.get('nlink', 1) + 1
-                    self.items[inode] = item
-                else:
-                    inode = self.cache.add(item)
-                self.parent[inode] = parent
-                if segment:
-                    self.contents[parent][segment] = inode
-            elif segment in self.contents[parent]:
-                parent = self.contents[parent][segment]
+                self.process_leaf(segment, item, parent, prefix, is_dir)
             else:
-                inode = self._create_dir(parent)
-                if segment:
-                    self.contents[parent][segment] = inode
-                parent = inode
+                parent = self.process_inner(segment, parent)
+
+    def process_leaf(self, name, item, parent, prefix, is_dir):
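+        # version_name appends a short content fingerprint: adler32 folded over the chunk ids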
+        def version_name(name, item):
+            if 'chunks' in item:
+                ident = 0
+                for chunkid, _, _ in item.chunks:
+                    ident = adler32(chunkid, ident)
+                name = name + safe_encode('.%08x' % ident)
+            return name
+
+        if self.versions and not is_dir:
+            parent = self.process_inner(name, parent)
+            name = version_name(name, item)
+        self.process_real_leaf(name, item, parent, prefix)
+
+    def process_real_leaf(self, name, item, parent, prefix):
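+        # a regular file with a 'source' is a hard link: re-use the master's inode, bump nlink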
+        if 'source' in item and stat.S_ISREG(item.mode):
+            inode = self._find_inode(item.source, prefix)
+            item = self.cache.get(inode)
+            item.nlink = item.get('nlink', 1) + 1
+            self.items[inode] = item
+        else:
+            inode = self.cache.add(item)
+        self.parent[inode] = parent
+        if name:
+            self.contents[parent][name] = inode
+
+    def process_inner(self, name, parent):
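+        # return the inode of sub-directory *name* below *parent*, creating it if needed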
+        if name in self.contents[parent]:
+            parent = self.contents[parent][name]
+        else:
+            inode = self._create_dir(parent)
+            if name:
+                self.contents[parent][name] = inode
+            parent = inode
+        return parent
 
     def allocate_inode(self):
         self._inode_count += 1
@@ -280,7 +331,6 @@ class FuseOperations(llfuse.Operations):
                 # evict fully read chunk from cache
                 del self.data_cache[id]
             else:
-                # XXX
                 _, data = self.key.decrypt(id, self.repository.get(id))
                 if offset + n < len(data):
                     # chunk was only partially read, cache it