
New cache implementation

Jonas Borgström, 14 years ago
commit 666db4bf4f

5 changed files with 129 additions and 63 deletions
  1. darc/archive.py    + 4 - 3
  2. darc/archiver.py   + 0 - 1
  3. darc/cache.py      + 117 - 57
  4. darc/hashindex.pyx + 6 - 0
  5. darc/store.py      + 2 - 2

+ 4 - 3
darc/archive.py

@@ -79,7 +79,7 @@ class Archive(object):
             data, hash = self.keychain.encrypt(PACKET_ARCHIVE_CHUNKS, msgpack.packb(chunks))
             self.store.put(NS_ARCHIVE_CHUNKS, hash, data)
             ids.append(hash)
-        for id, (count, size) in cache.chunk_counts.iteritems():
+        for id, (count, size) in cache.chunks.iteritems():
             if count > 1000000:
                 chunks.append((id, size))
             if len(chunks) > 100000:
@@ -105,6 +105,7 @@ class Archive(object):
         data, self.hash = self.keychain.encrypt(PACKET_ARCHIVE_METADATA, msgpack.packb(metadata))
         self.store.put(NS_ARCHIVE_METADATA, self.id, data)
         self.store.commit()
+        cache.commit()
 
     def stats(self, cache):
         osize = csize = usize = 0
@@ -207,7 +208,7 @@ class Archive(object):
         for id in self.metadata['items_ids']:
             self.store.delete(NS_ARCHIVE_ITEMS, id)
         self.store.commit()
-        cache.save()
+        cache.commit()
 
     def stat_attrs(self, st, path):
         item = {
@@ -277,7 +278,7 @@ class Archive(object):
                                       self.keychain.get_chunkify_seed()):
                     ids.append(cache.add_chunk(self.keychain.id_hash(chunk), chunk))
                     size += len(chunk)
-            cache.memorize_file_chunks(path_hash, st, ids)
+            cache.memorize_file(path_hash, st, ids)
         item = {'path': safe_path, 'chunks': ids, 'size': size}
         item.update(self.stat_attrs(st, path))
         self.add_item(item)
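
The loop at the top of this hunk relies on a refcount convention introduced in darc/cache.py below: a chunk stored or first referenced during the current backup run has 1000000 added to its count, so `count > 1000000` selects exactly the chunks belonging to the archive being saved, and the cache's commit() later subtracts the offset again. A minimal sketch of that encoding, using a plain dict and made-up ids in place of the real NSIndex, may make the bookkeeping easier to follow:

    FLAG = 1000000  # added to a chunk's count while the current run references it

    chunks = {}  # id -> (count, compressed size); stands in for cache.chunks

    def add_chunk(id, csize):
        # New chunks start with one reference plus the in-run flag (1000001 in darc).
        chunks[id] = (1 + FLAG, csize)

    def chunk_incref(id):
        count, csize = chunks[id]
        if count < FLAG:
            # First reference in this run: count it once and set the flag.
            chunks[id] = (count + 1 + FLAG, csize)

    def commit():
        # Clearing the flag leaves plain per-archive reference counts behind.
        for id, (count, csize) in list(chunks.items()):
            if count > FLAG:
                chunks[id] = (count - FLAG, csize)

    add_chunk('a1', 100)   # first backup stores the chunk
    commit()               # chunks['a1'] == (1, 100)
    chunk_incref('a1')     # a later backup references the same chunk
    commit()
    assert chunks['a1'] == (2, 100)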

+ 0 - 1
darc/archiver.py

@@ -76,7 +76,6 @@ class Archiver(object):
         for path in args.paths:
             self._process(archive, cache, args.patterns, unicode(path))
         archive.save(args.archive.archive, cache)
-        cache.save()
         return self.exit_code
 
     def _process(self, archive, cache, patterns, path):

+ 117 - 57
darc/cache.py

@@ -1,7 +1,11 @@
+from ConfigParser import RawConfigParser
+import fcntl
 import msgpack
 import os
+import shutil
 
 from . import NS_ARCHIVE_CHUNKS, NS_CHUNK, PACKET_ARCHIVE_CHUNKS, PACKET_CHUNK
+from .hashindex import NSIndex
 
 
 class Cache(object):
@@ -9,111 +13,167 @@ class Cache(object):
     """
 
     def __init__(self, store, keychain):
-        self.tid = -1
+        self.txn_active = False
         self.store = store
         self.keychain = keychain
-        self.path = os.path.join(Cache.cache_dir_path(),
-                                 '%s.cache' % self.store.id.encode('hex'))
+        self.path = os.path.join(Cache.cache_dir_path(), self.store.id.encode('hex'))
+        if not os.path.exists(self.path):
+            self.create()
         self.open()
+        assert self.id == store.id
         if self.tid != store.tid:
-            self.init()
+            self.sync()
 
     @staticmethod
     def cache_dir_path():
         """Return path to directory used for storing users cache files"""
         return os.path.join(os.path.expanduser('~'), '.darc', 'cache')
 
+    def create(self):
+        """Create a new empty store at `path`
+        """
+        os.mkdir(self.path)
+        with open(os.path.join(self.path, 'README'), 'wb') as fd:
+            fd.write('This is a DARC cache')
+        config = RawConfigParser()
+        config.add_section('cache')
+        config.set('cache', 'version', '1')
+        config.set('cache', 'store_id', self.store.id.encode('hex'))
+        config.set('cache', 'tid', '0')
+        with open(os.path.join(self.path, 'config'), 'wb') as fd:
+            config.write(fd)
+        NSIndex.create(os.path.join(self.path, 'chunks'))
+        with open(os.path.join(self.path, 'files'), 'wb') as fd:
+            pass # empty file
+
     def open(self):
-        if not os.path.exists(self.path):
-            return
-        with open(self.path, 'rb') as fd:
-            #data, hash = self.keychain.decrypt(fd.read())
-            cache = msgpack.unpackb(fd.read())
-        assert cache['version'] == 1
-        self.chunk_counts = cache['chunk_counts']
-        self.file_chunks = cache['file_chunks']
-        self.tid = cache['tid']
-
-    def init(self):
+        if not os.path.isdir(self.path):
+            raise Exception('%s Does not look like a darc cache' % self.path)
+        self.lock_fd = open(os.path.join(self.path, 'README'), 'r+')
+        fcntl.flock(self.lock_fd, fcntl.LOCK_EX)
+        self.rollback()
+        self.config = RawConfigParser()
+        self.config.read(os.path.join(self.path, 'config'))
+        if self.config.getint('cache', 'version') != 1:
+            raise Exception('%s Does not look like a darc cache' % self.path)
+        self.id = self.config.get('cache', 'store_id').decode('hex')
+        self.tid = self.config.getint('cache', 'tid')
+        self.chunks = NSIndex(os.path.join(self.path, 'chunks'))
+        with open(os.path.join(self.path, 'files'), 'rb') as fd:
+            self.files = {}
+            u = msgpack.Unpacker()
+            while True:
+                data = fd.read(64 * 1024)
+                if not data:
+                    break
+                u.feed(data)
+                for hash, item in u:
+                    if item[0] < 8:
+                        self.files[hash] = (item[0] + 1,) + item[1:]
+
+    def begin_txn(self):
+        # Initialize transaction snapshot
+        txn_dir = os.path.join(self.path, 'txn.tmp')
+        os.mkdir(txn_dir)
+        shutil.copy(os.path.join(self.path, 'config'), txn_dir)
+        shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
+        shutil.copy(os.path.join(self.path, 'files'), txn_dir)
+        os.rename(os.path.join(self.path, 'txn.tmp'),
+                  os.path.join(self.path, 'txn.active'))
+        self.txn_active = True
+
+    def commit(self):
+        """Commit transaction
+        """
+        with open(os.path.join(self.path, 'files'), 'wb') as fd:
+            for item in self.files.iteritems():
+                msgpack.pack(item, fd)
+        for id, (count, size) in self.chunks.iteritems():
+            if count > 1000000:
+                self.chunks[id] = count - 1000000, size
+        self.config.set('cache', 'tid', self.store.tid)
+        with open(os.path.join(self.path, 'config'), 'w') as fd:
+            self.config.write(fd)
+        self.chunks.flush()
+        os.rename(os.path.join(self.path, 'txn.active'),
+                  os.path.join(self.path, 'txn.tmp'))
+        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
+        self.txn_active = False
+
+    def rollback(self):
+        """Roll back partial and aborted transactions
+        """
+        # Remove partial transaction
+        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
+            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
+        # Roll back active transaction
+        txn_dir = os.path.join(self.path, 'txn.active')
+        if os.path.exists(txn_dir):
+            shutil.copy(os.path.join(txn_dir, 'config'), self.path)
+            shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
+            shutil.copy(os.path.join(txn_dir, 'files'), self.path)
+            shutil.rmtree(txn_dir)
+        self.txn_active = False
+
+    def sync(self):
         """Initializes cache by fetching and reading all archive indicies
         """
+        self.begin_txn()
         print 'Initializing cache...'
-        self.chunk_counts = {}
-        self.file_chunks = {}
         for id in self.store.list(NS_ARCHIVE_CHUNKS):
-            if len(id) != 32:
-                import ipdb
-                ipdb.set_trace()
             magic, data, hash = self.keychain.decrypt(self.store.get(NS_ARCHIVE_CHUNKS, id))
             assert magic == PACKET_ARCHIVE_CHUNKS
             chunks = msgpack.unpackb(data)
             for id, size in chunks:
                 try:
-                    count, size = self.chunk_counts[id]
-                    self.chunk_counts[id] = count + 1, size
+                    count, size = self.chunks[id]
+                    self.chunks[id] = count + 1, size
                 except KeyError:
-                    self.chunk_counts[id] = 1, size
-        self.save()
-
-    def filter_file_chunks(self):
-        for key, value in self.file_chunks.iteritems():
-            if value[0] < 8:
-                yield key, (value[0] + 1,) + value[1:]
-
-    def save(self):
-        for id, (count, size) in self.chunk_counts.iteritems():
-            if count > 1000000:
-                self.chunk_counts[id] = count - 1000000, size
-
-        cache = {'version': 1,
-                'tid': self.store.tid,
-                'chunk_counts': self.chunk_counts,
-                'file_chunks': dict(self.filter_file_chunks()),
-        }
-#        data, hash = self.keychain.encrypt_create(msgpack.packb(cache))
-        cachedir = os.path.dirname(self.path)
-        if not os.path.exists(cachedir):
-            os.makedirs(cachedir)
-        with open(self.path, 'wb') as fd:
-            fd.write(msgpack.packb(cache))
+                    self.chunks[id] = 1, size
 
     def add_chunk(self, id, data):
+        if not self.txn_active:
+            self.begin_txn()
         if self.seen_chunk(id):
             return self.chunk_incref(id)
         data, hash = self.keychain.encrypt(PACKET_CHUNK, data)
         csize = len(data)
         self.store.put(NS_CHUNK, id, data)
-        self.chunk_counts[id] = (1000001, csize)
+        self.chunks[id] = (1000001, csize)
         return id
 
     def seen_chunk(self, id):
-        return self.chunk_counts.get(id, (0, 0))[0]
+        return self.chunks.get(id, (0, 0))[0]
 
     def chunk_incref(self, id):
-        count, size = self.chunk_counts[id]
+        if not self.txn_active:
+            self.begin_txn()
+        count, size = self.chunks[id]
         if count < 1000000:
-            self.chunk_counts[id] = (count + 1000001, size)
+            self.chunks[id] = (count + 1000001, size)
         return id
 
     def chunk_decref(self, id):
-        count, size = self.chunk_counts[id]
+        if not self.txn_active:
+            self.begin_txn()
+        count, size = self.chunks[id]
         if count == 1:
-            del self.chunk_counts[id]
+            del self.chunks[id]
             self.store.delete(NS_CHUNK, id)
         else:
-            self.chunk_counts[id] = (count - 1, size)
+            self.chunks[id] = (count - 1, size)
 
     def file_known_and_unchanged(self, path_hash, st):
-        entry = self.file_chunks.get(path_hash)
+        entry = self.files.get(path_hash)
         if (entry and entry[3] == st.st_mtime
             and entry[2] == st.st_size and entry[1] == st.st_ino):
             # reset entry age
-            self.file_chunks[path_hash] = (0,) + entry[1:]
+            self.files[path_hash] = (0,) + entry[1:]
             return entry[4], entry[2]
         else:
             return None, 0
 
-    def memorize_file_chunks(self, path_hash, st, ids):
+    def memorize_file(self, path_hash, st, ids):
         # Entry: Age, inode, size, mtime, chunk ids
-        self.file_chunks[path_hash] = 0, st.st_ino, st.st_size, st.st_mtime, ids
+        self.files[path_hash] = 0, st.st_ino, st.st_size, st.st_mtime, ids
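
Taken together, the new cache keeps its state in three files (config, chunks, files) under ~/.darc/cache/<store_id>/ and guards updates with a snapshot-style transaction: begin_txn() copies the three files into txn.tmp and atomically renames it to txn.active, commit() writes the new state and discards the snapshot, and rollback() (run from open()) restores the snapshot if one is still present after a crash. The sketch below shows the same pattern for a single state file; TxnDir and its methods are illustrative names, not darc's actual API:

    import os
    import shutil

    class TxnDir(object):
        """Snapshot-based transactions over one 'state' file (hypothetical example)."""

        def __init__(self, path):
            self.path = path
            self.rollback()  # like Cache.open(): recover before reading anything

        def begin_txn(self):
            tmp = os.path.join(self.path, 'txn.tmp')
            os.mkdir(tmp)
            shutil.copy(os.path.join(self.path, 'state'), tmp)
            # The rename is atomic: after a crash there is either no snapshot
            # (nothing to undo) or a complete one (safe to restore from).
            os.rename(tmp, os.path.join(self.path, 'txn.active'))

        def commit(self, new_state):
            with open(os.path.join(self.path, 'state'), 'wb') as fd:
                fd.write(new_state)
            # Rename before removal, so an interrupted cleanup looks like a
            # partial begin_txn and is simply deleted on the next rollback.
            os.rename(os.path.join(self.path, 'txn.active'),
                      os.path.join(self.path, 'txn.tmp'))
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))

        def rollback(self):
            tmp = os.path.join(self.path, 'txn.tmp')
            if os.path.exists(tmp):
                shutil.rmtree(tmp)  # partial begin_txn or commit cleanup: discard
            active = os.path.join(self.path, 'txn.active')
            if os.path.exists(active):
                # A transaction was begun but never committed: restore the snapshot.
                shutil.copy(os.path.join(active, 'state'), self.path)
                shutil.rmtree(active)

The files cache follows the same write-on-commit rule: commit() serializes self.files with msgpack, while open() increments each entry's age and keeps only entries with age below 8, so entries untouched for eight runs expire without a separate cleanup pass.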
 

+ 6 - 0
darc/hashindex.pyx

@@ -33,6 +33,12 @@ cdef class IndexBase:
         if not key in self:
             self[key] = value
 
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
     def pop(self, key):
         value = self[key]
         del self[key]
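
The new get() gives the Cython index the same lookup-with-default contract as a dict, which is what lets NSIndex replace the plain dict that previously backed the chunk cache (see Cache.seen_chunk above). A tiny illustration of that contract, with a plain dict standing in for the index and made-up ids and sizes:

    index = {}
    index['\x00' * 32] = (1, 4096)   # id -> (refcount, compressed size)

    def seen_chunk(id):
        # Mirrors Cache.seen_chunk: an unknown id simply counts as zero references.
        return index.get(id, (0, 0))[0]

    assert seen_chunk('\x00' * 32) == 1
    assert seen_chunk('\xff' * 32) == 0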

+ 2 - 2
darc/store.py

@@ -64,6 +64,7 @@ class Store(object):
             raise Exception('%s Does not look like a darc store' % path)
         self.lock_fd = open(os.path.join(path, 'README'), 'r+')
         fcntl.flock(self.lock_fd, fcntl.LOCK_EX)
+        self.rollback()
         self.config = RawConfigParser()
         self.config.read(os.path.join(path, 'config'))
         if self.config.getint('store', 'version') != 1:
@@ -73,7 +74,6 @@ class Store(object):
         next_band = self.config.getint('state', 'next_band')
         max_band_size = self.config.getint('store', 'max_band_size')
         bands_per_dir = self.config.getint('store', 'bands_per_dir')
-        self.rollback()
         self.io = BandIO(self.path, next_band, max_band_size, bands_per_dir)
 
     def delete_bands(self):
@@ -130,7 +130,7 @@ class Store(object):
             return
         self.io.close_band()
         def lookup(ns, key):
-            return key in self.indexes[ns]
+            return key in self.get_index(ns)
         for band in self.compact:
             if self.bands[band] > 0:
                 for ns, key, data in self.io.iter_objects(band, lookup):
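
The store change mirrors the cache: self.rollback() now runs right after the lock is taken and before the config and band counters are parsed, presumably so that values such as next_band always describe the committed state rather than one a crashed transaction is about to undo; the lookup() change in the last hunk simply routes the membership test through get_index(ns) instead of indexing self.indexes directly. A condensed sketch of the new ordering in open(), where rollback and read_config are placeholder callables rather than darc APIs:

    import fcntl
    import os

    def open_state(path, rollback, read_config):
        """Hypothetical helper showing the ordering Store.open() now uses."""
        lock_fd = open(os.path.join(path, 'README'), 'r+')
        fcntl.flock(lock_fd, fcntl.LOCK_EX)   # 1. take the exclusive lock
        rollback()                            # 2. undo any crashed transaction
        return lock_fd, read_config()         # 3. only now parse the on-disk state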