@@ -19,6 +19,7 @@ from .helpers import yes
 from .item import Item
 from .key import PlaintextKey
 from .locking import UpgradableLock
+from .platform import SaveFile
 from .remote import cache_if_remote
 
 ChunkListEntry = namedtuple('ChunkListEntry', 'id size csize')
@@ -141,11 +142,11 @@ Chunk index: {0.total_unique_chunks:20d} {0.total_chunks:20d}"""
         config.set('cache', 'version', '1')
         config.set('cache', 'repository', self.repository.id_str)
         config.set('cache', 'manifest', '')
-        with open(os.path.join(self.path, 'config'), 'w') as fd:
+        with SaveFile(os.path.join(self.path, 'config')) as fd:
             config.write(fd)
         ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))
         os.makedirs(os.path.join(self.path, 'chunks.archive.d'))
-        with open(os.path.join(self.path, 'files'), 'wb') as fd:
+        with SaveFile(os.path.join(self.path, 'files'), binary=True) as fd:
             pass  # empty file
 
     def _do_open(self):
@@ -212,7 +213,7 @@ Chunk index: {0.total_unique_chunks:20d} {0.total_chunks:20d}"""
         if not self.txn_active:
             return
         if self.files is not None:
-            with open(os.path.join(self.path, 'files'), 'wb') as fd:
+            with SaveFile(os.path.join(self.path, 'files'), binary=True) as fd:
                 for path_hash, item in self.files.items():
                     # Discard cached files with the newest mtime to avoid
                     # issues with filesystem snapshots and mtime precision
@@ -223,7 +224,7 @@ Chunk index: {0.total_unique_chunks:20d} {0.total_chunks:20d}"""
         self.config.set('cache', 'timestamp', self.manifest.timestamp)
         self.config.set('cache', 'key_type', str(self.key.TYPE))
         self.config.set('cache', 'previous_location', self.repository._location.canonical_path())
-        with open(os.path.join(self.path, 'config'), 'w') as fd:
+        with SaveFile(os.path.join(self.path, 'config')) as fd:
             self.config.write(fd)
         self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8'))
         os.rename(os.path.join(self.path, 'txn.active'),
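
Note: the hunks above swap the plain open() calls for SaveFile from the platform
module when writing the cache 'config' and 'files'. The save_file() helper below
is a minimal, hypothetical sketch of the atomic-write behaviour assumed here
(write to a temporary file in the same directory, fsync, then rename over the
target); it is for illustration only and is not the actual SaveFile
implementation.

import os
import tempfile
from contextlib import contextmanager

@contextmanager
def save_file(path, binary=False):
    """Hypothetical sketch: write to a temp file, then atomically replace path."""
    mode = 'wb' if binary else 'w'
    dirname = os.path.dirname(path) or '.'
    # temp file in the same directory, so the final rename stays on one filesystem
    fd, tmp_path = tempfile.mkstemp(dir=dirname, prefix='.tmp-')
    try:
        with os.fdopen(fd, mode) as f:
            yield f
            f.flush()
            os.fsync(f.fileno())  # ensure data is on disk before the rename
        os.replace(tmp_path, path)  # atomic on POSIX: readers see either old or new file
    except BaseException:
        os.unlink(tmp_path)  # leave the old file untouched on any failure
        raise

Usage mirrors the calls in the diff, e.g.:

    with save_file(os.path.join(self.path, 'config')) as fd:
        config.write(fd)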