cache.py

import configparser
from .remote import cache_if_remote
from collections import namedtuple
import os
import stat
from binascii import hexlify
import shutil

from .key import PlaintextKey
from .logger import create_logger
logger = create_logger()
from .helpers import Error, get_cache_dir, decode_dict, st_mtime_ns, unhexlify, int_to_bigint, \
    bigint_to_int, format_file_size, yes
from .locking import UpgradableLock
from .hashindex import ChunkIndex

import msgpack


class Cache:
    """Client Side cache
    """
    class RepositoryReplay(Error):
        """Cache is newer than repository, refusing to continue"""

    class CacheInitAbortedError(Error):
        """Cache initialization aborted"""

    class RepositoryAccessAborted(Error):
        """Repository access aborted"""

    class EncryptionMethodMismatch(Error):
        """Repository encryption method changed since last access, refusing to continue"""

    def __init__(self, repository, key, manifest, path=None, sync=True, do_files=False, warn_if_unencrypted=True):
        self.lock = None
        self.timestamp = None
        self.lock = None
        self.txn_active = False
        self.repository = repository
        self.key = key
        self.manifest = manifest
        self.path = path or os.path.join(get_cache_dir(), hexlify(repository.id).decode('ascii'))
        self.do_files = do_files
        # Warn user before sending data to a never seen before unencrypted repository
        if not os.path.exists(self.path):
            if warn_if_unencrypted and isinstance(key, PlaintextKey):
                msg = ("Warning: Attempting to access a previously unknown unencrypted repository!" +
                       "\n" +
                       "Do you want to continue? [yN] ")
                if not yes(msg, false_msg="Aborting.", default_notty=False,
                           env_var_override='BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK'):
                    raise self.CacheInitAbortedError()
            self.create()
        self.open()
        # Warn user before sending data to a relocated repository
        if self.previous_location and self.previous_location != repository._location.canonical_path():
            msg = ("Warning: The repository at location {} was previously located at {}".format(
                       repository._location.canonical_path(), self.previous_location) +
                   "\n" +
                   "Do you want to continue? [yN] ")
            if not yes(msg, false_msg="Aborting.", default_notty=False,
                       env_var_override='BORG_RELOCATED_REPO_ACCESS_IS_OK'):
                raise self.RepositoryAccessAborted()
        if sync and self.manifest.id != self.manifest_id:
            # If repository is older than the cache something fishy is going on
            if self.timestamp and self.timestamp > manifest.timestamp:
                raise self.RepositoryReplay()
            # Make sure an encrypted repository has not been swapped for an unencrypted repository
            if self.key_type is not None and self.key_type != str(key.TYPE):
                raise self.EncryptionMethodMismatch()
            self.sync()
            self.commit()

    def __del__(self):
        self.close()

    def __str__(self):
        fmt = """\
All archives:   {0.total_size:>20s} {0.total_csize:>20s} {0.unique_csize:>20s}
                       Unique chunks         Total chunks
Chunk index:    {0.total_unique_chunks:20d} {0.total_chunks:20d}"""
        return fmt.format(self.format_tuple())

    def format_tuple(self):
        # XXX: this should really be moved down to `hashindex.pyx`
        Summary = namedtuple('Summary', ['total_size', 'total_csize', 'unique_size', 'unique_csize',
                                         'total_unique_chunks', 'total_chunks'])
        stats = Summary(*self.chunks.summarize())._asdict()
        for field in ['total_size', 'total_csize', 'unique_csize']:
            stats[field] = format_file_size(stats[field])
        return Summary(**stats)

    def create(self):
        """Create a new empty cache at `self.path`
        """
        os.makedirs(self.path)
        with open(os.path.join(self.path, 'README'), 'w') as fd:
            fd.write('This is a Borg cache')
        config = configparser.ConfigParser(interpolation=None)
        config.add_section('cache')
        config.set('cache', 'version', '1')
        config.set('cache', 'repository', hexlify(self.repository.id).decode('ascii'))
        config.set('cache', 'manifest', '')
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            config.write(fd)
        ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))
        os.makedirs(os.path.join(self.path, 'chunks.archive.d'))
        with open(os.path.join(self.path, 'files'), 'wb') as fd:
            pass  # empty file

    def destroy(self):
        """destroy the cache at `self.path`
        """
        self.close()
        os.remove(os.path.join(self.path, 'config'))  # kill config first
        shutil.rmtree(self.path)

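    # _do_open reads the cache config, loads the chunk index from disk and defers
    # loading the (potentially large) files cache until it is first needed.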
    def _do_open(self):
        self.config = configparser.ConfigParser(interpolation=None)
        config_path = os.path.join(self.path, 'config')
        self.config.read(config_path)
        try:
            cache_version = self.config.getint('cache', 'version')
            wanted_version = 1
            if cache_version != wanted_version:
                raise Exception('%s has unexpected cache version %d (wanted: %d).' % (
                    config_path, cache_version, wanted_version))
        except configparser.NoSectionError:
            raise Exception('%s does not look like a Borg cache.' % config_path)
        self.id = self.config.get('cache', 'repository')
        self.manifest_id = unhexlify(self.config.get('cache', 'manifest'))
        self.timestamp = self.config.get('cache', 'timestamp', fallback=None)
        self.key_type = self.config.get('cache', 'key_type', fallback=None)
        self.previous_location = self.config.get('cache', 'previous_location', fallback=None)
        self.chunks = ChunkIndex.read(os.path.join(self.path, 'chunks').encode('utf-8'))
        self.files = None

    def open(self):
        if not os.path.isdir(self.path):
            raise Exception('%s does not look like a Borg cache' % self.path)
        self.lock = UpgradableLock(os.path.join(self.path, 'lock'), exclusive=True).acquire()
        self.rollback()

    def close(self):
        if self.lock:
            self.lock.release()
            self.lock = None

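    # The files cache maps a path hash to a msgpack-packed entry of
    # (age, inode, size, mtime, chunk ids); every load bumps the age so that
    # entries not seen for a while can be expired at commit time.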
    def _read_files(self):
        self.files = {}
        self._newest_mtime = 0
        logger.info('reading files cache')
        with open(os.path.join(self.path, 'files'), 'rb') as fd:
            u = msgpack.Unpacker(use_list=True)
            while True:
                data = fd.read(64 * 1024)
                if not data:
                    break
                u.feed(data)
                for path_hash, item in u:
                    item[0] += 1
                    # in the end, this takes about 240 Bytes per file
                    self.files[path_hash] = msgpack.packb(item)

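    # Transactions snapshot the current config, chunks and files state into
    # txn.tmp and then rename it to txn.active; commit() removes the snapshot,
    # rollback() restores it.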
    def begin_txn(self):
        # Initialize transaction snapshot
        txn_dir = os.path.join(self.path, 'txn.tmp')
        os.mkdir(txn_dir)
        shutil.copy(os.path.join(self.path, 'config'), txn_dir)
        shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
        shutil.copy(os.path.join(self.path, 'files'), txn_dir)
        os.rename(os.path.join(self.path, 'txn.tmp'),
                  os.path.join(self.path, 'txn.active'))
        self.txn_active = True

    def commit(self):
        """Commit transaction
        """
        if not self.txn_active:
            return
        if self.files is not None:
            with open(os.path.join(self.path, 'files'), 'wb') as fd:
                for path_hash, item in self.files.items():
                    # Discard cached files with the newest mtime to avoid
                    # issues with filesystem snapshots and mtime precision
                    item = msgpack.unpackb(item)
                    if item[0] < 10 and bigint_to_int(item[3]) < self._newest_mtime:
                        msgpack.pack((path_hash, item), fd)
        self.config.set('cache', 'manifest', hexlify(self.manifest.id).decode('ascii'))
        self.config.set('cache', 'timestamp', self.manifest.timestamp)
        self.config.set('cache', 'key_type', str(self.key.TYPE))
        self.config.set('cache', 'previous_location', self.repository._location.canonical_path())
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            self.config.write(fd)
        self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8'))
        os.rename(os.path.join(self.path, 'txn.active'),
                  os.path.join(self.path, 'txn.tmp'))
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def rollback(self):
        """Roll back partial and aborted transactions
        """
        # Remove partial transaction
        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        # Roll back active transaction
        txn_dir = os.path.join(self.path, 'txn.active')
        if os.path.exists(txn_dir):
            shutil.copy(os.path.join(txn_dir, 'config'), self.path)
            shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
            shutil.copy(os.path.join(txn_dir, 'files'), self.path)
            os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
            if os.path.exists(os.path.join(self.path, 'txn.tmp')):
                shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False
        self._do_open()

    def sync(self):
        """Re-synchronize chunks cache with repository.

        Maintains a directory with known backup archive indexes, so it only
        needs to fetch infos from repo and build a chunk index once per backup
        archive.
        If out of sync, missing archive indexes get added, outdated indexes
        get removed and a new master chunks index is built by merging all
        archive indexes.
        """
        archive_path = os.path.join(self.path, 'chunks.archive.d')

        def mkpath(id, suffix=''):
            id_hex = hexlify(id).decode('ascii')
            path = os.path.join(archive_path, id_hex + suffix)
            return path.encode('utf-8')

        def cached_archives():
            if self.do_cache:
                fns = os.listdir(archive_path)
                # filenames with 64 hex digits == 256bit
                return set(unhexlify(fn) for fn in fns if len(fn) == 64)
            else:
                return set()

        def repo_archives():
            return set(info[b'id'] for info in self.manifest.archives.values())

        def cleanup_outdated(ids):
            for id in ids:
                os.unlink(mkpath(id))

        def add(chunk_idx, id, size, csize, incr=1):
            try:
                count, size, csize = chunk_idx[id]
                chunk_idx[id] = count + incr, size, csize
            except KeyError:
                chunk_idx[id] = incr, size, csize

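        # fetch_and_build_idx downloads one archive's metadata stream, counts every
        # referenced chunk into a fresh ChunkIndex and, if archive index caching is
        # enabled, persists that per-archive index under chunks.archive.d.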
        def fetch_and_build_idx(archive_id, repository, key):
            chunk_idx = ChunkIndex()
            cdata = repository.get(archive_id)
            data = key.decrypt(archive_id, cdata)
            add(chunk_idx, archive_id, len(data), len(cdata))
            archive = msgpack.unpackb(data)
            if archive[b'version'] != 1:
                raise Exception('Unknown archive metadata version')
            decode_dict(archive, (b'name',))
            unpacker = msgpack.Unpacker()
            for item_id, chunk in zip(archive[b'items'], repository.get_many(archive[b'items'])):
                data = key.decrypt(item_id, chunk)
                add(chunk_idx, item_id, len(data), len(chunk))
                unpacker.feed(data)
                for item in unpacker:
                    if not isinstance(item, dict):
                        logger.error('Error: Did not get expected metadata dict - archive corrupted!')
                        continue
                    if b'chunks' in item:
                        for chunk_id, size, csize in item[b'chunks']:
                            add(chunk_idx, chunk_id, size, csize)
            if self.do_cache:
                fn = mkpath(archive_id)
                fn_tmp = mkpath(archive_id, suffix='.tmp')
                try:
                    chunk_idx.write(fn_tmp)
                except Exception:
                    os.unlink(fn_tmp)
                else:
                    os.rename(fn_tmp, fn)
            return chunk_idx

        def lookup_name(archive_id):
            for name, info in self.manifest.archives.items():
                if info[b'id'] == archive_id:
                    return name

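        # create_master_idx rebuilds the master chunk index: cached per-archive
        # indexes are read from disk, missing ones are fetched and built, and all
        # of them are merged into a single ChunkIndex.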
        def create_master_idx(chunk_idx):
            logger.info('Synchronizing chunks cache...')
            cached_ids = cached_archives()
            archive_ids = repo_archives()
            logger.info('Archives: %d, w/ cached Idx: %d, w/ outdated Idx: %d, w/o cached Idx: %d.' % (
                len(archive_ids), len(cached_ids),
                len(cached_ids - archive_ids), len(archive_ids - cached_ids), ))
            # deallocates old hashindex, creates empty hashindex:
            chunk_idx.clear()
            cleanup_outdated(cached_ids - archive_ids)
            if archive_ids:
                chunk_idx = None
                for archive_id in archive_ids:
                    archive_name = lookup_name(archive_id)
                    if archive_id in cached_ids:
                        archive_chunk_idx_path = mkpath(archive_id)
                        logger.info("Reading cached archive chunk index for %s ..." % archive_name)
                        archive_chunk_idx = ChunkIndex.read(archive_chunk_idx_path)
                    else:
                        logger.info('Fetching and building archive index for %s ...' % archive_name)
                        archive_chunk_idx = fetch_and_build_idx(archive_id, repository, self.key)
                    logger.info("Merging into master chunks index ...")
                    if chunk_idx is None:
                        # we just use the first archive's idx as starting point,
                        # to avoid growing the hash table from 0 size and also
                        # to save 1 merge call.
                        chunk_idx = archive_chunk_idx
                    else:
                        chunk_idx.merge(archive_chunk_idx)
            logger.info('Done.')
            return chunk_idx

        def legacy_cleanup():
            """bring old cache dirs into the desired state (cleanup and adapt)"""
            try:
                os.unlink(os.path.join(self.path, 'chunks.archive'))
            except:
                pass
            try:
                os.unlink(os.path.join(self.path, 'chunks.archive.tmp'))
            except:
                pass
            try:
                os.mkdir(archive_path)
            except:
                pass

        self.begin_txn()
        repository = cache_if_remote(self.repository)
        legacy_cleanup()
        # TEMPORARY HACK: to avoid archive index caching, create a FILE named
        # ~/.cache/borg/REPOID/chunks.archive.d - this is only recommended if you
        # have a fast, low latency connection to your repo (e.g. if repo is local disk)
        self.do_cache = os.path.isdir(archive_path)
        self.chunks = create_master_idx(self.chunks)

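    # add_chunk deduplicates on the chunk id: if the chunk is already known it only
    # bumps the reference count, otherwise the data is encrypted, written to the
    # repository and recorded in the chunk index with refcount 1.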
    def add_chunk(self, id, data, stats):
        if not self.txn_active:
            self.begin_txn()
        size = len(data)
        if self.seen_chunk(id, size):
            return self.chunk_incref(id, stats)
        data = self.key.encrypt(data)
        csize = len(data)
        self.repository.put(id, data, wait=False)
        self.chunks[id] = (1, size, csize)
        stats.update(size, csize, True)
        return id, size, csize

    def seen_chunk(self, id, size=None):
        refcount, stored_size, _ = self.chunks.get(id, (0, None, None))
        if size is not None and stored_size is not None and size != stored_size:
            # we already have a chunk with that id, but different size.
            # this is either a hash collision (unlikely) or corruption or a bug.
            raise Exception("chunk has same id [%r], but different size (stored: %d new: %d)!" % (
                id, stored_size, size))
        return refcount

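    # chunk_incref/chunk_decref adjust the reference count of an existing chunk;
    # when the count drops to zero the chunk is deleted from the repository.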
    def chunk_incref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        self.chunks[id] = (count + 1, size, csize)
        stats.update(size, csize, False)
        return id, size, csize

    def chunk_decref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        if count == 1:
            del self.chunks[id]
            self.repository.delete(id, wait=False)
            stats.update(-size, -csize, True)
        else:
            self.chunks[id] = (count - 1, size, csize)
            stats.update(-size, -csize, False)

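    # file_known_and_unchanged returns the cached chunk ids for a file if its
    # inode, size and mtime still match the files cache entry, otherwise None.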
    def file_known_and_unchanged(self, path_hash, st):
        if not (self.do_files and stat.S_ISREG(st.st_mode)):
            return None
        if self.files is None:
            self._read_files()
        entry = self.files.get(path_hash)
        if not entry:
            return None
        entry = msgpack.unpackb(entry)
        if entry[2] == st.st_size and bigint_to_int(entry[3]) == st_mtime_ns(st) and entry[1] == st.st_ino:
            # reset entry age
            entry[0] = 0
            self.files[path_hash] = msgpack.packb(entry)
            return entry[4]
        else:
            return None

    def memorize_file(self, path_hash, st, ids):
        if not (self.do_files and stat.S_ISREG(st.st_mode)):
            return
        # Entry: Age, inode, size, mtime, chunk ids
        mtime_ns = st_mtime_ns(st)
        self.files[path_hash] = msgpack.packb((0, st.st_ino, st.st_size, int_to_bigint(mtime_ns), ids))
        self._newest_mtime = max(self._newest_mtime, mtime_ns)

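# Usage sketch (assumes the calling code supplies repository, key and manifest
# objects, as the rest of borg does):
#
#     cache = Cache(repository, key, manifest)   # open or create the on-disk cache
#     cache.add_chunk(chunk_id, data, stats)     # deduplicating store into the repo
#     cache.commit()                             # or cache.rollback() on error
#     cache.close()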