import configparser
import os
import shutil
import stat
from binascii import unhexlify
from collections import namedtuple

import msgpack

from .hashindex import ChunkIndex
from .helpers import Error, Manifest, get_cache_dir, decode_dict, int_to_bigint, \
    bigint_to_int, format_file_size, yes, bin_to_hex, Location, safe_ns, parse_stringified_list
from .key import PlaintextKey
from .locking import Lock
from .logger import create_logger
from .remote import cache_if_remote

logger = create_logger()

class Cache:
    """Client-side cache
    """

    class RepositoryIDNotUnique(Error):
        """Cache is newer than repository - do you have multiple, independently updated repos with same ID?"""

    class RepositoryReplay(Error):
        """Cache is newer than repository - this is either an attack or unsafe (multiple repos with same ID)"""

    class CacheInitAbortedError(Error):
        """Cache initialization aborted"""

    class RepositoryAccessAborted(Error):
        """Repository access aborted"""

    class EncryptionMethodMismatch(Error):
        """Repository encryption method changed since last access, refusing to continue"""

    @staticmethod
    def break_lock(repository, path=None):
        path = path or os.path.join(get_cache_dir(), bin_to_hex(repository.id))
        Lock(os.path.join(path, 'lock'), exclusive=True).break_lock()

    @staticmethod
    def destroy(repository, path=None):
        """destroy the cache for ``repository`` or at ``path``"""
        path = path or os.path.join(get_cache_dir(), bin_to_hex(repository.id))
        config = os.path.join(path, 'config')
        if os.path.exists(config):
            os.remove(config)  # kill config first
            shutil.rmtree(path)

    def __init__(self, repository, key, manifest, path=None, sync=True, do_files=False, warn_if_unencrypted=True,
                 lock_wait=None):
        """
        :param lock_wait: timeout for lock acquisition (int [s] or None [wait forever])
        """
        self.lock = None
        self.timestamp = None
        self.txn_active = False
        self.repository = repository
        self.key = key
        self.manifest = manifest
        self.path = path or os.path.join(get_cache_dir(), bin_to_hex(repository.id))
        self.do_files = do_files
        # Warn user before sending data to a never-seen-before unencrypted repository
        if not os.path.exists(self.path):
            if warn_if_unencrypted and isinstance(key, PlaintextKey):
                msg = ("Warning: Attempting to access a previously unknown unencrypted repository!" +
                       "\n" +
                       "Do you want to continue? [yN] ")
                if not yes(msg, false_msg="Aborting.", invalid_msg="Invalid answer, aborting.",
                           retry=False, env_var_override='BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK'):
                    raise self.CacheInitAbortedError()
            self.create()
        self.open(lock_wait=lock_wait)
        try:
            # Warn user before sending data to a relocated repository
            if self.previous_location and self.previous_location != repository._location.canonical_path():
                msg = ("Warning: The repository at location {} was previously located at {}".format(
                           repository._location.canonical_path(), self.previous_location) +
                       "\n" +
                       "Do you want to continue? [yN] ")
                if not yes(msg, false_msg="Aborting.", invalid_msg="Invalid answer, aborting.",
                           retry=False, env_var_override='BORG_RELOCATED_REPO_ACCESS_IS_OK'):
                    raise self.RepositoryAccessAborted()
                # adapt on-disk config immediately if the new location was accepted
                self.begin_txn()
                self.commit()
            if not self.check_cache_compatibility():
                self.wipe_cache()
            self.update_compatibility()
            if sync and self.manifest.id != self.manifest_id:
                # If repository is older than the cache something fishy is going on
                if self.timestamp and self.timestamp > manifest.timestamp:
                    if isinstance(key, PlaintextKey):
                        raise self.RepositoryIDNotUnique()
                    else:
                        raise self.RepositoryReplay()
                # Make sure an encrypted repository has not been swapped for an unencrypted repository
                if self.key_type is not None and self.key_type != str(key.TYPE):
                    raise self.EncryptionMethodMismatch()
                self.sync()
                self.commit()
        except:
            self.close()
            raise
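
    # Typical usage is as a context manager so that close() (and thus lock
    # release) is guaranteed; a minimal sketch, assuming repository, key and
    # manifest have already been set up elsewhere:
    #
    #   with Cache(repository, key, manifest) as cache:
    #       cache.add_chunk(id, data, stats)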

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def __str__(self):
        fmt = """\
All archives:   {0.total_size:>20s} {0.total_csize:>20s} {0.unique_csize:>20s}

                       Unique chunks         Total chunks
Chunk index:    {0.total_unique_chunks:20d} {0.total_chunks:20d}"""
        return fmt.format(self.format_tuple())

    def format_tuple(self):
        # XXX: this should really be moved down to `hashindex.pyx`
        Summary = namedtuple('Summary', ['total_size', 'total_csize', 'unique_size', 'unique_csize',
                                         'total_unique_chunks', 'total_chunks'])
        stats = Summary(*self.chunks.summarize())._asdict()
        for field in ['total_size', 'total_csize', 'unique_csize']:
            stats[field] = format_file_size(stats[field])
        return Summary(**stats)

    def create(self):
        """Create a new empty cache at `self.path`
        """
        os.makedirs(self.path)
        with open(os.path.join(self.path, 'README'), 'w') as fd:
            fd.write('This is a Borg cache')
        config = configparser.ConfigParser(interpolation=None)
        config.add_section('cache')
        config.set('cache', 'version', '1')
        config.set('cache', 'repository', bin_to_hex(self.repository.id))
        config.set('cache', 'manifest', '')
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            config.write(fd)
        ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))
        os.makedirs(os.path.join(self.path, 'chunks.archive.d'))
        with open(os.path.join(self.path, 'files'), 'wb'):
            pass  # empty file
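
    # On-disk layout written by create(): 'README', 'config' (INI holding
    # version, repository id and manifest id), 'chunks' (serialized
    # ChunkIndex), 'chunks.archive.d/' (per-archive chunk indexes, see
    # sync()) and an empty 'files' (msgpack stream for the files cache).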

    def _check_upgrade(self, config_path):
        try:
            cache_version = self.config.getint('cache', 'version')
            wanted_version = 1
            if cache_version != wanted_version:
                self.close()
                raise Exception('%s has unexpected cache version %d (wanted: %d).' % (
                    config_path, cache_version, wanted_version))
        except configparser.NoSectionError:
            self.close()
            raise Exception('%s does not look like a Borg cache.' % config_path) from None
        # borg < 1.0.8rc1 had different canonicalization for the repo location (see #1655 and #1741).
        cache_loc = self.config.get('cache', 'previous_location', fallback=None)
        if cache_loc:
            repo_loc = self.repository._location.canonical_path()
            rl = Location(repo_loc)
            cl = Location(cache_loc)
            if cl.proto == rl.proto and cl.user == rl.user and cl.host == rl.host and cl.port == rl.port \
                    and \
                    cl.path and rl.path and \
                    cl.path.startswith('/~/') and rl.path.startswith('/./') and cl.path[3:] == rl.path[3:]:
                # everything is the same except the expected change in relative path canonicalization,
                # update previous_location to avoid warning / user query about changed location:
                self.config.set('cache', 'previous_location', repo_loc)

    def _do_open(self):
        self.config = configparser.ConfigParser(interpolation=None)
        config_path = os.path.join(self.path, 'config')
        self.config.read(config_path)
        self._check_upgrade(config_path)
        self.id = self.config.get('cache', 'repository')
        self.manifest_id = unhexlify(self.config.get('cache', 'manifest'))
        self.timestamp = self.config.get('cache', 'timestamp', fallback=None)
        self.key_type = self.config.get('cache', 'key_type', fallback=None)
        self.previous_location = self.config.get('cache', 'previous_location', fallback=None)
        self.ignored_features = set(parse_stringified_list(self.config.get('cache', 'ignored_features', fallback='')))
        self.mandatory_features = set(parse_stringified_list(self.config.get('cache', 'mandatory_features', fallback='')))
        self.chunks = ChunkIndex.read(os.path.join(self.path, 'chunks').encode('utf-8'))
        self.files = None

    def open(self, lock_wait=None):
        if not os.path.isdir(self.path):
            raise Exception('%s does not look like a Borg cache.' % self.path)
        self.lock = Lock(os.path.join(self.path, 'lock'), exclusive=True, timeout=lock_wait).acquire()
        self.rollback()

    def close(self):
        if self.lock is not None:
            self.lock.release()
            self.lock = None

    def _read_files(self):
        self.files = {}
        self._newest_mtime = None
        logger.debug('Reading files cache ...')
        with open(os.path.join(self.path, 'files'), 'rb') as fd:
            u = msgpack.Unpacker(use_list=True)
            while True:
                data = fd.read(64 * 1024)
                if not data:
                    break
                u.feed(data)
                try:
                    for path_hash, item in u:
                        item[0] += 1
                        # in the end, this takes about 240 bytes per file
                        self.files[path_hash] = msgpack.packb(item)
                except (TypeError, ValueError) as exc:
                    logger.warning('The files cache seems corrupt, ignoring it. '
                                   'Expect lower performance. [%s]' % str(exc))
                    self.files = {}
                    return
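
    # A files cache entry unpacks to [age, inode, size, mtime, chunk_ids]
    # (see memorize_file()). _read_files() bumps the age of every entry on
    # load, so entries that are not re-used (which resets age to 0) expire
    # eventually via the BORG_FILES_CACHE_TTL check in commit().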

    def begin_txn(self):
        # Initialize transaction snapshot
        txn_dir = os.path.join(self.path, 'txn.tmp')
        os.mkdir(txn_dir)
        shutil.copy(os.path.join(self.path, 'config'), txn_dir)
        shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
        shutil.copy(os.path.join(self.path, 'files'), txn_dir)
        os.rename(os.path.join(self.path, 'txn.tmp'),
                  os.path.join(self.path, 'txn.active'))
        self.txn_active = True
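
    # Transaction scheme: begin_txn() snapshots config/chunks/files into
    # txn.tmp and atomically renames it to txn.active; commit() writes the
    # new state and drops the snapshot, while rollback() restores from it.
    # After a crash, either txn.tmp (partial snapshot, discarded) or
    # txn.active (complete snapshot, restored) is found.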

    def commit(self):
        """Commit transaction
        """
        if not self.txn_active:
            return
        if self.files is not None:
            if self._newest_mtime is None:
                # was never set because no files were modified/added
                self._newest_mtime = 2 ** 63 - 1  # nanoseconds, good until y2262
            ttl = int(os.environ.get('BORG_FILES_CACHE_TTL', 20))
            with open(os.path.join(self.path, 'files'), 'wb') as fd:
                for path_hash, item in self.files.items():
                    # Only keep files seen in this backup that are older than newest mtime seen in this backup -
                    # this is to avoid issues with filesystem snapshots and mtime granularity.
                    # Also keep files from older backups that have not reached BORG_FILES_CACHE_TTL yet.
                    item = msgpack.unpackb(item)
                    age = item[0]
                    if (age == 0 and bigint_to_int(item[3]) < self._newest_mtime) or \
                            (0 < age < ttl):
                        msgpack.pack((path_hash, item), fd)
        self.config.set('cache', 'manifest', bin_to_hex(self.manifest.id))
        self.config.set('cache', 'timestamp', self.manifest.timestamp)
        self.config.set('cache', 'key_type', str(self.key.TYPE))
        self.config.set('cache', 'previous_location', self.repository._location.canonical_path())
        self.config.set('cache', 'ignored_features', ','.join(self.ignored_features))
        self.config.set('cache', 'mandatory_features', ','.join(self.mandatory_features))
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            self.config.write(fd)
        self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8'))
        os.rename(os.path.join(self.path, 'txn.active'),
                  os.path.join(self.path, 'txn.tmp'))
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def rollback(self):
        """Roll back partial and aborted transactions
        """
        # Remove partial transaction
        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        # Roll back active transaction
        txn_dir = os.path.join(self.path, 'txn.active')
        if os.path.exists(txn_dir):
            shutil.copy(os.path.join(txn_dir, 'config'), self.path)
            shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
            shutil.copy(os.path.join(txn_dir, 'files'), self.path)
            os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
            if os.path.exists(os.path.join(self.path, 'txn.tmp')):
                shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False
        self._do_open()

    def sync(self):
        """Re-synchronize chunks cache with repository.

        Maintains a directory with known backup archive indexes, so it only
        needs to fetch infos from repo and build a chunk index once per backup
        archive.
        If out of sync, missing archive indexes get added, outdated indexes
        get removed and a new master chunks index is built by merging all
        archive indexes.
        """
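        # Per-archive indexes live in chunks.archive.d/<archive_id_hex>; the
        # nested helpers below compute those paths, enumerate cached vs. repo
        # archives, and (re)build the master index by merging them.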
        archive_path = os.path.join(self.path, 'chunks.archive.d')

        def mkpath(id, suffix=''):
            id_hex = bin_to_hex(id)
            path = os.path.join(archive_path, id_hex + suffix)
            return path.encode('utf-8')

        def cached_archives():
            if self.do_cache:
                fns = os.listdir(archive_path)
                # filenames with 64 hex digits == 256 bit
                return set(unhexlify(fn) for fn in fns if len(fn) == 64)
            else:
                return set()

        def repo_archives():
            return set(info[b'id'] for info in self.manifest.archives.values())

        def cleanup_outdated(ids):
            for id in ids:
                os.unlink(mkpath(id))

        def fetch_and_build_idx(archive_id, repository, key):
            chunk_idx = ChunkIndex()
            cdata = repository.get(archive_id)
            data = key.decrypt(archive_id, cdata)
            chunk_idx.add(archive_id, 1, len(data), len(cdata))
            archive = msgpack.unpackb(data)
            if archive[b'version'] != 1:
                raise Exception('Unknown archive metadata version')
            decode_dict(archive, (b'name',))
            unpacker = msgpack.Unpacker()
            for item_id, chunk in zip(archive[b'items'], repository.get_many(archive[b'items'])):
                data = key.decrypt(item_id, chunk)
                chunk_idx.add(item_id, 1, len(data), len(chunk))
                unpacker.feed(data)
                for item in unpacker:
                    if not isinstance(item, dict):
                        logger.error('Error: Did not get expected metadata dict - archive corrupted!')
                        continue
                    if b'chunks' in item:
                        for chunk_id, size, csize in item[b'chunks']:
                            chunk_idx.add(chunk_id, 1, size, csize)
            if self.do_cache:
                fn = mkpath(archive_id)
                fn_tmp = mkpath(archive_id, suffix='.tmp')
                try:
                    chunk_idx.write(fn_tmp)
                except Exception:
                    os.unlink(fn_tmp)
                else:
                    os.rename(fn_tmp, fn)
            return chunk_idx
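
        # fetch_and_build_idx() counts the archive metadata chunk, each item
        # metadata chunk and each referenced file content chunk once; merging
        # the per-archive indexes below then accumulates refcounts across
        # archives.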

        def lookup_name(archive_id):
            for name, info in self.manifest.archives.items():
                if info[b'id'] == archive_id:
                    return name

        def create_master_idx(chunk_idx):
            logger.info('Synchronizing chunks cache...')
            cached_ids = cached_archives()
            archive_ids = repo_archives()
            logger.info('Archives: %d, w/ cached Idx: %d, w/ outdated Idx: %d, w/o cached Idx: %d.' % (
                len(archive_ids), len(cached_ids),
                len(cached_ids - archive_ids), len(archive_ids - cached_ids), ))
            # deallocates old hashindex, creates empty hashindex:
            chunk_idx.clear()
            cleanup_outdated(cached_ids - archive_ids)
            if archive_ids:
                chunk_idx = None
                for archive_id in archive_ids:
                    archive_name = lookup_name(archive_id)
                    if archive_id in cached_ids:
                        archive_chunk_idx_path = mkpath(archive_id)
                        logger.info('Reading cached archive chunk index for %s ...' % archive_name)
                        archive_chunk_idx = ChunkIndex.read(archive_chunk_idx_path)
                    else:
                        logger.info('Fetching and building archive index for %s ...' % archive_name)
                        archive_chunk_idx = fetch_and_build_idx(archive_id, repository, self.key)
                    logger.info('Merging into master chunks index ...')
                    if chunk_idx is None:
                        # we just use the first archive's idx as starting point,
                        # to avoid growing the hash table from 0 size and also
                        # to save 1 merge call.
                        chunk_idx = archive_chunk_idx
                    else:
                        chunk_idx.merge(archive_chunk_idx)
            logger.info('Done.')
            return chunk_idx

        def legacy_cleanup():
            """bring old cache dirs into the desired state (cleanup and adapt)"""
            try:
                os.unlink(os.path.join(self.path, 'chunks.archive'))
            except OSError:
                pass
            try:
                os.unlink(os.path.join(self.path, 'chunks.archive.tmp'))
            except OSError:
                pass
            try:
                os.mkdir(archive_path)
            except OSError:
                pass

        # The cache can be used by a command that e.g. only checks against Manifest.Operation.WRITE,
        # which does not have to include all flags from Manifest.Operation.READ.
        # Since the sync will attempt to read archives, check compatibility with Manifest.Operation.READ.
        self.manifest.check_repository_compatibility((Manifest.Operation.READ, ))

        self.begin_txn()
        with cache_if_remote(self.repository) as repository:
            legacy_cleanup()
            # TEMPORARY HACK: to avoid archive index caching, create a FILE named ~/.cache/borg/REPOID/chunks.archive.d -
            # this is only recommended if you have a fast, low latency connection to your repo (e.g. if repo is local disk)
            self.do_cache = os.path.isdir(archive_path)
            self.chunks = create_master_idx(self.chunks)

    def check_cache_compatibility(self):
        my_features = Manifest.SUPPORTED_REPO_FEATURES
        if self.ignored_features & my_features:
            # The cache might not contain references of chunks that need a feature that is mandatory for some operation
            # and which this version supports. To avoid corruption while executing that operation, force a rebuild.
            return False
        if not self.mandatory_features <= my_features:
            # The cache was built with consideration to at least one feature that this version does not understand.
            # This client might misinterpret the cache. Thus force a rebuild.
            return False
        return True
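
    # Worked example: if this client supports feature 'f' but the cache's
    # ignored_features records 'f' (the cache was written by a client that
    # did not know 'f'), the cache may lack references to chunks needing 'f',
    # so the first check forces a rebuild. The second check rejects a cache
    # whose mandatory_features include something this version does not know.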

    def wipe_cache(self):
        logger.warning('Discarding incompatible cache and forcing a cache rebuild')
        archive_path = os.path.join(self.path, 'chunks.archive.d')
        if os.path.isdir(archive_path):
            shutil.rmtree(archive_path)
            os.makedirs(archive_path)
        self.chunks = ChunkIndex()
        with open(os.path.join(self.path, 'files'), 'wb'):
            pass  # empty file
        self.manifest_id = ''
        self.config.set('cache', 'manifest', '')

        self.ignored_features = set()
        self.mandatory_features = set()

    def update_compatibility(self):
        operation_to_features_map = self.manifest.get_all_mandatory_features()
        my_features = Manifest.SUPPORTED_REPO_FEATURES
        repo_features = set()
        for operation, features in operation_to_features_map.items():
            repo_features.update(features)
        self.ignored_features.update(repo_features - my_features)
        self.mandatory_features.update(repo_features & my_features)

    def add_chunk(self, id, data, stats):
        if not self.txn_active:
            self.begin_txn()
        size = len(data)
        if self.seen_chunk(id, size):
            return self.chunk_incref(id, stats)
        data = self.key.encrypt(data)
        csize = len(data)
        self.repository.put(id, data, wait=False)
        self.chunks[id] = (1, size, csize)
        stats.update(size, csize, True)
        return id, size, csize
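
    # Deduplication in a nutshell: add_chunk() consults seen_chunk() and, for
    # an already-stored id, only bumps the refcount via chunk_incref(); only
    # genuinely new chunks are encrypted and put() into the repository.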

    def seen_chunk(self, id, size=None):
        refcount, stored_size, _ = self.chunks.get(id, (0, None, None))
        if size is not None and stored_size is not None and size != stored_size:
            # we already have a chunk with that id, but different size.
            # this is either a hash collision (unlikely) or corruption or a bug.
            raise Exception('chunk has same id [%r], but different size (stored: %d new: %d)!' % (
                id, stored_size, size))
        return refcount

    def chunk_incref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks.incref(id)
        stats.update(size, csize, False)
        return id, size, csize

    def chunk_decref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks.decref(id)
        if count == 0:
            del self.chunks[id]
            self.repository.delete(id, wait=False)
            stats.update(-size, -csize, True)
        else:
            stats.update(-size, -csize, False)

    def file_known_and_unchanged(self, path_hash, st, ignore_inode=False):
        if not (self.do_files and stat.S_ISREG(st.st_mode)):
            return None
        if self.files is None:
            self._read_files()
        entry = self.files.get(path_hash)
        if not entry:
            return None
        entry = msgpack.unpackb(entry)
        if (entry[2] == st.st_size and bigint_to_int(entry[3]) == st.st_mtime_ns and
                (ignore_inode or entry[1] == st.st_ino)):
            # reset entry age
            entry[0] = 0
            # we ignored the inode number in the comparison above or it is still the same.
            # if it is still the same, replacing it doesn't change it.
            # if we ignored it, a reason for doing that is that files were moved to a new
            # disk / new fs (so a one-time change of inode number is expected) and we wanted
            # to avoid everything getting chunked again. to be able to re-enable the inode
            # number comparison in a future backup run (and avoid chunking everything
            # again at that time), we need to update the inode number in the cache with what
            # we see in the filesystem.
            entry[1] = st.st_ino
            self.files[path_hash] = msgpack.packb(entry)
            return entry[4]
        else:
            return None
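
    # Caller flow (a sketch; chunk_and_store is a hypothetical helper, not
    # part of this module):
    #
    #   ids = cache.file_known_and_unchanged(path_hash, st)
    #   if ids is None:
    #       ids = chunk_and_store(path)  # hypothetical: chunk file, add_chunk() each
    #       cache.memorize_file(path_hash, st, ids)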

    def memorize_file(self, path_hash, st, ids):
        if not (self.do_files and stat.S_ISREG(st.st_mode)):
            return
        # Entry: age, inode, size, mtime, chunk ids
        mtime_ns = safe_ns(st.st_mtime_ns)
        self.files[path_hash] = msgpack.packb((0, st.st_ino, st.st_size, int_to_bigint(mtime_ns), ids))
        self._newest_mtime = max(self._newest_mtime or 0, mtime_ns)