from configparser import ConfigParser
from binascii import unhexlify
from datetime import datetime
from itertools import islice
import errno
import os
import shutil
import struct
from zlib import crc32

import msgpack

from .logger import create_logger
logger = create_logger()

from .helpers import Error, ErrorWithTraceback, IntegrityError, Location, ProgressIndicatorPercent, bin_to_hex
from .hashindex import NSIndex
from .locking import Lock, LockError, LockErrorT
from .lrucache import LRUCache
from .platform import sync_dir

MAX_OBJECT_SIZE = 20 * 1024 * 1024

MAGIC = b'BORG_SEG'
MAGIC_LEN = len(MAGIC)

TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2


class Repository:
    """Filesystem based transactional key value store

    On disk layout:
    dir/README
    dir/config
    dir/data/<X // SEGMENTS_PER_DIR>/<X>
    dir/index.X
    dir/hints.X
    """

    DEFAULT_MAX_SEGMENT_SIZE = 5 * 1024 * 1024
    DEFAULT_SEGMENTS_PER_DIR = 10000

    class DoesNotExist(Error):
        """Repository {} does not exist."""

    class AlreadyExists(Error):
        """Repository {} already exists."""

    class InvalidRepository(Error):
        """{} is not a valid repository. Check repo config."""

    class CheckNeeded(ErrorWithTraceback):
        """Inconsistency detected. Please run "borg check {}"."""

    class ObjectNotFound(ErrorWithTraceback):
        """Object with key {} not found in repository {}."""

    def __init__(self, path, create=False, exclusive=False, lock_wait=None, lock=True, append_only=False):
        self.path = os.path.abspath(path)
        self._location = Location('file://%s' % self.path)
        self.io = None
        self.lock = None
        self.index = None
        self._active_txn = False
        self.lock_wait = lock_wait
        self.do_lock = lock
        self.do_create = create
        self.exclusive = exclusive
        self.append_only = append_only

    def __del__(self):
        if self.lock:
            self.close()
            assert False, "cleanup happened in Repository.__del__"

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.path)

    def __enter__(self):
        if self.do_create:
            self.do_create = False
            self.create(self.path)
        self.open(self.path, bool(self.exclusive), lock_wait=self.lock_wait, lock=self.do_lock)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            no_space_left_on_device = exc_type is OSError and exc_val.errno == errno.ENOSPC
            # The ENOSPC could have originated somewhere else besides the Repository. The cleanup is always
            # safe, unless EIO or FS corruption ensues, which is why we specifically check for ENOSPC.
            if self._active_txn and no_space_left_on_device:
                logger.warning('No space left on device, cleaning up partial transaction to free space.')
                self.io.cleanup(self.io.get_segments_transaction_id())
            self.rollback()
        self.close()

    def create(self, path):
        """Create a new empty repository at `path`
        """
        if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
            raise self.AlreadyExists(path)
        if not os.path.exists(path):
            os.mkdir(path)
        with open(os.path.join(path, 'README'), 'w') as fd:
            fd.write('This is a Borg repository\n')
        os.mkdir(os.path.join(path, 'data'))
        config = ConfigParser(interpolation=None)
        config.add_section('repository')
        config.set('repository', 'version', '1')
        config.set('repository', 'segments_per_dir', str(self.DEFAULT_SEGMENTS_PER_DIR))
        config.set('repository', 'max_segment_size', str(self.DEFAULT_MAX_SEGMENT_SIZE))
        config.set('repository', 'append_only', str(int(self.append_only)))
        config.set('repository', 'id', bin_to_hex(os.urandom(32)))
        self.save_config(path, config)

    def save_config(self, path, config):
        config_path = os.path.join(path, 'config')
        with open(config_path, 'w') as fd:
            config.write(fd)

    def save_key(self, keydata):
        assert self.config
        keydata = keydata.decode('utf-8')  # remote repo: msgpack issue #99, getting bytes
        self.config.set('repository', 'key', keydata)
        self.save_config(self.path, self.config)

    def load_key(self):
        keydata = self.config.get('repository', 'key')
        return keydata.encode('utf-8')  # remote repo: msgpack issue #99, returning bytes

    def destroy(self):
        """Destroy the repository at `self.path`
        """
        if self.append_only:
            raise ValueError(self.path + " is in append-only mode")
        self.close()
        os.remove(os.path.join(self.path, 'config'))  # kill config first
        shutil.rmtree(self.path)

    def get_index_transaction_id(self):
        indices = sorted(int(fn[6:])
                         for fn in os.listdir(self.path)
                         if fn.startswith('index.') and fn[6:].isdigit() and os.stat(os.path.join(self.path, fn)).st_size != 0)
        if indices:
            return indices[-1]
        else:
            return None

    def get_transaction_id(self):
        index_transaction_id = self.get_index_transaction_id()
        segments_transaction_id = self.io.get_segments_transaction_id()
        if index_transaction_id is not None and segments_transaction_id is None:
            raise self.CheckNeeded(self.path)
        # Attempt to automatically rebuild index if we crashed between commit
        # tag write and index save.
        if index_transaction_id != segments_transaction_id:
            if index_transaction_id is not None and index_transaction_id > segments_transaction_id:
                replay_from = None
            else:
                replay_from = index_transaction_id
            self.replay_segments(replay_from, segments_transaction_id)
        return self.get_index_transaction_id()

    def break_lock(self):
        Lock(os.path.join(self.path, 'lock')).break_lock()

    def open(self, path, exclusive, lock_wait=None, lock=True):
        self.path = path
        if not os.path.isdir(path):
            raise self.DoesNotExist(path)
        if lock:
            self.lock = Lock(os.path.join(path, 'lock'), exclusive, timeout=lock_wait).acquire()
        else:
            self.lock = None
        self.config = ConfigParser(interpolation=None)
        self.config.read(os.path.join(self.path, 'config'))
        if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
            self.close()
            raise self.InvalidRepository(path)
        self.max_segment_size = self.config.getint('repository', 'max_segment_size')
        self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
        # append_only can be set in the constructor;
        # it shouldn't be overridden (True -> False) here.
        self.append_only = self.append_only or self.config.getboolean('repository', 'append_only', fallback=False)
        self.id = unhexlify(self.config.get('repository', 'id').strip())
        self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)

    def close(self):
        if self.lock:
            if self.io:
                self.io.close()
            self.io = None
            self.lock.release()
            self.lock = None

    def commit(self, save_space=False):
        """Commit transaction
        """
        self.io.write_commit()
        if not self.append_only:
            self.compact_segments(save_space=save_space)
        self.write_index()
        self.rollback()

    def open_index(self, transaction_id):
        if transaction_id is None:
            return NSIndex()
        return NSIndex.read((os.path.join(self.path, 'index.%d') % transaction_id).encode('utf-8'))

    def prepare_txn(self, transaction_id, do_cleanup=True):
        self._active_txn = True
        if not self.lock.got_exclusive_lock():
            if self.exclusive is not None:
                # self.exclusive is either True or False, thus a new client is active here.
                # if it is False and we get here, the caller did not use exclusive=True although
                # it is needed for a write operation. if it is True and we get here, something else
                # went very wrong, because we should have an exclusive lock, but we don't.
                raise AssertionError("bug in code, exclusive lock should exist here")
            # if we are here, this is an old client talking to a new server (expecting lock upgrade),
            # or we are replaying segments and might need a lock upgrade for that.
            try:
                self.lock.upgrade()
            except (LockError, LockErrorT):
                # if upgrading the lock to exclusive fails, we do not have an
                # active transaction. this is important for "serve" mode, where
                # the repository instance lives on - even if exceptions happened.
                self._active_txn = False
                raise
        if not self.index or transaction_id is None:
            self.index = self.open_index(transaction_id)
        if transaction_id is None:
            self.segments = {}  # XXX bad name: usage_count_of_segment_x = self.segments[x]
            self.compact = set()  # XXX bad name: segments_needing_compaction = self.compact
        else:
            if do_cleanup:
                self.io.cleanup(transaction_id)
            with open(os.path.join(self.path, 'hints.%d' % transaction_id), 'rb') as fd:
                hints = msgpack.unpack(fd)
            hints_version = hints[b'version']
            if hints_version not in (1, 2):
                raise ValueError('Unknown hints file version: %d' % hints_version)
            self.segments = hints[b'segments']
            if hints_version == 1:
                self.compact = set(hints[b'compact'])
            elif hints_version == 2:
                self.compact = set(hints[b'compact'].keys())

    def write_index(self):
        hints = {b'version': 1,
                 b'segments': self.segments,
                 b'compact': list(self.compact)}
        transaction_id = self.io.get_segments_transaction_id()
        assert transaction_id is not None
        hints_file = os.path.join(self.path, 'hints.%d' % transaction_id)
        with open(hints_file + '.tmp', 'wb') as fd:
            msgpack.pack(hints, fd)
            fd.flush()
            os.fsync(fd.fileno())
        os.rename(hints_file + '.tmp', hints_file)
        self.index.write(os.path.join(self.path, 'index.tmp'))
        os.rename(os.path.join(self.path, 'index.tmp'),
                  os.path.join(self.path, 'index.%d' % transaction_id))
        if self.append_only:
            with open(os.path.join(self.path, 'transactions'), 'a') as log:
                print('transaction %d, UTC time %s' % (transaction_id, datetime.utcnow().isoformat()), file=log)
        # Remove old indices
        current = '.%d' % transaction_id
        for name in os.listdir(self.path):
            if not name.startswith('index.') and not name.startswith('hints.'):
                continue
            if name.endswith(current):
                continue
            os.unlink(os.path.join(self.path, name))
        self.index = None

    def compact_segments(self, save_space=False):
        """Compact sparse segments by copying data into new segments
        """
        if not self.compact:
            return
        index_transaction_id = self.get_index_transaction_id()
        segments = self.segments
        unused = []  # list of segments that are not used anymore

        def complete_xfer():
            # complete the transfer (usually exactly when some target segment
            # is full, or at the very end when everything is processed)
            nonlocal unused
            # commit the new, compact, used segments
            self.io.write_commit()
            # get rid of the old, sparse, unused segments. free space.
            for segment in unused:
                assert self.segments.pop(segment) == 0
                self.io.delete_segment(segment)
            unused = []

        # The first segment that compaction creates, if any
        first_new_segment = self.io.get_latest_segment() + 1
        for segment in sorted(self.compact):
            if self.io.segment_exists(segment):
                for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
                    if tag == TAG_PUT and self.index.get(key, (-1, -1)) == (segment, offset):
                        try:
                            new_segment, offset = self.io.write_put(key, data, raise_full=save_space)
                        except LoggedIO.SegmentFull:
                            complete_xfer()
                            new_segment, offset = self.io.write_put(key, data)
                        self.index[key] = new_segment, offset
                        segments.setdefault(new_segment, 0)
                        segments[new_segment] += 1
                        segments[segment] -= 1
                    elif tag == TAG_DELETE:
                        if index_transaction_id is None or segment > index_transaction_id:
                            # (introduced in 6425d16aa84be1eaaf88)
                            # This is needed to avoid object un-deletion if we crash between the commit and the
                            # deletion of old segments in complete_xfer().
                            #
                            # However, this only happens if the crash also affects the FS to the effect that file
                            # deletions did not materialize consistently after journal recovery. If they always
                            # materialize in-order, then this is not a problem, because the old segment containing
                            # a deleted object would be deleted before the segment containing the delete.
                            #
                            # Consider the following series of operations if we did not do this, i.e. if this
                            # entire 'if' were removed.
                            # Columns are segments, lines are different keys (line 1 = some key, line 2 = some other key)
                            # Legend: P=TAG_PUT, D=TAG_DELETE, c=commit, i=index is written for latest commit
                            #
                            # Segment | 1     | 2   | 3
                            # --------+-------+-----+------
                            # Key 1   | P     | D   |
                            # Key 2   | P     |     | P
                            # commits |   c i |   c |   c i
                            # --------+-------+-----+------
                            #                       ^- compact_segments starts
                            #                           ^- complete_xfer commits, after that complete_xfer deletes
                            #                              segments 1 and 2 (and then the index would be written).
                            #
                            # Now we crash. But only segment 2 gets deleted, while segment 1 is still around. Now key 1
                            # is suddenly undeleted (because the delete in segment 2 is now missing).
                            # Again, note the requirement here: we delete segments in an order such that this doesn't
                            # happen, and it can only happen if the FS materialization of these deletes is reordered
                            # or partially dropped. In that case it doesn't cause outright corruption, 'just' an
                            # index count mismatch, which will be fixed by borg check --repair.
                            #
                            # Note that in this check the index state is the proxy for a "most definitely settled"
                            # repository state, i.e. the assumption is that *all* operations on segments <= index
                            # state are completed and stable.
                            try:
                                new_segment = self.io.write_delete(key, raise_full=save_space)
                            except LoggedIO.SegmentFull:
                                complete_xfer()
                                new_segment = self.io.write_delete(key)
                            self.compact.add(new_segment)
                            self.segments.setdefault(new_segment, 0)
                assert segments[segment] == 0
                unused.append(segment)
        complete_xfer()
        # Moving of deletes creates new sparse segments, only store these. All other segments
        # are compact now.
        self.compact = {segment for segment in self.compact if segment >= first_new_segment}

    def replay_segments(self, index_transaction_id, segments_transaction_id):
        # fake an old client, so that in case we do not have an exclusive lock yet, prepare_txn will upgrade the lock:
        remember_exclusive = self.exclusive
        self.exclusive = None
        self.prepare_txn(index_transaction_id, do_cleanup=False)
        try:
            segment_count = sum(1 for _ in self.io.segment_iterator())
            pi = ProgressIndicatorPercent(total=segment_count, msg="Replaying segments %3.0f%%", same_line=True)
            for i, (segment, filename) in enumerate(self.io.segment_iterator()):
                pi.show(i)
                if index_transaction_id is not None and segment <= index_transaction_id:
                    continue
                if segment > segments_transaction_id:
                    break
                objects = self.io.iter_objects(segment)
                self._update_index(segment, objects)
            pi.finish()
            self.write_index()
        finally:
            self.exclusive = remember_exclusive
            self.rollback()

    def _update_index(self, segment, objects, report=None):
        """some code shared between replay_segments and check"""
        self.segments[segment] = 0
        for tag, key, offset in objects:
            if tag == TAG_PUT:
                try:
                    s, _ = self.index[key]
                    self.compact.add(s)
                    self.segments[s] -= 1
                except KeyError:
                    pass
                self.index[key] = segment, offset
                self.segments[segment] += 1
            elif tag == TAG_DELETE:
                try:
                    s, _ = self.index.pop(key)
                    self.segments[s] -= 1
                    self.compact.add(s)
                except KeyError:
                    pass
                self.compact.add(segment)
            elif tag == TAG_COMMIT:
                continue
            else:
                msg = 'Unexpected tag {} in segment {}'.format(tag, segment)
                if report is None:
                    raise self.CheckNeeded(msg)
                else:
                    report(msg)
        if self.segments[segment] == 0:
            self.compact.add(segment)

    def check(self, repair=False, save_space=False):
        """Check repository consistency

        This method verifies all segment checksums and makes sure
        the index is consistent with the data stored in the segments.
        """
        if self.append_only and repair:
            raise ValueError(self.path + " is in append-only mode")
        error_found = False

        def report_error(msg):
            nonlocal error_found
            error_found = True
            logger.error(msg)

        logger.info('Starting repository check')
        assert not self._active_txn
        try:
            transaction_id = self.get_transaction_id()
            current_index = self.open_index(transaction_id)
        except Exception:
            transaction_id = self.io.get_segments_transaction_id()
            current_index = None
        if transaction_id is None:
            transaction_id = self.get_index_transaction_id()
        if transaction_id is None:
            transaction_id = self.io.get_latest_segment()
        if transaction_id is None:
            report_error('This repository contains no valid data.')
            return False
        if repair:
            self.io.cleanup(transaction_id)
        segments_transaction_id = self.io.get_segments_transaction_id()
        self.prepare_txn(None)  # self.index, self.compact, self.segments all empty now!
        segment_count = sum(1 for _ in self.io.segment_iterator())
        pi = ProgressIndicatorPercent(total=segment_count, msg="Checking segments %3.1f%%", step=0.1, same_line=True)
        for i, (segment, filename) in enumerate(self.io.segment_iterator()):
            pi.show(i)
            if segment > transaction_id:
                continue
            try:
                objects = list(self.io.iter_objects(segment))
            except IntegrityError as err:
                report_error(str(err))
                objects = []
                if repair:
                    self.io.recover_segment(segment, filename)
                    objects = list(self.io.iter_objects(segment))
            self._update_index(segment, objects, report_error)
        pi.finish()
        # self.index, self.segments, self.compact now reflect the state of the segment files up to <transaction_id>.
        # We might need to add a commit tag if no committed segment is found.
        if repair and segments_transaction_id is None:
            report_error('Adding commit tag to segment {}'.format(transaction_id))
            self.io.segment = transaction_id + 1
            self.io.write_commit()
        if current_index and not repair:
            # current_index = "as found on disk"
            # self.index = "as rebuilt in-memory from segments"
            if len(current_index) != len(self.index):
                report_error('Index object count mismatch. {} != {}'.format(len(current_index), len(self.index)))
            elif current_index:
                for key, value in self.index.iteritems():
                    if current_index.get(key, (-1, -1)) != value:
                        report_error('Index mismatch for key {}. {} != {}'.format(key, value, current_index.get(key, (-1, -1))))
        if repair:
            self.compact_segments(save_space=save_space)
            self.write_index()
        self.rollback()
        if error_found:
            if repair:
                logger.info('Completed repository check, errors found and repaired.')
            else:
                logger.error('Completed repository check, errors found.')
        else:
            logger.info('Completed repository check, no problems found.')
        return not error_found or repair

    def rollback(self):
        """Abort the current transaction, discarding the in-memory index.
        """
        self.index = None
        self._active_txn = False

    def __len__(self):
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return len(self.index)

    def __contains__(self, id):
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return id in self.index

    def list(self, limit=None, marker=None):
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return [id_ for id_, _ in islice(self.index.iteritems(marker=marker), limit)]
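
    # Pagination sketch for list() (a hypothetical caller loop, not part of
    # this module): fetch ids in chunks, passing the last id of each chunk
    # as the marker for the next call. 'process' is a placeholder:
    #
    #   marker = None
    #   while True:
    #       ids = repo.list(limit=1000, marker=marker)
    #       if not ids:
    #           break
    #       process(ids)
    #       marker = ids[-1]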

    def get(self, id_):
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        try:
            segment, offset = self.index[id_]
            return self.io.read(segment, offset, id_)
        except KeyError:
            raise self.ObjectNotFound(id_, self.path) from None

    def get_many(self, ids, is_preloaded=False):
        for id_ in ids:
            yield self.get(id_)

    def put(self, id, data, wait=True):
        if not self._active_txn:
            self.prepare_txn(self.get_transaction_id())
        try:
            segment, _ = self.index[id]
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.segments.setdefault(segment, 0)
            self.compact.add(segment)
        except KeyError:
            pass
        segment, offset = self.io.write_put(id, data)
        self.segments.setdefault(segment, 0)
        self.segments[segment] += 1
        self.index[id] = segment, offset

    def delete(self, id, wait=True):
        if not self._active_txn:
            self.prepare_txn(self.get_transaction_id())
        try:
            segment, offset = self.index.pop(id)
        except KeyError:
            raise self.ObjectNotFound(id, self.path) from None
        self.segments[segment] -= 1
        self.compact.add(segment)
        segment = self.io.write_delete(id)
        self.compact.add(segment)
        self.segments.setdefault(segment, 0)

    def preload(self, ids):
        """Preload objects (only applies to remote repositories)
        """


class LoggedIO:
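    """Log-structured I/O over the numbered segment files under dir/data/
    (see the Repository docstring above for the on-disk layout)."""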

    class SegmentFull(Exception):
        """raised when a segment is full, before opening next"""

    header_fmt = struct.Struct('<IIB')
    assert header_fmt.size == 9
    put_header_fmt = struct.Struct('<IIB32s')
    assert put_header_fmt.size == 41
    header_no_crc_fmt = struct.Struct('<IB')
    assert header_no_crc_fmt.size == 5
    crc_fmt = struct.Struct('<I')
    assert crc_fmt.size == 4

    _commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
    COMMIT = crc_fmt.pack(crc32(_commit)) + _commit
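
    # Segment entry layout implied by the struct formats above (little-endian;
    # a sketch for orientation, derived from _read() and write_put() below):
    #
    #   PUT:    crc32 (4) | size (4) | tag (1) | key (32) | data (size - 41)
    #   DELETE: crc32 (4) | size (4) | tag (1) | key (32)
    #   COMMIT: crc32 (4) | size (4) | tag (1)
    #
    # "size" counts the whole entry including the crc32 field; the crc32
    # covers everything after that field.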

    def __init__(self, path, limit, segments_per_dir, capacity=90):
        self.path = path
        self.fds = LRUCache(capacity,
                            dispose=lambda fd: fd.close())
        self.segment = 0
        self.limit = limit
        self.segments_per_dir = segments_per_dir
        self.offset = 0
        self._write_fd = None

    def close(self):
        self.close_segment()
        self.fds.clear()
        self.fds = None  # Just to make sure we're disabled

    def segment_iterator(self, reverse=False):
        data_path = os.path.join(self.path, 'data')
        dirs = sorted((dir for dir in os.listdir(data_path) if dir.isdigit()), key=int, reverse=reverse)
        for dir in dirs:
            filenames = os.listdir(os.path.join(data_path, dir))
            sorted_filenames = sorted((filename for filename in filenames
                                       if filename.isdigit()), key=int, reverse=reverse)
            for filename in sorted_filenames:
                yield int(filename), os.path.join(data_path, dir, filename)

    def get_latest_segment(self):
        for segment, filename in self.segment_iterator(reverse=True):
            return segment
        return None

    def get_segments_transaction_id(self):
        """Return the last committed segment
        """
        for segment, filename in self.segment_iterator(reverse=True):
            if self.is_committed_segment(segment):
                return segment
        return None

    def cleanup(self, transaction_id):
        """Delete segment files left by aborted transactions
        """
        self.segment = transaction_id + 1
        for segment, filename in self.segment_iterator(reverse=True):
            if segment > transaction_id:
                os.unlink(filename)
            else:
                break

    def is_committed_segment(self, segment):
        """Check if segment ends with a COMMIT tag
        """
        try:
            iterator = self.iter_objects(segment)
        except IntegrityError:
            return False
        with open(self.segment_filename(segment), 'rb') as fd:
            try:
                fd.seek(-self.header_fmt.size, os.SEEK_END)
            except OSError as e:
                # return False if segment file is empty or too small
                if e.errno == errno.EINVAL:
                    return False
                raise e
            if fd.read(self.header_fmt.size) != self.COMMIT:
                return False
        seen_commit = False
        while True:
            try:
                tag, key, offset = next(iterator)
            except IntegrityError:
                return False
            except StopIteration:
                break
            if tag == TAG_COMMIT:
                seen_commit = True
                continue
            if seen_commit:
                return False
        return seen_commit

    def segment_filename(self, segment):
        return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))
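
    # Example (with the default segments_per_dir=10000): segment 42 maps to
    # <repo>/data/0/42, segment 12345 to <repo>/data/1/12345.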

    def get_write_fd(self, no_new=False, raise_full=False):
        if not no_new and self.offset and self.offset > self.limit:
            if raise_full:
                raise self.SegmentFull
            self.close_segment()
        if not self._write_fd:
            if self.segment % self.segments_per_dir == 0:
                dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
                if not os.path.exists(dirname):
                    os.mkdir(dirname)
                    sync_dir(os.path.join(self.path, 'data'))
            self._write_fd = open(self.segment_filename(self.segment), 'ab')
            self._write_fd.write(MAGIC)
            self.offset = MAGIC_LEN
        return self._write_fd

    def get_fd(self, segment):
        try:
            return self.fds[segment]
        except KeyError:
            fd = open(self.segment_filename(segment), 'rb')
            self.fds[segment] = fd
            return fd

    def delete_segment(self, segment):
        if segment in self.fds:
            del self.fds[segment]
        try:
            os.unlink(self.segment_filename(segment))
        except FileNotFoundError:
            pass

    def segment_exists(self, segment):
        return os.path.exists(self.segment_filename(segment))

    def iter_objects(self, segment, include_data=False):
        fd = self.get_fd(segment)
        fd.seek(0)
        if fd.read(MAGIC_LEN) != MAGIC:
            raise IntegrityError('Invalid segment magic [segment {}, offset {}]'.format(segment, 0))
        offset = MAGIC_LEN
        header = fd.read(self.header_fmt.size)
        while header:
            size, tag, key, data = self._read(fd, self.header_fmt, header, segment, offset,
                                              (TAG_PUT, TAG_DELETE, TAG_COMMIT))
            if include_data:
                yield tag, key, offset, data
            else:
                yield tag, key, offset
            offset += size
            # we must get the fd via get_fd() here again as we yielded to our caller and it might
            # have triggered closing of the fd we had before (e.g. by calling io.read() for
            # different segment(s)).
            # by calling get_fd() here again we also make our fd "recently used" so it likely
            # does not get kicked out of the self.fds LRUCache.
            fd = self.get_fd(segment)
            fd.seek(offset)
            header = fd.read(self.header_fmt.size)

    def recover_segment(self, segment, filename):
        if segment in self.fds:
            del self.fds[segment]
        with open(filename, 'rb') as fd:
            data = memoryview(fd.read())
        os.rename(filename, filename + '.beforerecover')
        logger.info('attempting to recover ' + filename)
        with open(filename, 'wb') as fd:
            fd.write(MAGIC)
            while len(data) >= self.header_fmt.size:
                crc, size, tag = self.header_fmt.unpack(data[:self.header_fmt.size])
                if size < self.header_fmt.size or size > len(data):
                    data = data[1:]
                    continue
                if crc32(data[4:size]) & 0xffffffff != crc:
                    data = data[1:]
                    continue
                fd.write(data[:size])
                data = data[size:]

    def read(self, segment, offset, id):
        if segment == self.segment and self._write_fd:
            self._write_fd.flush()
        fd = self.get_fd(segment)
        fd.seek(offset)
        header = fd.read(self.put_header_fmt.size)
        size, tag, key, data = self._read(fd, self.put_header_fmt, header, segment, offset, (TAG_PUT, ))
        if id != key:
            raise IntegrityError('Invalid segment entry header, is not for wanted id [segment {}, offset {}]'.format(
                segment, offset))
        return data

    def _read(self, fd, fmt, header, segment, offset, acceptable_tags):
        # some code shared by read() and iter_objects()
        try:
            hdr_tuple = fmt.unpack(header)
        except struct.error as err:
            raise IntegrityError('Invalid segment entry header [segment {}, offset {}]: {}'.format(
                segment, offset, err)) from None
        if fmt is self.put_header_fmt:
            crc, size, tag, key = hdr_tuple
        elif fmt is self.header_fmt:
            crc, size, tag = hdr_tuple
            key = None
        else:
            raise TypeError("_read called with unsupported format")
        if size > MAX_OBJECT_SIZE:
            # if you get this on an archive made with borg < 1.0.7 and millions of files and
            # you need to restore it, you can disable this check by using "if False:" above.
            raise IntegrityError('Invalid segment entry size {} - too big [segment {}, offset {}]'.format(
                size, segment, offset))
        if size < fmt.size:
            raise IntegrityError('Invalid segment entry size {} - too small [segment {}, offset {}]'.format(
                size, segment, offset))
        length = size - fmt.size
        data = fd.read(length)
        if len(data) != length:
            raise IntegrityError('Segment entry data short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
                segment, offset, length, len(data)))
        if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
            raise IntegrityError('Segment entry checksum mismatch [segment {}, offset {}]'.format(
                segment, offset))
        if tag not in acceptable_tags:
            raise IntegrityError('Invalid segment entry header, did not get acceptable tag [segment {}, offset {}]'.format(
                segment, offset))
        if key is None and tag in (TAG_PUT, TAG_DELETE):
            key, data = data[:32], data[32:]
        return size, tag, key, data

    def write_put(self, id, data, raise_full=False):
        data_size = len(data)
        if data_size > MAX_DATA_SIZE:
            # this would push the segment entry size beyond MAX_OBJECT_SIZE.
            raise IntegrityError('More than allowed put data [{} > {}]'.format(data_size, MAX_DATA_SIZE))
        fd = self.get_write_fd(raise_full=raise_full)
        size = data_size + self.put_header_fmt.size
        offset = self.offset
        header = self.header_no_crc_fmt.pack(size, TAG_PUT)
        crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
        fd.write(b''.join((crc, header, id, data)))
        self.offset += size
        return self.segment, offset

    def write_delete(self, id, raise_full=False):
        fd = self.get_write_fd(raise_full=raise_full)
        header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
        crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
        fd.write(b''.join((crc, header, id)))
        self.offset += self.put_header_fmt.size
        return self.segment

    def write_commit(self):
        fd = self.get_write_fd(no_new=True)
        header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
        crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
        # first fsync(fd) here (to ensure data supposedly hits the disk before the commit tag)
        fd.flush()
        os.fsync(fd.fileno())
        fd.write(b''.join((crc, header)))
        self.close_segment()  # after-commit fsync()

    def close_segment(self):
        if self._write_fd:
            self.segment += 1
            self.offset = 0
            self._write_fd.flush()
            os.fsync(self._write_fd.fileno())
            if hasattr(os, 'posix_fadvise'):  # only on UNIX
                # tell the OS that it does not need to cache what we just wrote,
                # avoids spoiling the cache for the OS and other processes.
                os.posix_fadvise(self._write_fd.fileno(), 0, 0, os.POSIX_FADV_DONTNEED)
            self._write_fd.close()
            sync_dir(os.path.dirname(self._write_fd.name))
            self._write_fd = None


MAX_DATA_SIZE = MAX_OBJECT_SIZE - LoggedIO.put_header_fmt.size
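# With the values above this is 20 * 1024 * 1024 - 41 = 20971479 bytes of
# payload per PUT entry (the 41-byte header is crc32 + size + tag + 32-byte key).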