repository.py 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204
  1. import errno
  2. import os
  3. import shutil
  4. import struct
  5. from binascii import hexlify, unhexlify
  6. from collections import defaultdict
  7. from configparser import ConfigParser
  8. from datetime import datetime
  9. from functools import partial
  10. from itertools import islice
  11. import msgpack
  12. from .constants import * # NOQA
  13. from .hashindex import NSIndex
  14. from .helpers import Error, ErrorWithTraceback, IntegrityError, format_file_size, parse_file_size
  15. from .helpers import Location
  16. from .helpers import ProgressIndicatorPercent
  17. from .helpers import bin_to_hex
  18. from .helpers import yes
  19. from .locking import Lock, LockError, LockErrorT
  20. from .logger import create_logger
  21. from .lrucache import LRUCache
  22. from .platform import SaveFile, SyncFile, sync_dir
  23. from .crc32 import crc32
logger = create_logger(__name__)

# Upper bound for a single stored object; presumably enforced by LoggedIO when
# writing PUT entries — verify in LoggedIO.
MAX_OBJECT_SIZE = 20 * 1024 * 1024

# Magic bytes identifying a segment file.
MAGIC = b'BORG_SEG'
MAGIC_LEN = len(MAGIC)

# Log entry tags; see the Repository class docstring for their semantics.
TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2

# Factory for a mapping segment -> freeable bytes, defaulting to 0.
FreeSpace = partial(defaultdict, int)
  32. class Repository:
  33. """
  34. Filesystem based transactional key value store
  35. Transactionality is achieved by using a log (aka journal) to record changes. The log is a series of numbered files
  36. called segments. Each segment is a series of log entries. The segment number together with the offset of each
  37. entry relative to its segment start establishes an ordering of the log entries. This is the "definition" of
  38. time for the purposes of the log.
  39. Log entries are either PUT, DELETE or COMMIT.
  40. A COMMIT is always the final log entry in a segment and marks all data from the beginning of the log until the
  41. segment ending with the COMMIT as committed and consistent. The segment number of a segment ending with a COMMIT
  42. is called the transaction ID of that commit, and a segment ending with a COMMIT is called committed.
  43. When reading from a repository it is first checked whether the last segment is committed. If it is not, then
  44. all segments after the last committed segment are deleted; they contain log entries whose consistency is not
  45. established by a COMMIT.
  46. Note that the COMMIT can't establish consistency by itself, but only manages to do so with proper support from
  47. the platform (including the hardware). See platform.base.SyncFile for details.
  48. A PUT inserts a key-value pair. The value is stored in the log entry, hence the repository implements
  49. full data logging, meaning that all data is consistent, not just metadata (which is common in file systems).
  50. A DELETE marks a key as deleted.
  51. For a given key only the last entry regarding the key, which is called current (all other entries are called
  52. superseded), is relevant: If there is no entry or the last entry is a DELETE then the key does not exist.
  53. Otherwise the last PUT defines the value of the key.
  54. By superseding a PUT (with either another PUT or a DELETE) the log entry becomes obsolete. A segment containing
  55. such obsolete entries is called sparse, while a segment containing no such entries is called compact.
  56. Sparse segments can be compacted and thereby disk space freed. This destroys the transaction for which the
  57. superseded entries were current.
  58. On disk layout:
  59. dir/README
  60. dir/config
  61. dir/data/<X // SEGMENTS_PER_DIR>/<X>
  62. dir/index.X
  63. dir/hints.X
  64. """
    # NOTE: for these exception classes the docstrings double as user-facing
    # message templates ({} placeholders filled with the constructor args) —
    # presumably by helpers.Error; verify there. Do not edit them casually.

    class DoesNotExist(Error):
        """Repository {} does not exist."""

    class AlreadyExists(Error):
        """Repository {} already exists."""

    class InvalidRepository(Error):
        """{} is not a valid repository. Check repo config."""

    class CheckNeeded(ErrorWithTraceback):
        """Inconsistency detected. Please run "borg check {}"."""

    class ObjectNotFound(ErrorWithTraceback):
        """Object with key {} not found in repository {}."""

        def __init__(self, id, repo):
            # show the key as hex in the error message instead of raw bytes
            if isinstance(id, bytes):
                id = bin_to_hex(id)
            super().__init__(id, repo)

    class InsufficientFreeSpaceError(Error):
        """Insufficient free space to complete transaction (required: {}, available: {})."""
    def __init__(self, path, create=False, exclusive=False, lock_wait=None, lock=True, append_only=False):
        """Prepare a Repository instance; no filesystem access happens here.

        Actual creation/opening is deferred to __enter__ (see do_create below).

        :param path: repository directory (made absolute)
        :param create: create a new repository on first __enter__
        :param exclusive: acquire the lock exclusively
        :param lock_wait: lock acquisition timeout, passed to Lock
        :param lock: whether to lock the repository at all
        :param append_only: disallow compaction/deletion of segments
        """
        self.path = os.path.abspath(path)
        self._location = Location('file://%s' % self.path)
        self.io = None          # LoggedIO instance, set in open()
        self.lock = None        # Lock instance while the repo is open
        self.index = None       # NSIndex, set in prepare_txn()
        # This is an index of shadowed log entries during this transaction. Consider the following sequence:
        # segment_n PUT A, segment_x DELETE A
        # After the "DELETE A" in segment_x the shadow index will contain "A -> [n]".
        self.shadow_index = {}
        self._active_txn = False
        self.lock_wait = lock_wait
        self.do_lock = lock
        self.do_create = create
        self.created = False
        self.exclusive = exclusive
        self.append_only = append_only
        # BORG_HOSTNAME_IS_UNIQUE enables killing of stale locks from this host
        self.hostname_is_unique = yes(env_var_override='BORG_HOSTNAME_IS_UNIQUE', env_msg=None, prompt=False)
        if self.hostname_is_unique:
            logger.info('Enabled removal of stale repository locks')
    def __del__(self):
        # Safety net only: the repository must be closed explicitly (close() or
        # the context manager). If we still hold the lock here, release it, then
        # fail loudly — reaching this cleanup path is considered a bug.
        if self.lock:
            self.close()
            assert False, "cleanup happened in Repository.__del__"
  105. def __repr__(self):
  106. return '<%s %s>' % (self.__class__.__name__, self.path)
  107. def __enter__(self):
  108. if self.do_create:
  109. self.do_create = False
  110. self.create(self.path)
  111. self.created = True
  112. self.open(self.path, bool(self.exclusive), lock_wait=self.lock_wait, lock=self.do_lock)
  113. return self
  114. def __exit__(self, exc_type, exc_val, exc_tb):
  115. if exc_type is not None:
  116. no_space_left_on_device = exc_type is OSError and exc_val.errno == errno.ENOSPC
  117. # The ENOSPC could have originated somewhere else besides the Repository. The cleanup is always safe, unless
  118. # EIO or FS corruption ensues, which is why we specifically check for ENOSPC.
  119. if self._active_txn and no_space_left_on_device:
  120. logger.warning('No space left on device, cleaning up partial transaction to free space.')
  121. cleanup = True
  122. else:
  123. cleanup = False
  124. self._rollback(cleanup=cleanup)
  125. self.close()
  126. @property
  127. def id_str(self):
  128. return bin_to_hex(self.id)
  129. def create(self, path):
  130. """Create a new empty repository at `path`
  131. """
  132. if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
  133. raise self.AlreadyExists(path)
  134. if not os.path.exists(path):
  135. os.mkdir(path)
  136. with open(os.path.join(path, 'README'), 'w') as fd:
  137. fd.write(REPOSITORY_README)
  138. os.mkdir(os.path.join(path, 'data'))
  139. config = ConfigParser(interpolation=None)
  140. config.add_section('repository')
  141. config.set('repository', 'version', '1')
  142. config.set('repository', 'segments_per_dir', str(DEFAULT_SEGMENTS_PER_DIR))
  143. config.set('repository', 'max_segment_size', str(DEFAULT_MAX_SEGMENT_SIZE))
  144. config.set('repository', 'append_only', str(int(self.append_only)))
  145. config.set('repository', 'additional_free_space', '0')
  146. config.set('repository', 'id', bin_to_hex(os.urandom(32)))
  147. self.save_config(path, config)
  148. def save_config(self, path, config):
  149. config_path = os.path.join(path, 'config')
  150. with SaveFile(config_path) as fd:
  151. config.write(fd)
  152. def save_key(self, keydata):
  153. assert self.config
  154. keydata = keydata.decode('utf-8') # remote repo: msgpack issue #99, getting bytes
  155. self.config.set('repository', 'key', keydata)
  156. self.save_config(self.path, self.config)
  157. def load_key(self):
  158. keydata = self.config.get('repository', 'key')
  159. return keydata.encode('utf-8') # remote repo: msgpack issue #99, returning bytes
  160. def get_free_nonce(self):
  161. if not self.lock.got_exclusive_lock():
  162. raise AssertionError("bug in code, exclusive lock should exist here")
  163. nonce_path = os.path.join(self.path, 'nonce')
  164. try:
  165. with open(nonce_path, 'r') as fd:
  166. return int.from_bytes(unhexlify(fd.read()), byteorder='big')
  167. except FileNotFoundError:
  168. return None
  169. def commit_nonce_reservation(self, next_unreserved, start_nonce):
  170. if not self.lock.got_exclusive_lock():
  171. raise AssertionError("bug in code, exclusive lock should exist here")
  172. if self.get_free_nonce() != start_nonce:
  173. raise Exception("nonce space reservation with mismatched previous state")
  174. nonce_path = os.path.join(self.path, 'nonce')
  175. with SaveFile(nonce_path, binary=False) as fd:
  176. fd.write(bin_to_hex(next_unreserved.to_bytes(8, byteorder='big')))
  177. def destroy(self):
  178. """Destroy the repository at `self.path`
  179. """
  180. if self.append_only:
  181. raise ValueError(self.path + " is in append-only mode")
  182. self.close()
  183. os.remove(os.path.join(self.path, 'config')) # kill config first
  184. shutil.rmtree(self.path)
  185. def get_index_transaction_id(self):
  186. indices = sorted(int(fn[6:])
  187. for fn in os.listdir(self.path)
  188. if fn.startswith('index.') and fn[6:].isdigit() and os.stat(os.path.join(self.path, fn)).st_size != 0)
  189. if indices:
  190. return indices[-1]
  191. else:
  192. return None
    def check_transaction(self):
        """Verify that index and segment transaction ids agree; replay segments if not.

        Raises CheckNeeded when an index exists but no committed segment does —
        that inconsistency cannot be repaired automatically here.
        """
        index_transaction_id = self.get_index_transaction_id()
        segments_transaction_id = self.io.get_segments_transaction_id()
        if index_transaction_id is not None and segments_transaction_id is None:
            raise self.CheckNeeded(self.path)
        # Attempt to automatically rebuild index if we crashed between commit
        # tag write and index save
        if index_transaction_id != segments_transaction_id:
            if index_transaction_id is not None and index_transaction_id > segments_transaction_id:
                # index is newer than the segments -> rebuild from scratch
                replay_from = None
            else:
                replay_from = index_transaction_id
            self.replay_segments(replay_from, segments_transaction_id)
  206. def get_transaction_id(self):
  207. self.check_transaction()
  208. return self.get_index_transaction_id()
  209. def break_lock(self):
  210. Lock(os.path.join(self.path, 'lock')).break_lock()
    def open(self, path, exclusive, lock_wait=None, lock=True):
        """Open an existing repository: acquire the lock, read config, set up IO.

        Raises DoesNotExist if `path` is not a directory, InvalidRepository if
        the config is missing/unsupported.
        """
        self.path = path
        if not os.path.isdir(path):
            raise self.DoesNotExist(path)
        if lock:
            self.lock = Lock(os.path.join(path, 'lock'), exclusive, timeout=lock_wait, kill_stale_locks=self.hostname_is_unique).acquire()
        else:
            self.lock = None
        self.config = ConfigParser(interpolation=None)
        self.config.read(os.path.join(self.path, 'config'))
        if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
            # release the lock before bailing out
            self.close()
            raise self.InvalidRepository(path)
        self.max_segment_size = self.config.getint('repository', 'max_segment_size')
        self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
        self.additional_free_space = parse_file_size(self.config.get('repository', 'additional_free_space', fallback=0))
        # append_only can be set in the constructor
        # it shouldn't be overridden (True -> False) here
        self.append_only = self.append_only or self.config.getboolean('repository', 'append_only', fallback=False)
        self.id = unhexlify(self.config.get('repository', 'id').strip())
        self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)
  232. def close(self):
  233. if self.lock:
  234. if self.io:
  235. self.io.close()
  236. self.io = None
  237. self.lock.release()
  238. self.lock = None
    def commit(self, save_space=False):
        """Commit transaction
        """
        # save_space is not used anymore, but stays for RPC/API compatibility.
        # The order below matters: space check first, then the commit entry is
        # written to the segment log, then (unless append-only) sparse segments
        # are compacted, and finally the index/hints are persisted.
        self.check_free_space()
        self.io.write_commit()
        if not self.append_only:
            self.compact_segments()
        self.write_index()
        self.rollback()
    def open_index(self, transaction_id, auto_recover=True):
        """Return the NSIndex for `transaction_id` (empty index if None).

        On a corrupted/unreadable index file: delete it and, if auto_recover,
        rebuild it by replaying segments (via prepare_txn + commit), then retry.
        """
        if transaction_id is None:
            return NSIndex()
        index_path = os.path.join(self.path, 'index.%d' % transaction_id).encode('utf-8')
        try:
            return NSIndex.read(index_path)
        except RuntimeError as error:
            assert str(error) == 'hashindex_read failed'  # everything else means we're in *deep* trouble
            logger.warning('Repository index missing or corrupted, trying to recover')
            os.unlink(index_path)
            if not auto_recover:
                raise
            # rebuilds the index by replaying segments
            self.prepare_txn(self.get_transaction_id())
            # don't leave an open transaction around
            self.commit()
            return self.open_index(self.get_transaction_id())
    def prepare_txn(self, transaction_id, do_cleanup=True):
        """Begin a write transaction: ensure an exclusive lock, load index and hints.

        With transaction_id None, start from empty in-memory state; otherwise
        load (and if necessary recover/upgrade) the hints file for that txn.
        """
        self._active_txn = True
        if not self.lock.got_exclusive_lock():
            if self.exclusive is not None:
                # self.exclusive is either True or False, thus a new client is active here.
                # if it is False and we get here, the caller did not use exclusive=True although
                # it is needed for a write operation. if it is True and we get here, something else
                # went very wrong, because we should have an exclusive lock, but we don't.
                raise AssertionError("bug in code, exclusive lock should exist here")
            # if we are here, this is an old client talking to a new server (expecting lock upgrade).
            # or we are replaying segments and might need a lock upgrade for that.
            try:
                self.lock.upgrade()
            except (LockError, LockErrorT):
                # if upgrading the lock to exclusive fails, we do not have an
                # active transaction. this is important for "serve" mode, where
                # the repository instance lives on - even if exceptions happened.
                self._active_txn = False
                raise
        if not self.index or transaction_id is None:
            try:
                self.index = self.open_index(transaction_id, False)
            except RuntimeError:
                # index unreadable -> reconcile/replay, then retry once
                self.check_transaction()
                self.index = self.open_index(transaction_id, False)
        if transaction_id is None:
            self.segments = {}  # XXX bad name: usage_count_of_segment_x = self.segments[x]
            self.compact = FreeSpace()  # XXX bad name: freeable_space_of_segment_x = self.compact[x]
            self.shadow_index.clear()
        else:
            if do_cleanup:
                self.io.cleanup(transaction_id)
            hints_path = os.path.join(self.path, 'hints.%d' % transaction_id)
            index_path = os.path.join(self.path, 'index.%d' % transaction_id)
            try:
                with open(hints_path, 'rb') as fd:
                    hints = msgpack.unpack(fd)
            except (msgpack.UnpackException, msgpack.ExtraData, FileNotFoundError) as e:
                logger.warning('Repository hints file missing or corrupted, trying to recover')
                if not isinstance(e, FileNotFoundError):
                    os.unlink(hints_path)
                # index must exist at this point
                os.unlink(index_path)
                self.check_transaction()
                # retry with freshly rebuilt index/hints
                self.prepare_txn(transaction_id)
                return
            if hints[b'version'] == 1:
                logger.debug('Upgrading from v1 hints.%d', transaction_id)
                self.segments = hints[b'segments']
                self.compact = FreeSpace()
                # v1 hints only listed sparse segments; recompute their freeable bytes
                for segment in sorted(hints[b'compact']):
                    logger.debug('Rebuilding sparse info for segment %d', segment)
                    self._rebuild_sparse(segment)
                logger.debug('Upgrade to v2 hints complete')
            elif hints[b'version'] != 2:
                raise ValueError('Unknown hints file version: %d' % hints[b'version'])
            else:
                self.segments = hints[b'segments']
                self.compact = FreeSpace(hints[b'compact'])
            # Drop uncommitted segments in the shadow index
            for key, shadowed_segments in self.shadow_index.items():
                for segment in list(shadowed_segments):
                    if segment > transaction_id:
                        shadowed_segments.remove(segment)
    def write_index(self):
        """Persist hints and index for the current transaction, then drop old files.

        hints are written via tmp-file + fsync + rename so a crash never leaves
        a partially written hints file under the final name.
        """
        hints = {b'version': 2,
                 b'segments': self.segments,
                 b'compact': self.compact}
        transaction_id = self.io.get_segments_transaction_id()
        assert transaction_id is not None
        hints_file = os.path.join(self.path, 'hints.%d' % transaction_id)
        with open(hints_file + '.tmp', 'wb') as fd:
            msgpack.pack(hints, fd)
            fd.flush()
            os.fsync(fd.fileno())
        os.rename(hints_file + '.tmp', hints_file)
        self.index.write(os.path.join(self.path, 'index.tmp'))
        os.rename(os.path.join(self.path, 'index.tmp'),
                  os.path.join(self.path, 'index.%d' % transaction_id))
        if self.append_only:
            # keep an audit trail of transactions in append-only mode
            with open(os.path.join(self.path, 'transactions'), 'a') as log:
                print('transaction %d, UTC time %s' % (transaction_id, datetime.utcnow().isoformat()), file=log)
        # Remove old auxiliary files
        current = '.%d' % transaction_id
        for name in os.listdir(self.path):
            if not name.startswith(('index.', 'hints.')):
                continue
            if name.endswith(current):
                continue
            os.unlink(os.path.join(self.path, name))
        self.index = None
    def check_free_space(self):
        """Pre-commit check for sufficient free space to actually perform the commit."""
        # As a baseline we take four times the current (on-disk) index size.
        # At this point the index may only be updated by compaction, which won't resize it.
        # We still apply a factor of four so that a later, separate invocation can free space
        # (journaling all deletes for all chunks is one index size) or still make minor additions
        # (which may grow the index up to twice its current size).
        # Note that in a subsequent operation the committed index is still on-disk, therefore we
        # arrive at index_size * (1 + 2 + 1).
        # In that order: journaled deletes (1), hashtable growth (2), persisted index (1).
        required_free_space = self.index.size() * 4
        # Conservatively estimate hints file size:
        # 10 bytes for each segment-refcount pair, 10 bytes for each segment-space pair
        # Assume maximum of 5 bytes per integer. Segment numbers will usually be packed more densely (1-3 bytes),
        # as will refcounts and free space integers. For 5 MiB segments this estimate is good to ~20 PB repo size.
        # Add 4K to generously account for constant format overhead.
        hints_size = len(self.segments) * 10 + len(self.compact) * 10 + 4096
        required_free_space += hints_size
        required_free_space += self.additional_free_space
        if not self.append_only:
            full_segment_size = self.max_segment_size + MAX_OBJECT_SIZE
            if len(self.compact) < 10:
                # This is mostly for the test suite to avoid overestimated free space needs. This can be annoying
                # if TMP is a small-ish tmpfs.
                compact_working_space = sum(self.io.segment_size(segment) - free for segment, free in self.compact.items())
                logger.debug('check_free_space: few segments, not requiring a full free segment')
                compact_working_space = min(compact_working_space, full_segment_size)
                logger.debug('check_free_space: calculated working space for compact as %d bytes', compact_working_space)
                required_free_space += compact_working_space
            else:
                # Keep one full worst-case segment free in non-append-only mode
                required_free_space += full_segment_size
        try:
            st_vfs = os.statvfs(self.path)
        except OSError as os_error:
            # best-effort check: if statvfs fails, skip the check rather than abort
            logger.warning('Failed to check free space before committing: ' + str(os_error))
            return
        # f_bavail: even as root - don't touch the Federal Block Reserve!
        free_space = st_vfs.f_bavail * st_vfs.f_bsize
        logger.debug('check_free_space: required bytes {}, free bytes {}'.format(required_free_space, free_space))
        if free_space < required_free_space:
            if self.created:
                # a brand-new repo that can't fit a commit is useless — remove it
                logger.error('Not enough free space to initialize repository at this location.')
                self.destroy()
            else:
                self._rollback(cleanup=True)
            formatted_required = format_file_size(required_free_space)
            formatted_free = format_file_size(free_space)
            raise self.InsufficientFreeSpaceError(formatted_required, formatted_free)
    def compact_segments(self):
        """Compact sparse segments by copying data into new segments
        """
        if not self.compact:
            return
        index_transaction_id = self.get_index_transaction_id()
        segments = self.segments
        unused = []  # list of segments, that are not used anymore
        logger = create_logger('borg.debug.compact_segments')

        def complete_xfer(intermediate=True):
            # complete the current transfer (when some target segment is full)
            nonlocal unused
            # commit the new, compact, used segments
            segment = self.io.write_commit(intermediate=intermediate)
            logger.debug('complete_xfer: wrote %scommit at segment %d', 'intermediate ' if intermediate else '', segment)
            # get rid of the old, sparse, unused segments. free space.
            for segment in unused:
                logger.debug('complete_xfer: deleting unused segment %d', segment)
                assert self.segments.pop(segment) == 0
                self.io.delete_segment(segment)
                del self.compact[segment]
            unused = []

        logger.debug('compaction started.')
        pi = ProgressIndicatorPercent(total=len(self.compact), msg='Compacting segments %3.0f%%', step=1)
        for segment, freeable_space in sorted(self.compact.items()):
            if not self.io.segment_exists(segment):
                logger.warning('segment %d not found, but listed in compaction data', segment)
                del self.compact[segment]
                pi.show()
                continue
            segment_size = self.io.segment_size(segment)
            # skip large segments that would free too little space (< 15%)
            if segment_size > 0.2 * self.max_segment_size and freeable_space < 0.15 * segment_size:
                logger.debug('not compacting segment %d (only %d bytes are sparse)', segment, freeable_space)
                pi.show()
                continue
            segments.setdefault(segment, 0)
            logger.debug('compacting segment %d with usage count %d and %d freeable bytes',
                         segment, segments[segment], freeable_space)
            for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
                if tag == TAG_COMMIT:
                    continue
                in_index = self.index.get(key)
                is_index_object = in_index == (segment, offset)
                if tag == TAG_PUT and is_index_object:
                    # current PUT: copy it into a fresh segment and update the index
                    try:
                        new_segment, offset = self.io.write_put(key, data, raise_full=True)
                    except LoggedIO.SegmentFull:
                        complete_xfer()
                        new_segment, offset = self.io.write_put(key, data)
                    self.index[key] = new_segment, offset
                    segments.setdefault(new_segment, 0)
                    segments[new_segment] += 1
                    segments[segment] -= 1
                elif tag == TAG_PUT and not is_index_object:
                    # If this is a PUT shadowed by a later tag, then it will be gone when this segment is deleted after
                    # this loop. Therefore it is removed from the shadow index.
                    try:
                        self.shadow_index[key].remove(segment)
                    except (KeyError, ValueError):
                        pass
                elif tag == TAG_DELETE and not in_index:
                    # If the shadow index doesn't contain this key, then we can't say if there's a shadowed older tag,
                    # therefore we do not drop the delete, but write it to a current segment.
                    shadowed_put_exists = key not in self.shadow_index or any(
                        # If the key is in the shadow index and there is any segment with an older PUT of this
                        # key, we have a shadowed put.
                        shadowed < segment for shadowed in self.shadow_index[key])
                    delete_is_not_stable = index_transaction_id is None or segment > index_transaction_id
                    if shadowed_put_exists or delete_is_not_stable:
                        # (introduced in 6425d16aa84be1eaaf88)
                        # This is needed to avoid object un-deletion if we crash between the commit and the deletion
                        # of old segments in complete_xfer().
                        #
                        # However, this only happens if the crash also affects the FS to the effect that file deletions
                        # did not materialize consistently after journal recovery. If they always materialize in-order
                        # then this is not a problem, because the old segment containing a deleted object would be deleted
                        # before the segment containing the delete.
                        #
                        # Consider the following series of operations if we would not do this, ie. this entire if:
                        # would be removed.
                        # Columns are segments, lines are different keys (line 1 = some key, line 2 = some other key)
                        # Legend: P=TAG_PUT, D=TAG_DELETE, c=commit, i=index is written for latest commit
                        #
                        # Segment | 1     | 2   | 3
                        # --------+-------+-----+------
                        # Key 1   | P     | D   |
                        # Key 2   | P     |     | P
                        # commits |   c i |   c |   c i
                        # --------+-------+-----+------
                        #                       ^- compact_segments starts
                        #                           ^- complete_xfer commits, after that complete_xfer deletes
                        #                              segments 1 and 2 (and then the index would be written).
                        #
                        # Now we crash. But only segment 2 gets deleted, while segment 1 is still around. Now key 1
                        # is suddenly undeleted (because the delete in segment 2 is now missing).
                        # Again, note the requirement here. We delete these in the correct order that this doesn't happen,
                        # and only if the FS materialization of these deletes is reordered or parts dropped this can happen.
                        # In this case it doesn't cause outright corruption, 'just' an index count mismatch, which will be
                        # fixed by borg-check --repair.
                        #
                        # Note that in this check the index state is the proxy for a "most definitely settled" repository state,
                        # ie. the assumption is that *all* operations on segments <= index state are completed and stable.
                        try:
                            new_segment, size = self.io.write_delete(key, raise_full=True)
                        except LoggedIO.SegmentFull:
                            complete_xfer()
                            new_segment, size = self.io.write_delete(key)
                        self.compact[new_segment] += size
                        segments.setdefault(new_segment, 0)
            assert segments[segment] == 0
            unused.append(segment)
            pi.show()
        pi.finish()
        complete_xfer(intermediate=False)
        logger.debug('compaction completed.')
    def replay_segments(self, index_transaction_id, segments_transaction_id):
        """Rebuild the index by replaying all committed segments after `index_transaction_id`.

        Used to recover when the on-disk index is older than (or inconsistent
        with) the segment log.
        """
        # fake an old client, so that in case we do not have an exclusive lock yet, prepare_txn will upgrade the lock:
        remember_exclusive = self.exclusive
        self.exclusive = None
        self.prepare_txn(index_transaction_id, do_cleanup=False)
        try:
            segment_count = sum(1 for _ in self.io.segment_iterator())
            pi = ProgressIndicatorPercent(total=segment_count, msg="Replaying segments %3.0f%%")
            for i, (segment, filename) in enumerate(self.io.segment_iterator()):
                pi.show(i)
                # segments already covered by the index need no replay
                if index_transaction_id is not None and segment <= index_transaction_id:
                    continue
                # stop at the last committed segment
                if segment > segments_transaction_id:
                    break
                objects = self.io.iter_objects(segment)
                self._update_index(segment, objects)
            pi.finish()
            self.write_index()
        finally:
            self.exclusive = remember_exclusive
            self.rollback()
    def _update_index(self, segment, objects, report=None):
        """some code shared between replay_segments and check

        Applies the log entries of one segment to the in-memory index,
        maintaining per-segment usage counts and freeable-space accounting.
        `report`, if given, is called with a message for unexpected tags
        instead of raising CheckNeeded.
        """
        self.segments[segment] = 0
        for tag, key, offset, size in objects:
            if tag == TAG_PUT:
                try:
                    # If this PUT supersedes an older PUT, mark the old segment for compaction and count the free space
                    s, _ = self.index[key]
                    self.compact[s] += size
                    self.segments[s] -= 1
                except KeyError:
                    pass
                self.index[key] = segment, offset
                self.segments[segment] += 1
            elif tag == TAG_DELETE:
                try:
                    # if the deleted PUT is not in the index, there is nothing to clean up
                    s, offset = self.index.pop(key)
                except KeyError:
                    pass
                else:
                    if self.io.segment_exists(s):
                        # the old index is not necessarily valid for this transaction (e.g. compaction); if the segment
                        # is already gone, then it was already compacted.
                        self.segments[s] -= 1
                        size = self.io.read(s, offset, key, read_data=False)
                        self.compact[s] += size
            elif tag == TAG_COMMIT:
                continue
            else:
                msg = 'Unexpected tag {} in segment {}'.format(tag, segment)
                if report is None:
                    raise self.CheckNeeded(msg)
                else:
                    report(msg)
        if self.segments[segment] == 0:
            # nothing in this segment is current -> the whole segment is freeable
            self.compact[segment] += self.io.segment_size(segment)
  578. def _rebuild_sparse(self, segment):
  579. """Rebuild sparse bytes count for a single segment relative to the current index."""
  580. self.compact[segment] = 0
  581. if self.segments[segment] == 0:
  582. self.compact[segment] += self.io.segment_size(segment)
  583. return
  584. for tag, key, offset, size in self.io.iter_objects(segment, read_data=False):
  585. if tag == TAG_PUT:
  586. if self.index.get(key, (-1, -1)) != (segment, offset):
  587. # This PUT is superseded later
  588. self.compact[segment] += size
  589. elif tag == TAG_DELETE:
  590. # The outcome of the DELETE has been recorded in the PUT branch already
  591. self.compact[segment] += size
def check(self, repair=False, save_space=False):
    """Check repository consistency

    This method verifies all segment checksums and makes sure
    the index is consistent with the data stored in the segments.

    Returns True when the repository is usable (no errors, or errors were
    repaired), False otherwise. With repair=True, damaged segments are
    recovered, a missing commit tag is added, and segments are compacted.
    """
    if self.append_only and repair:
        raise ValueError(self.path + " is in append-only mode")
    error_found = False

    def report_error(msg):
        # record that at least one error was seen, and log it
        nonlocal error_found
        error_found = True
        logger.error(msg)

    logger.info('Starting repository check')
    assert not self._active_txn
    try:
        transaction_id = self.get_transaction_id()
        current_index = self.open_index(transaction_id)
        logger.debug('Read committed index of transaction %d', transaction_id)
    except Exception as exc:
        # committed index unreadable - fall back to what the segments say
        transaction_id = self.io.get_segments_transaction_id()
        current_index = None
        logger.debug('Failed to read committed index (%s)', exc)
    if transaction_id is None:
        logger.debug('No segments transaction found')
        transaction_id = self.get_index_transaction_id()
    if transaction_id is None:
        logger.debug('No index transaction found, trying latest segment')
        transaction_id = self.io.get_latest_segment()
    if transaction_id is None:
        report_error('This repository contains no valid data.')
        return False
    if repair:
        # delete segment files of aborted (uncommitted) transactions
        self.io.cleanup(transaction_id)
    segments_transaction_id = self.io.get_segments_transaction_id()
    logger.debug('Segment transaction is %s', segments_transaction_id)
    logger.debug('Determined transaction is %s', transaction_id)
    self.prepare_txn(None)  # self.index, self.compact, self.segments all empty now!
    segment_count = sum(1 for _ in self.io.segment_iterator())
    logger.debug('Found %d segments', segment_count)
    pi = ProgressIndicatorPercent(total=segment_count, msg="Checking segments %3.1f%%", step=0.1)
    for i, (segment, filename) in enumerate(self.io.segment_iterator()):
        pi.show(i)
        if segment > transaction_id:
            # newer than the transaction being checked - ignore
            continue
        try:
            objects = list(self.io.iter_objects(segment))
        except IntegrityError as err:
            report_error(str(err))
            objects = []
            if repair:
                # rewrite the segment keeping only entries with valid CRCs,
                # then re-read what survived
                self.io.recover_segment(segment, filename)
                objects = list(self.io.iter_objects(segment))
        self._update_index(segment, objects, report_error)
    pi.finish()
    # self.index, self.segments, self.compact now reflect the state of the segment files up to <transaction_id>
    # We might need to add a commit tag if no committed segment is found
    if repair and segments_transaction_id is None:
        report_error('Adding commit tag to segment {}'.format(transaction_id))
        self.io.segment = transaction_id + 1
        self.io.write_commit()
    logger.info('Starting repository index check')
    if current_index and not repair:
        # current_index = "as found on disk"
        # self.index = "as rebuilt in-memory from segments"
        if len(current_index) != len(self.index):
            report_error('Index object count mismatch.')
            logger.error('committed index: %d objects', len(current_index))
            logger.error('rebuilt index: %d objects', len(self.index))
            line_format = '%-64s %-16s %-16s'
            not_found = '<not found>'
            logger.warning(line_format, 'ID', 'rebuilt index', 'committed index')
            # show entries that differ, in both directions
            for key, value in self.index.iteritems():
                current_value = current_index.get(key, not_found)
                if current_value != value:
                    logger.warning(line_format, bin_to_hex(key), value, current_value)
            for key, current_value in current_index.iteritems():
                if key in self.index:
                    continue
                value = self.index.get(key, not_found)
                if current_value != value:
                    logger.warning(line_format, bin_to_hex(key), value, current_value)
        elif current_index:
            # NOTE: current_index is already known truthy here (outer if);
            # same-size indexes are compared entry by entry.
            for key, value in self.index.iteritems():
                if current_index.get(key, (-1, -1)) != value:
                    report_error('Index mismatch for key {}. {} != {}'.format(key, value, current_index.get(key, (-1, -1))))
    if repair:
        self.compact_segments()
        self.write_index()
    self.rollback()
    if error_found:
        if repair:
            logger.info('Completed repository check, errors found and repaired.')
        else:
            logger.error('Completed repository check, errors found.')
    else:
        logger.info('Completed repository check, no problems found.')
    return not error_found or repair
  689. def _rollback(self, *, cleanup):
  690. """
  691. """
  692. if cleanup:
  693. self.io.cleanup(self.io.get_segments_transaction_id())
  694. self.index = None
  695. self._active_txn = False
  696. def rollback(self):
  697. self._rollback(cleanup=False)
  698. def __len__(self):
  699. if not self.index:
  700. self.index = self.open_index(self.get_transaction_id())
  701. return len(self.index)
  702. def __contains__(self, id):
  703. if not self.index:
  704. self.index = self.open_index(self.get_transaction_id())
  705. return id in self.index
  706. def list(self, limit=None, marker=None):
  707. """
  708. list <limit> IDs starting from after id <marker> - in index (pseudo-random) order.
  709. """
  710. if not self.index:
  711. self.index = self.open_index(self.get_transaction_id())
  712. return [id_ for id_, _ in islice(self.index.iteritems(marker=marker), limit)]
def scan(self, limit=None, marker=None):
    """
    list <limit> IDs starting from after id <marker> - in on-disk order, so that a client
    fetching data in this order does linear reads and reuses stuff from disk cache.

    We rely on repository.check() has run already (either now or some time before) and that:

    - if we are called from a borg check command, self.index is a valid, fresh, in-sync repo index.
    - if we are called from elsewhere, either self.index or the on-disk index is valid and in-sync.
    - the repository segments are valid (no CRC errors).
      if we encounter CRC errors in segment entry headers, rest of segment is skipped.
    """
    if limit is not None and limit < 1:
        raise ValueError('please use limit > 0 or limit = None')
    if not self.index:
        transaction_id = self.get_transaction_id()
        self.index = self.open_index(transaction_id)
    at_start = marker is None
    # smallest valid seg is <uint32> 0, smallest valid offs is <uint32> 8
    start_segment, start_offset = (0, 0) if at_start else self.index[marker]
    result = []
    for segment, filename in self.io.segment_iterator(start_segment):
        # start_offset is non-zero only while we are still inside the marker's segment
        obj_iterator = self.io.iter_objects(segment, start_offset, read_data=False, include_data=False)
        while True:
            try:
                tag, id, offset, size = next(obj_iterator)
            except (StopIteration, IntegrityError):
                # either end-of-segment or an error - we can not seek to objects at
                # higher offsets than one that has an error in the header fields
                break
            if start_offset > 0:
                # we are using a marker and the marker points to the last object we have already
                # returned in the previous scan() call - thus, we need to skip this one object.
                # also, for the next segment, we need to start at offset 0.
                start_offset = 0
                continue
            if tag == TAG_PUT and (segment, offset) == self.index.get(id):
                # we have found an existing and current object
                result.append(id)
                if len(result) == limit:
                    return result
    return result
  753. def get(self, id):
  754. if not self.index:
  755. self.index = self.open_index(self.get_transaction_id())
  756. try:
  757. segment, offset = self.index[id]
  758. return self.io.read(segment, offset, id)
  759. except KeyError:
  760. raise self.ObjectNotFound(id, self.path) from None
  761. def get_many(self, ids, is_preloaded=False):
  762. for id_ in ids:
  763. yield self.get(id_)
def put(self, id, data, wait=True):
    """Store *data* under key *id*, superseding any previous value.

    NOTE(review): *wait* is unused here - presumably kept for interface
    compatibility with the remote repository API; confirm against callers.
    """
    if not self._active_txn:
        self.prepare_txn(self.get_transaction_id())
    try:
        segment, offset = self.index[id]
    except KeyError:
        pass
    else:
        # key exists: the old PUT becomes reclaimable space in its segment,
        # and a DELETE entry is logged before writing the new PUT.
        self.segments[segment] -= 1
        # read_data=False: read() returns the entry size only
        size = self.io.read(segment, offset, id, read_data=False)
        self.compact[segment] += size
        segment, size = self.io.write_delete(id)
        # the DELETE entry itself is also dead weight once compacted
        self.compact[segment] += size
        self.segments.setdefault(segment, 0)
    segment, offset = self.io.write_put(id, data)
    self.segments.setdefault(segment, 0)
    self.segments[segment] += 1
    self.index[id] = segment, offset
def delete(self, id, wait=True):
    """Remove key *id* from the repository; raises ObjectNotFound if absent.

    NOTE(review): *wait* is unused here - presumably kept for interface
    compatibility with the remote repository API; confirm against callers.
    """
    if not self._active_txn:
        self.prepare_txn(self.get_transaction_id())
    try:
        segment, offset = self.index.pop(id)
    except KeyError:
        raise self.ObjectNotFound(id, self.path) from None
    # remember which segment held the deleted PUT
    self.shadow_index.setdefault(id, []).append(segment)
    # the old PUT's bytes become reclaimable in its segment ...
    self.segments[segment] -= 1
    # read_data=False: read() returns the entry size only
    size = self.io.read(segment, offset, id, read_data=False)
    self.compact[segment] += size
    # ... and the freshly written DELETE entry is itself dead weight too
    segment, size = self.io.write_delete(id)
    self.compact[segment] += size
    self.segments.setdefault(segment, 0)
  796. def preload(self, ids):
  797. """Preload objects (only applies to remote repositories)
  798. """
class LoggedIO:
    """Low-level segment-file backend.

    A repository's data lives in numbered segment files under <path>/data/<dir>/,
    each an append-only log of entries. Entry layout (little-endian, see the
    struct formats below): crc32 (4 bytes), total size (4 bytes), tag (1 byte),
    then for PUT/DELETE a 32-byte key, then for PUT the payload data.
    A COMMIT entry marks the end of a committed transaction.
    """

    class SegmentFull(Exception):
        """raised when a segment is full, before opening next"""

    # crc32, size, tag
    header_fmt = struct.Struct('<IIB')
    assert header_fmt.size == 9
    # crc32, size, tag, 32-byte key (PUT/DELETE entries)
    put_header_fmt = struct.Struct('<IIB32s')
    assert put_header_fmt.size == 41
    # size, tag - the part of the header covered by the crc
    header_no_crc_fmt = struct.Struct('<IB')
    assert header_no_crc_fmt.size == 5
    crc_fmt = struct.Struct('<I')
    assert crc_fmt.size == 4

    # precomputed on-disk bytes of a COMMIT entry (crc + size=9 + tag)
    _commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
    COMMIT = crc_fmt.pack(crc32(_commit)) + _commit

    def __init__(self, path, limit, segments_per_dir, capacity=90):
        self.path = path
        # LRU cache of read-only segment fds; evicted fds are closed via close_fd()
        self.fds = LRUCache(capacity,
                            dispose=self.close_fd)
        self.segment = 0
        # soft size limit at which a new segment file is started
        self.limit = limit
        self.segments_per_dir = segments_per_dir
        # current write offset within the open write segment
        self.offset = 0
        self._write_fd = None

    def close(self):
        self.close_segment()
        self.fds.clear()
        self.fds = None  # Just to make sure we're disabled

    def close_fd(self, fd):
        # drop the OS page cache for this file before closing, where supported
        if hasattr(os, 'posix_fadvise'):  # only on UNIX
            os.posix_fadvise(fd.fileno(), 0, 0, os.POSIX_FADV_DONTNEED)
        fd.close()

    def segment_iterator(self, segment=None, reverse=False):
        """Yield (segment_number, file_path) pairs, ascending from *segment*
        (or descending when reverse=True). Non-numeric dir/file names are skipped.
        """
        if segment is None:
            segment = 0 if not reverse else 2 ** 32 - 1
        data_path = os.path.join(self.path, 'data')
        start_segment_dir = segment // self.segments_per_dir
        dirs = os.listdir(data_path)
        if not reverse:
            dirs = [dir for dir in dirs if dir.isdigit() and int(dir) >= start_segment_dir]
        else:
            dirs = [dir for dir in dirs if dir.isdigit() and int(dir) <= start_segment_dir]
        dirs = sorted(dirs, key=int, reverse=reverse)
        for dir in dirs:
            filenames = os.listdir(os.path.join(data_path, dir))
            if not reverse:
                filenames = [filename for filename in filenames if filename.isdigit() and int(filename) >= segment]
            else:
                filenames = [filename for filename in filenames if filename.isdigit() and int(filename) <= segment]
            filenames = sorted(filenames, key=int, reverse=reverse)
            for filename in filenames:
                yield int(filename), os.path.join(data_path, dir, filename)

    def get_latest_segment(self):
        # highest-numbered segment on disk, or None if there are none
        for segment, filename in self.segment_iterator(reverse=True):
            return segment
        return None

    def get_segments_transaction_id(self):
        """Return the last committed segment.
        """
        for segment, filename in self.segment_iterator(reverse=True):
            if self.is_committed_segment(segment):
                return segment
        return None

    def cleanup(self, transaction_id):
        """Delete segment files left by aborted transactions
        """
        self.segment = transaction_id + 1
        for segment, filename in self.segment_iterator(reverse=True):
            if segment > transaction_id:
                os.unlink(filename)
            else:
                break

    def is_committed_segment(self, segment):
        """Check if segment ends with a COMMIT_TAG tag
        """
        try:
            iterator = self.iter_objects(segment)
        except IntegrityError:
            return False
        # fast path: the last header-sized bytes of the file must be exactly COMMIT
        with open(self.segment_filename(segment), 'rb') as fd:
            try:
                fd.seek(-self.header_fmt.size, os.SEEK_END)
            except OSError as e:
                # return False if segment file is empty or too small
                if e.errno == errno.EINVAL:
                    return False
                raise e
            if fd.read(self.header_fmt.size) != self.COMMIT:
                return False
        # slow path: iterate entries to confirm COMMIT really is the final entry
        seen_commit = False
        while True:
            try:
                tag, key, offset, _ = next(iterator)
            except IntegrityError:
                return False
            except StopIteration:
                break
            if tag == TAG_COMMIT:
                seen_commit = True
                continue
            if seen_commit:
                # an entry after a COMMIT -> not cleanly committed
                return False
        return seen_commit

    def segment_filename(self, segment):
        return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))

    def get_write_fd(self, no_new=False, raise_full=False):
        """Return the fd of the current write segment, rolling over to a new
        segment file when the size limit is exceeded (unless no_new is set).
        """
        if not no_new and self.offset and self.offset > self.limit:
            if raise_full:
                raise self.SegmentFull
            self.close_segment()
        if not self._write_fd:
            if self.segment % self.segments_per_dir == 0:
                # first segment of a new subdirectory - create and fsync the parent
                dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
                if not os.path.exists(dirname):
                    os.mkdir(dirname)
                    sync_dir(os.path.join(self.path, 'data'))
            self._write_fd = SyncFile(self.segment_filename(self.segment), binary=True)
            self._write_fd.write(MAGIC)
            self.offset = MAGIC_LEN
        return self._write_fd

    def get_fd(self, segment):
        # return a (cached) read-only fd for *segment*
        try:
            return self.fds[segment]
        except KeyError:
            fd = open(self.segment_filename(segment), 'rb')
            self.fds[segment] = fd
            return fd

    def close_segment(self):
        if self._write_fd:
            self.segment += 1
            self.offset = 0
            self._write_fd.close()
            self._write_fd = None

    def delete_segment(self, segment):
        if segment in self.fds:
            del self.fds[segment]
        try:
            os.unlink(self.segment_filename(segment))
        except FileNotFoundError:
            pass

    def segment_exists(self, segment):
        return os.path.exists(self.segment_filename(segment))

    def segment_size(self, segment):
        return os.path.getsize(self.segment_filename(segment))

    def iter_objects(self, segment, offset=0, include_data=False, read_data=True):
        """
        Return object iterator for *segment*.

        If read_data is False then include_data must be False as well.
        Integrity checks are skipped: all data obtained from the iterator must be considered informational.

        The iterator returns four-tuples of (tag, key, offset, data|size).
        """
        fd = self.get_fd(segment)
        fd.seek(offset)
        if offset == 0:
            # we are touching this segment for the first time, check the MAGIC.
            # Repository.scan() calls us with segment > 0 when it continues an ongoing iteration
            # from a marker position - but then we have checked the magic before already.
            if fd.read(MAGIC_LEN) != MAGIC:
                raise IntegrityError('Invalid segment magic [segment {}, offset {}]'.format(segment, 0))
            offset = MAGIC_LEN
        header = fd.read(self.header_fmt.size)
        while header:
            size, tag, key, data = self._read(fd, self.header_fmt, header, segment, offset,
                                              (TAG_PUT, TAG_DELETE, TAG_COMMIT),
                                              read_data=read_data)
            if include_data:
                yield tag, key, offset, data
            else:
                yield tag, key, offset, size
            offset += size
            # we must get the fd via get_fd() here again as we yielded to our caller and it might
            # have triggered closing of the fd we had before (e.g. by calling io.read() for
            # different segment(s)).
            # by calling get_fd() here again we also make our fd "recently used" so it likely
            # does not get kicked out of self.fds LRUcache.
            fd = self.get_fd(segment)
            fd.seek(offset)
            header = fd.read(self.header_fmt.size)

    def recover_segment(self, segment, filename):
        """Rewrite *filename*, keeping only entries whose size and crc check out.

        The original file is preserved as <filename>.beforerecover; on a bad
        entry, scanning advances byte by byte until a valid entry is found.
        """
        if segment in self.fds:
            del self.fds[segment]
        with open(filename, 'rb') as fd:
            data = memoryview(fd.read())
        os.rename(filename, filename + '.beforerecover')
        logger.info('attempting to recover ' + filename)
        with open(filename, 'wb') as fd:
            fd.write(MAGIC)
            while len(data) >= self.header_fmt.size:
                crc, size, tag = self.header_fmt.unpack(data[:self.header_fmt.size])
                if size < self.header_fmt.size or size > len(data):
                    # implausible size - resynchronize one byte further on
                    data = data[1:]
                    continue
                if crc32(data[4:size]) & 0xffffffff != crc:
                    # checksum mismatch - resynchronize one byte further on
                    data = data[1:]
                    continue
                fd.write(data[:size])
                data = data[size:]

    def read(self, segment, offset, id, read_data=True):
        """
        Read entry from *segment* at *offset* with *id*.

        If read_data is False the size of the entry is returned instead and integrity checks are skipped.
        The return value should thus be considered informational.
        """
        if segment == self.segment and self._write_fd:
            # reading from the segment currently being written - flush it first
            self._write_fd.sync()
        fd = self.get_fd(segment)
        fd.seek(offset)
        header = fd.read(self.put_header_fmt.size)
        size, tag, key, data = self._read(fd, self.put_header_fmt, header, segment, offset, (TAG_PUT, ), read_data)
        if id != key:
            raise IntegrityError('Invalid segment entry header, is not for wanted id [segment {}, offset {}]'.format(
                segment, offset))
        return data if read_data else size

    def _read(self, fd, fmt, header, segment, offset, acceptable_tags, read_data=True):
        # some code shared by read() and iter_objects()
        try:
            hdr_tuple = fmt.unpack(header)
        except struct.error as err:
            raise IntegrityError('Invalid segment entry header [segment {}, offset {}]: {}'.format(
                segment, offset, err)) from None
        if fmt is self.put_header_fmt:
            crc, size, tag, key = hdr_tuple
        elif fmt is self.header_fmt:
            crc, size, tag = hdr_tuple
            key = None
        else:
            raise TypeError("_read called with unsupported format")
        if size > MAX_OBJECT_SIZE:
            # if you get this on an archive made with borg < 1.0.7 and millions of files and
            # you need to restore it, you can disable this check by using "if False:" above.
            raise IntegrityError('Invalid segment entry size {} - too big [segment {}, offset {}]'.format(
                size, segment, offset))
        if size < fmt.size:
            raise IntegrityError('Invalid segment entry size {} - too small [segment {}, offset {}]'.format(
                size, segment, offset))
        length = size - fmt.size
        if read_data:
            data = fd.read(length)
            if len(data) != length:
                raise IntegrityError('Segment entry data short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
                    segment, offset, length, len(data)))
            # running crc over everything after the stored crc field (header tail + payload)
            if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
                raise IntegrityError('Segment entry checksum mismatch [segment {}, offset {}]'.format(
                    segment, offset))
            if key is None and tag in (TAG_PUT, TAG_DELETE):
                # header_fmt was used: the first 32 payload bytes are the key
                key, data = data[:32], data[32:]
        else:
            if key is None and tag in (TAG_PUT, TAG_DELETE):
                key = fd.read(32)
                length -= 32
                if len(key) != 32:
                    raise IntegrityError('Segment entry key short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
                        segment, offset, 32, len(key)))
            # skip over the payload instead of reading it
            oldpos = fd.tell()
            seeked = fd.seek(length, os.SEEK_CUR) - oldpos
            data = None
            if seeked != length:
                raise IntegrityError('Segment entry data short seek [segment {}, offset {}]: expected {}, got {} bytes'.format(
                    segment, offset, length, seeked))
        if tag not in acceptable_tags:
            raise IntegrityError('Invalid segment entry header, did not get acceptable tag [segment {}, offset {}]'.format(
                segment, offset))
        return size, tag, key, data

    def write_put(self, id, data, raise_full=False):
        """Append a PUT entry for (id, data); return (segment, offset) of the entry."""
        data_size = len(data)
        if data_size > MAX_DATA_SIZE:
            # this would push the segment entry size beyond MAX_OBJECT_SIZE.
            raise IntegrityError('More than allowed put data [{} > {}]'.format(data_size, MAX_DATA_SIZE))
        fd = self.get_write_fd(raise_full=raise_full)
        size = data_size + self.put_header_fmt.size
        offset = self.offset
        header = self.header_no_crc_fmt.pack(size, TAG_PUT)
        # crc covers header (minus crc field), key and data
        crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
        fd.write(b''.join((crc, header, id, data)))
        self.offset += size
        return self.segment, offset

    def write_delete(self, id, raise_full=False):
        """Append a DELETE entry for *id*; return (segment, entry_size)."""
        fd = self.get_write_fd(raise_full=raise_full)
        header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
        crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
        fd.write(b''.join((crc, header, id)))
        self.offset += self.put_header_fmt.size
        return self.segment, self.put_header_fmt.size

    def write_commit(self, intermediate=False):
        """Append a COMMIT entry and close the segment; return the committed segment number."""
        if intermediate:
            # Intermediate commits go directly into the current segment - this makes checking their validity more
            # expensive, but is faster and reduces clobber.
            fd = self.get_write_fd()
            fd.sync()
        else:
            self.close_segment()
            fd = self.get_write_fd()
        header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
        crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
        fd.write(b''.join((crc, header)))
        self.close_segment()
        return self.segment - 1  # close_segment() increments it
# largest payload write_put() accepts: a full entry (header + key + data)
# must never exceed MAX_OBJECT_SIZE (enforced again by _read() on the read side)
MAX_DATA_SIZE = MAX_OBJECT_SIZE - LoggedIO.put_header_fmt.size