from configparser import RawConfigParser
from binascii import hexlify
from itertools import islice
import errno
import os
import shutil
import struct
import sys
from zlib import crc32

from .hashindex import NSIndex
from .helpers import Error, IntegrityError, read_msgpack, write_msgpack, unhexlify, UpgradableLock
from .lrucache import LRUCache

MAX_OBJECT_SIZE = 20 * 1024 * 1024
MAGIC = b'ATTICSEG'
TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2
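
# Each segment file starts with the 8 byte MAGIC string followed by a sequence of
# entries.  An entry is: 4 byte crc32, 4 byte total size, 1 byte tag (TAG_PUT,
# TAG_DELETE or TAG_COMMIT) and, for PUT/DELETE entries, a 32 byte key; PUT entries
# additionally carry the object data.  The crc covers everything after the crc field
# itself (see LoggedIO below).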


class Repository(object):
    """Filesystem based transactional key value store

    On disk layout:
    dir/README
    dir/config
    dir/data/<X / SEGMENTS_PER_DIR>/<X>
    dir/index.X
    dir/hints.X
    """
    DEFAULT_MAX_SEGMENT_SIZE = 5 * 1024 * 1024
    DEFAULT_SEGMENTS_PER_DIR = 10000

    class DoesNotExist(Error):
        """Repository {} does not exist"""

    class AlreadyExists(Error):
        """Repository {} already exists"""

    class InvalidRepository(Error):
        """{} is not a valid repository"""

    class CheckNeeded(Error):
        '''Inconsistency detected. Please run "attic check {}"'''

    def __init__(self, path, create=False):
        self.path = path
        self.io = None
        self.lock = None
        self.index = None
        self._active_txn = False
        if create:
            self.create(path)
        self.open(path)

    def __del__(self):
        self.close()

    def create(self, path):
        """Create a new empty repository at `path`
        """
        if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
            raise self.AlreadyExists(path)
        if not os.path.exists(path):
            os.mkdir(path)
        with open(os.path.join(path, 'README'), 'w') as fd:
            fd.write('This is an Attic repository\n')
        os.mkdir(os.path.join(path, 'data'))
        config = RawConfigParser()
        config.add_section('repository')
        config.set('repository', 'version', '1')
        config.set('repository', 'segments_per_dir', self.DEFAULT_SEGMENTS_PER_DIR)
        config.set('repository', 'max_segment_size', self.DEFAULT_MAX_SEGMENT_SIZE)
        config.set('repository', 'id', hexlify(os.urandom(32)).decode('ascii'))
        with open(os.path.join(path, 'config'), 'w') as fd:
            config.write(fd)

    def get_index_transaction_id(self):
        indices = sorted((int(name[6:]) for name in os.listdir(self.path) if name.startswith('index.') and name[6:].isdigit()))
        if indices:
            return indices[-1]
        else:
            return None

    def get_transaction_id(self):
        index_transaction_id = self.get_index_transaction_id()
        segments_transaction_id = self.io.get_segments_transaction_id()
        if index_transaction_id is not None and segments_transaction_id is None:
            raise self.CheckNeeded(self.path)
        # Attempt to automatically rebuild index if we crashed between commit
        # tag write and index save
        if index_transaction_id != segments_transaction_id:
            if index_transaction_id is not None and index_transaction_id > segments_transaction_id:
                replay_from = None
            else:
                replay_from = index_transaction_id
            self.replay_segments(replay_from, segments_transaction_id)
        return self.get_index_transaction_id()

    def open(self, path):
        self.path = path
        if not os.path.isdir(path):
            raise self.DoesNotExist(path)
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
            raise self.InvalidRepository(path)
        self.lock = UpgradableLock(os.path.join(path, 'config'))
        self.max_segment_size = self.config.getint('repository', 'max_segment_size')
        self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
        self.id = unhexlify(self.config.get('repository', 'id').strip())
        self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)

    def close(self):
        if self.lock:
            if self.io:
                self.io.close()
                self.io = None
            self.lock.release()
            self.lock = None
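
    # A transaction becomes durable in commit() by first appending a commit tag to the
    # current segment; compaction and the new index/hints files are only written
    # afterwards, so a crash in between is recovered by replaying segments (see
    # get_transaction_id above).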
    def commit(self):
        """Commit transaction
        """
        self.io.write_commit()
        self.compact_segments()
        self.write_index()
        self.rollback()

    def get_read_only_index(self, transaction_id):
        if transaction_id is None:
            return {}
        return NSIndex((os.path.join(self.path, 'index.%d') % transaction_id).encode('utf-8'), readonly=True)

    def get_index(self, transaction_id, do_cleanup=True):
        self._active_txn = True
        self.lock.upgrade()
        if transaction_id is None:
            self.index = NSIndex.create(os.path.join(self.path, 'index.tmp').encode('utf-8'))
            self.segments = {}
            self.compact = set()
        else:
            if do_cleanup:
                self.io.cleanup(transaction_id)
            shutil.copy(os.path.join(self.path, 'index.%d' % transaction_id),
                        os.path.join(self.path, 'index.tmp'))
            self.index = NSIndex(os.path.join(self.path, 'index.tmp').encode('utf-8'))
            hints = read_msgpack(os.path.join(self.path, 'hints.%d' % transaction_id))
            if hints[b'version'] != 1:
                raise ValueError('Unknown hints file version: %d' % hints[b'version'])
            self.segments = hints[b'segments']
            self.compact = set(hints[b'compact'])
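
    # The hints.X file stores what is needed to resume appending without rescanning all
    # segments: the per-segment count of entries that are still in use and the set of
    # segments that are candidates for compaction.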
    def write_index(self):
        hints = {b'version': 1,
                 b'segments': self.segments,
                 b'compact': list(self.compact)}
        transaction_id = self.io.get_segments_transaction_id()
        write_msgpack(os.path.join(self.path, 'hints.%d' % transaction_id), hints)
        self.index.flush()
        os.rename(os.path.join(self.path, 'index.tmp'),
                  os.path.join(self.path, 'index.%d' % transaction_id))
        # Remove old indices
        current = '.%d' % transaction_id
        for name in os.listdir(self.path):
            if not name.startswith('index.') and not name.startswith('hints.'):
                continue
            if name.endswith(current):
                continue
            os.unlink(os.path.join(self.path, name))
        self.index = None
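
    # Compaction copies the still-referenced PUT entries of sparse segments into the
    # current write segment, re-emits DELETE entries that are newer than the last saved
    # index, and finally removes the emptied segment files.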
    def compact_segments(self):
        """Compact sparse segments by copying data into new segments
        """
        if not self.compact:
            return
        index_transaction_id = self.get_index_transaction_id()
        segments = self.segments
        for segment in sorted(self.compact):
            if self.io.segment_exists(segment):
                for tag, key, data in self.io.iter_objects(segment, include_data=True):
                    if tag == TAG_PUT and self.index.get(key, (-1, -1))[0] == segment:
                        new_segment, offset = self.io.write_put(key, data)
                        self.index[key] = new_segment, offset
                        segments.setdefault(new_segment, 0)
                        segments[new_segment] += 1
                        segments[segment] -= 1
                    elif tag == TAG_DELETE:
                        if index_transaction_id is None or segment > index_transaction_id:
                            self.io.write_delete(key)
                assert segments[segment] == 0
        self.io.write_commit()
        for segment in sorted(self.compact):
            assert self.segments.pop(segment) == 0
            self.io.delete_segment(segment)
        self.compact = set()
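
    # Index rebuilding: replay every segment entry written after the last saved index
    # (or all entries if there is no index yet) and persist the resulting index/hints.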
    def replay_segments(self, index_transaction_id, segments_transaction_id):
        self.get_index(index_transaction_id, do_cleanup=False)
        for segment, filename in self.io.segment_iterator():
            if index_transaction_id is not None and segment <= index_transaction_id:
                continue
            if segment > segments_transaction_id:
                break
            self.segments[segment] = 0
            for tag, key, offset in self.io.iter_objects(segment):
                if tag == TAG_PUT:
                    try:
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                    except KeyError:
                        pass
                    self.compact.add(segment)
                elif tag == TAG_COMMIT:
                    continue
                else:
                    raise self.CheckNeeded(self.path)
            if self.segments[segment] == 0:
                self.compact.add(segment)
        self.write_index()
        self.rollback()

    def check(self, repair=False):
        """Check repository consistency

        This method verifies all segment checksums and makes sure
        the index is consistent with the data stored in the segments.
        """
        error_found = False

        def report_progress(msg, error=False):
            nonlocal error_found
            if error:
                error_found = True
            print(msg, file=sys.stderr)
            sys.stderr.flush()

        assert not self._active_txn
        report_progress('Starting repository check...')
        try:
            transaction_id = self.get_transaction_id()
            current_index = self.get_read_only_index(transaction_id)
        except Exception:
            transaction_id = self.io.get_segments_transaction_id()
            current_index = None
        if transaction_id is None:
            transaction_id = self.get_index_transaction_id()
        if transaction_id is None:
            transaction_id = self.io.get_latest_segment()
        segments_transaction_id = self.io.get_segments_transaction_id()
        self.get_index(None)
        for segment, filename in self.io.segment_iterator():
            if segment > transaction_id:
                continue
            try:
                objects = list(self.io.iter_objects(segment))
            except (IntegrityError, struct.error):
                report_progress('Error reading segment {}'.format(segment), error=True)
                objects = []
                if repair:
                    self.io.recover_segment(segment, filename)
                    objects = list(self.io.iter_objects(segment))
            self.segments[segment] = 0
            for tag, key, offset in objects:
                if tag == TAG_PUT:
                    try:
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                        self.compact.add(segment)
                    except KeyError:
                        pass
                elif tag == TAG_COMMIT:
                    continue
                else:
                    report_progress('Unexpected tag {} in segment {}'.format(tag, segment), error=True)
        # We might need to add a commit tag if no committed segment is found
        if repair and segments_transaction_id is None:
            report_progress('Adding commit tag to segment {}'.format(transaction_id))
            self.io.segment = transaction_id + 1
            self.io.write_commit()
            self.io.close_segment()
        if current_index and not repair:
            if len(current_index) != len(self.index):
                report_progress('Index object count mismatch. {} != {}'.format(len(current_index), len(self.index)), error=True)
            elif current_index:
                for key, value in self.index.iteritems():
                    if current_index.get(key, (-1, -1)) != value:
                        report_progress('Index mismatch for key {}. {} != {}'.format(key, value, current_index.get(key, (-1, -1))), error=True)
        if not error_found:
            report_progress('Repository check complete, no problems found.')
        if repair:
            self.compact_segments()
            self.write_index()
        else:
            os.unlink(os.path.join(self.path, 'index.tmp'))
        self.rollback()
        return not error_found or repair

    def rollback(self):
        """Abort the current transaction and discard the in-memory index
        """
        self.index = None
        self._active_txn = False

    def __len__(self):
        if not self.index:
            self.index = self.get_read_only_index(self.get_transaction_id())
        return len(self.index)

    def list(self, limit=None, marker=None):
        if not self.index:
            self.index = self.get_read_only_index(self.get_transaction_id())
        return [id_ for id_, _ in islice(self.index.iteritems(marker=marker), limit)]

    def get(self, id_):
        if not self.index:
            self.index = self.get_read_only_index(self.get_transaction_id())
        try:
            segment, offset = self.index[id_]
            return self.io.read(segment, offset, id_)
        except KeyError:
            raise self.DoesNotExist(self.path)

    def get_many(self, ids, is_preloaded=False):
        for id_ in ids:
            yield self.get(id_)
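
    # Overwriting an existing id appends a DELETE entry for the old value first and marks
    # the segment holding it for compaction; the new value is then appended as a PUT entry
    # and the index is updated to point at it.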
    def put(self, id, data, wait=True):
        if not self._active_txn:
            self.get_index(self.get_transaction_id())
        try:
            segment, _ = self.index[id]
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.segments.setdefault(segment, 0)
            self.compact.add(segment)
        except KeyError:
            pass
        segment, offset = self.io.write_put(id, data)
        self.segments.setdefault(segment, 0)
        self.segments[segment] += 1
        self.index[id] = segment, offset

    def delete(self, id, wait=True):
        if not self._active_txn:
            self.get_index(self.get_transaction_id())
        try:
            segment, offset = self.index.pop(id)
        except KeyError:
            raise self.DoesNotExist(self.path)
        self.segments[segment] -= 1
        self.compact.add(segment)
        segment = self.io.write_delete(id)
        self.compact.add(segment)
        self.segments.setdefault(segment, 0)

    def preload(self, ids):
        """Preload objects (only applies to remote repositories)
        """


class LoggedIO(object):

    header_fmt = struct.Struct('<IIB')
    assert header_fmt.size == 9
    put_header_fmt = struct.Struct('<IIB32s')
    assert put_header_fmt.size == 41
    header_no_crc_fmt = struct.Struct('<IB')
    assert header_no_crc_fmt.size == 5
    crc_fmt = struct.Struct('<I')
    assert crc_fmt.size == 4

    _commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
    COMMIT = crc_fmt.pack(crc32(_commit)) + _commit
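
    # COMMIT is a precomputed 9 byte commit entry: a 4 byte crc32 followed by the 5 byte
    # header (size=9, tag=TAG_COMMIT).  is_committed_segment() compares the tail of a
    # segment file against it.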

    def __init__(self, path, limit, segments_per_dir, capacity=90):
        self.path = path
        self.fds = LRUCache(capacity)
        self.segment = 0
        self.limit = limit
        self.segments_per_dir = segments_per_dir
        self.offset = 0
        self._write_fd = None

    def close(self):
        for segment in list(self.fds.keys()):
            self.fds.pop(segment).close()
        self.close_segment()
        self.fds = None  # Just to make sure we're disabled

    def segment_iterator(self, reverse=False):
        for dirpath, dirs, filenames in os.walk(os.path.join(self.path, 'data')):
            dirs.sort(key=int, reverse=reverse)
            filenames = sorted((filename for filename in filenames if filename.isdigit()), key=int, reverse=reverse)
            for filename in filenames:
                yield int(filename), os.path.join(dirpath, filename)

    def get_latest_segment(self):
        for segment, filename in self.segment_iterator(reverse=True):
            return segment
        return None

    def get_segments_transaction_id(self):
        """Return the id of the last committed segment
        """
        for segment, filename in self.segment_iterator(reverse=True):
            if self.is_committed_segment(filename):
                return segment
        return None

    def cleanup(self, transaction_id):
        """Delete segment files left by aborted transactions
        """
        self.segment = transaction_id + 1
        for segment, filename in self.segment_iterator(reverse=True):
            if segment > transaction_id:
                os.unlink(filename)
            else:
                break

    def is_committed_segment(self, filename):
        """Check if segment ends with a COMMIT tag
        """
        with open(filename, 'rb') as fd:
            try:
                fd.seek(-self.header_fmt.size, os.SEEK_END)
            except Exception as e:
                # return False if segment file is empty or too small
                if e.errno == errno.EINVAL:
                    return False
                raise e
            return fd.read(self.header_fmt.size) == self.COMMIT

    def segment_filename(self, segment):
        return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))
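
    # The write fd rolls over to a new segment file once the current offset exceeds
    # `limit` (the repository's max_segment_size); a new data subdirectory is started
    # every `segments_per_dir` segments.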
    def get_write_fd(self, no_new=False):
        if not no_new and self.offset and self.offset > self.limit:
            self.close_segment()
        if not self._write_fd:
            if self.segment % self.segments_per_dir == 0:
                dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
                if not os.path.exists(dirname):
                    os.mkdir(dirname)
            self._write_fd = open(self.segment_filename(self.segment), 'ab')
            self._write_fd.write(MAGIC)
            self.offset = 8
        return self._write_fd

    def get_fd(self, segment):
        try:
            return self.fds[segment]
        except KeyError:
            fd = open(self.segment_filename(segment), 'rb')
            self.fds[segment] = fd
            return fd

    def delete_segment(self, segment):
        try:
            os.unlink(self.segment_filename(segment))
        except OSError:
            pass

    def segment_exists(self, segment):
        return os.path.exists(self.segment_filename(segment))
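
    # iter_objects() scans a segment sequentially and verifies the crc of every entry;
    # it yields (tag, key, offset) tuples, or (tag, key, data) when include_data is set.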
    def iter_objects(self, segment, include_data=False):
        fd = self.get_fd(segment)
        fd.seek(0)
        if fd.read(8) != MAGIC:
            raise IntegrityError('Invalid segment header')
        offset = 8
        header = fd.read(self.header_fmt.size)
        while header:
            crc, size, tag = self.header_fmt.unpack(header)
            if size > MAX_OBJECT_SIZE:
                raise IntegrityError('Invalid segment object size')
            rest = fd.read(size - self.header_fmt.size)
            if crc32(rest, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
                raise IntegrityError('Segment checksum mismatch')
            if tag not in (TAG_PUT, TAG_DELETE, TAG_COMMIT):
                raise IntegrityError('Invalid segment entry header')
            key = None
            if tag in (TAG_PUT, TAG_DELETE):
                key = rest[:32]
            if include_data:
                yield tag, key, rest[32:]
            else:
                yield tag, key, offset
            offset += size
            header = fd.read(self.header_fmt.size)
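
    # Recovery rewrites a damaged segment by scanning it byte by byte and keeping only
    # entries whose size and crc check out; the original file is preserved with a
    # `.beforerecover` suffix.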
    def recover_segment(self, segment, filename):
        self.fds.pop(segment).close()
        # FIXME: save a copy of the original file
        with open(filename, 'rb') as fd:
            data = memoryview(fd.read())
        os.rename(filename, filename + '.beforerecover')
        print('attempting to recover ' + filename, file=sys.stderr)
        with open(filename, 'wb') as fd:
            fd.write(MAGIC)
            while len(data) >= self.header_fmt.size:
                crc, size, tag = self.header_fmt.unpack(data[:self.header_fmt.size])
                if size < self.header_fmt.size or size > len(data):
                    data = data[1:]
                    continue
                if crc32(data[4:size]) & 0xffffffff != crc:
                    data = data[1:]
                    continue
                fd.write(data[:size])
                data = data[size:]

    def read(self, segment, offset, id):
        if segment == self.segment and self._write_fd:
            self._write_fd.flush()
        fd = self.get_fd(segment)
        fd.seek(offset)
        header = fd.read(self.put_header_fmt.size)
        crc, size, tag, key = self.put_header_fmt.unpack(header)
        if size > MAX_OBJECT_SIZE:
            raise IntegrityError('Invalid segment object size')
        data = fd.read(size - self.put_header_fmt.size)
        if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
            raise IntegrityError('Segment checksum mismatch')
        if tag != TAG_PUT or id != key:
            raise IntegrityError('Invalid segment entry header')
        return data
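
    # A PUT entry is laid out as: 4 byte crc32, 4 byte total entry size, 1 byte tag,
    # 32 byte key, object data (see put_header_fmt above); the crc covers everything
    # after the crc field.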
    def write_put(self, id, data):
        size = len(data) + self.put_header_fmt.size
        fd = self.get_write_fd()
        offset = self.offset
        header = self.header_no_crc_fmt.pack(size, TAG_PUT)
        crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
        fd.write(b''.join((crc, header, id, data)))
        self.offset += size
        return self.segment, offset

    def write_delete(self, id):
        fd = self.get_write_fd()
        header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
        crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
        fd.write(b''.join((crc, header, id)))
        self.offset += self.put_header_fmt.size
        return self.segment

    def write_commit(self):
        fd = self.get_write_fd(no_new=True)
        header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
        crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
        fd.write(b''.join((crc, header)))
        self.close_segment()

    def close_segment(self):
        if self._write_fd:
            self.segment += 1
            self.offset = 0
            os.fsync(self._write_fd)
            self._write_fd.close()
            self._write_fd = None
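

# Minimal usage sketch (assumes the module is invoked as part of the package, e.g. via
# ``python -m attic.repository``, so that the relative imports above resolve): create a
# throwaway repository, store and read back one object, then delete it again.
if __name__ == '__main__':
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        repository = Repository(os.path.join(tmp, 'demo.attic'), create=True)
        key = os.urandom(32)  # object ids are fixed-size 32 byte keys
        repository.put(key, b'some object data')
        repository.commit()
        assert repository.get(key) == b'some object data'
        repository.delete(key)
        repository.commit()
        repository.close()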