from configparser import RawConfigParser
from binascii import hexlify
import errno
import os
import re
import shutil
import struct
import sys
import time
from zlib import crc32

from .hashindex import NSIndex
from .helpers import Error, IntegrityError, read_msgpack, write_msgpack, unhexlify, UpgradableLock
from .lrucache import LRUCache

MAX_OBJECT_SIZE = 20 * 1024 * 1024

MAGIC = b'ATTICSEG'
TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2
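
# Each segment file starts with MAGIC and is followed by a sequence of log
# entries.  Every entry carries a little-endian header of crc32, size and tag
# (see LoggedIO.header_fmt below); PUT and DELETE entries additionally carry a
# 32 byte key, and PUT entries the object data.  A transaction becomes durable
# by appending a COMMIT entry and fsyncing the segment file.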


class Repository(object):
    """Filesystem based transactional key value store

    On disk layout:
    dir/README
    dir/config
    dir/data/<X / SEGMENTS_PER_DIR>/<X>
    dir/index.X
    dir/hints.X
    """
    DEFAULT_MAX_SEGMENT_SIZE = 5 * 1024 * 1024
    DEFAULT_SEGMENTS_PER_DIR = 10000

    class DoesNotExist(Error):
        """Repository {} does not exist"""

    class AlreadyExists(Error):
        """Repository {} already exists"""

    class InvalidRepository(Error):
        """{} is not a valid repository"""

    class CheckNeeded(Error):
        '''Inconsistency detected. Please run "attic check {}"'''

    def __init__(self, path, create=False):
        self.path = path
        self.io = None
        self.lock = None
        self.index = None
        self._active_txn = False
        if create:
            self.create(path)
        self.open(path)

    def __del__(self):
        self.close()

    def create(self, path):
        """Create a new empty repository at `path`
        """
        if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
            raise self.AlreadyExists(path)
        if not os.path.exists(path):
            os.mkdir(path)
        with open(os.path.join(path, 'README'), 'w') as fd:
            fd.write('This is an Attic repository\n')
        os.mkdir(os.path.join(path, 'data'))
        config = RawConfigParser()
        config.add_section('repository')
        config.set('repository', 'version', '1')
        config.set('repository', 'segments_per_dir', self.DEFAULT_SEGMENTS_PER_DIR)
        config.set('repository', 'max_segment_size', self.DEFAULT_MAX_SEGMENT_SIZE)
        config.set('repository', 'id', hexlify(os.urandom(32)).decode('ascii'))
        with open(os.path.join(path, 'config'), 'w') as fd:
            config.write(fd)
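
    # With the defaults above, the resulting config file looks roughly like
    # this (the id is a random 256 bit value, shown as a placeholder):
    #
    #   [repository]
    #   version = 1
    #   segments_per_dir = 10000
    #   max_segment_size = 5242880
    #   id = <64 hex digits>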

    def get_index_transaction_id(self):
        indices = sorted((int(name[6:]) for name in os.listdir(self.path) if name.startswith('index.') and name[6:].isdigit()))
        if indices:
            return indices[-1]
        else:
            return None

    def get_transaction_id(self):
        index_transaction_id = self.get_index_transaction_id()
        segments_transaction_id = self.io.get_segments_transaction_id(index_transaction_id or 0)
        if index_transaction_id != segments_transaction_id:
            raise self.CheckNeeded(self.path)
        return index_transaction_id
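
    # The transaction id is the number of the segment that received the last
    # COMMIT tag.  Both the on-disk index and the hints file are named after
    # it (index.N / hints.N), and get_transaction_id() refuses to proceed when
    # they disagree with the segment files, since that indicates an
    # inconsistent repository.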

    def open(self, path):
        self.path = path
        if not os.path.isdir(path):
            raise self.DoesNotExist(path)
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
            raise self.InvalidRepository(path)
        self.lock = UpgradableLock(os.path.join(path, 'config'))
        self.max_segment_size = self.config.getint('repository', 'max_segment_size')
        self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
        self.id = unhexlify(self.config.get('repository', 'id').strip())
        self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)

    def close(self):
        if self.lock:
            if self.io:
                self.io.close()
                self.io = None
            self.lock.release()
            self.lock = None

    def commit(self):
        """Commit transaction
        """
        self.io.write_commit()
        self.compact_segments()
        self.write_index()
        self.rollback()
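
    # A commit is a four step sequence: append a COMMIT tag to the current
    # segment and fsync it, rewrite any sparse segments (compact_segments),
    # persist the new index and hints files, and finally drop the in-memory
    # transaction state (rollback).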

    def get_read_only_index(self, transaction_id):
        if transaction_id is None:
            return {}
        return NSIndex(os.path.join(self.path, 'index.%d' % transaction_id).encode('utf-8'), readonly=True)

    def get_index(self, transaction_id):
        self.lock.upgrade()
        if transaction_id is None:
            self.index = NSIndex.create(os.path.join(self.path, 'index.tmp').encode('utf-8'))
            self.segments = {}
            self.compact = set()
        else:
            self.io.cleanup(transaction_id)
            shutil.copy(os.path.join(self.path, 'index.%d' % transaction_id),
                        os.path.join(self.path, 'index.tmp'))
            self.index = NSIndex(os.path.join(self.path, 'index.tmp').encode('utf-8'))
            hints = read_msgpack(os.path.join(self.path, 'hints.%d' % transaction_id))
            if hints[b'version'] != 1:
                raise ValueError('Unknown hints file version: %d' % hints[b'version'])
            self.segments = hints[b'segments']
            self.compact = set(hints[b'compact'])
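
    # The hints file is a small msgpack document kept next to the index:
    #   {b'version': 1,
    #    b'segments': {segment_no: number_of_live_PUT_entries, ...},
    #    b'compact': [segment_no, ...]}
    # It lets a new transaction resume the per-segment usage counts and the
    # set of segments worth compacting without rescanning everything.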

    def write_index(self):
        hints = {b'version': 1,
                 b'segments': self.segments,
                 b'compact': list(self.compact)}
        transaction_id = self.io.get_segments_transaction_id()
        write_msgpack(os.path.join(self.path, 'hints.%d' % transaction_id), hints)
        self.index.flush()
        os.rename(os.path.join(self.path, 'index.tmp'),
                  os.path.join(self.path, 'index.%d' % transaction_id))
        # Remove old indices
        current = '.%d' % transaction_id
        for name in os.listdir(self.path):
            if not name.startswith('index.') and not name.startswith('hints.'):
                continue
            if name.endswith(current):
                continue
            os.unlink(os.path.join(self.path, name))
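
    # index.tmp is renamed into place only after it has been flushed, so a
    # reader always sees either the previous index.N or the complete new one,
    # never a partially written file (os.rename is atomic on POSIX).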

    def compact_segments(self):
        """Compact sparse segments by copying data into new segments
        """
        if not self.compact:
            return

        def lookup(tag, key):
            return tag == TAG_PUT and self.index.get(key, (-1, -1))[0] == segment
        segments = self.segments
        for segment in sorted(self.compact):
            if segments[segment] > 0:
                for tag, key, data in self.io.iter_objects(segment, lookup, include_data=True):
                    new_segment, offset = self.io.write_put(key, data)
                    self.index[key] = new_segment, offset
                    segments.setdefault(new_segment, 0)
                    segments[new_segment] += 1
                    segments[segment] -= 1
                assert segments[segment] == 0
        self.io.write_commit()
        for segment in self.compact:
            assert self.segments.pop(segment) == 0
            self.io.delete_segment(segment)
        self.compact = set()
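
    # Compaction only copies entries that are still referenced by the index
    # (the lookup callback filters on that), so overwritten and deleted
    # objects are dropped.  The copies are committed before the old segment
    # files are unlinked, which keeps every object reachable from a committed
    # segment at all times.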

    def check(self, progress=False, repair=False):
        """Check repository consistency

        This method verifies all segment checksums and makes sure
        the index is consistent with the data stored in the segments.
        """
        error_found = False

        def report_progress(msg, error=False):
            nonlocal error_found
            if error:
                error_found = True
            if error or progress:
                print(msg, file=sys.stderr)
                sys.stderr.flush()

        assert not self._active_txn
        index_transaction_id = self.get_index_transaction_id()
        segments_transaction_id = self.io.get_segments_transaction_id(index_transaction_id)
        if index_transaction_id is None and segments_transaction_id is None:
            return True
        transaction_id = max(index_transaction_id or 0, segments_transaction_id or 0)
        self.get_index(None)
        if index_transaction_id == segments_transaction_id:
            current_index = self.get_read_only_index(transaction_id)
        else:
            current_index = None
            report_progress('No suitable index found', error=True)
        progress_time = None
        for segment, filename in self.io.segment_iterator():
            if segment > transaction_id:
                continue
            if progress:
                if int(time.time()) != progress_time:
                    progress_time = int(time.time())
                    report_progress('Checking segment {}/{}'.format(segment, transaction_id))
            try:
                objects = list(self.io.iter_objects(segment))
            except (IntegrityError, struct.error):
                report_progress('Error reading segment {}'.format(segment), error=True)
                objects = []
                if repair:
                    self.io.recover_segment(segment, filename)
                    objects = list(self.io.iter_objects(segment))
            self.segments[segment] = 0
            for tag, key, offset in objects:
                if tag == TAG_PUT:
                    try:
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                        report_progress('Key found in more than one segment. Segment={}, key={}'.format(segment, hexlify(key)), error=True)
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                        self.compact.add(segment)
                    except KeyError:
                        pass
                elif tag == TAG_COMMIT:
                    continue
                else:
                    report_progress('Unexpected tag {} in segment {}'.format(tag, segment), error=True)
        if current_index and len(current_index) != len(self.index):
            report_progress('Index object count mismatch. {} != {}'.format(len(current_index), len(self.index)), error=True)
        if not error_found:
            report_progress('Check complete, no errors found.')
        if repair:
            self.write_index()
        self.rollback()
        return not error_found or repair
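
    # check() rebuilds an index from scratch by replaying every committed
    # segment and, when a matching committed index exists, compares the object
    # counts as a sanity check.  With repair=True the rebuilt index and hints
    # are written out, turning the replay into the new source of truth.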

    def rollback(self):
        """Rollback transaction
        """
        self.index = None
        self._active_txn = False

    def __len__(self):
        if not self.index:
            self.index = self.get_read_only_index(self.get_transaction_id())
        return len(self.index)

    def get(self, id_):
        if not self.index:
            self.index = self.get_read_only_index(self.get_transaction_id())
        try:
            segment, offset = self.index[id_]
            return self.io.read(segment, offset, id_)
        except KeyError:
            raise self.DoesNotExist(self.path)

    def get_many(self, ids, is_preloaded=False):
        for id_ in ids:
            yield self.get(id_)

    def put(self, id, data, wait=True):
        if not self._active_txn:
            self.get_index(self.get_transaction_id())
            self._active_txn = True
        try:
            segment, _ = self.index[id]
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.segments.setdefault(segment, 0)
            self.compact.add(segment)
        except KeyError:
            pass
        segment, offset = self.io.write_put(id, data)
        self.segments.setdefault(segment, 0)
        self.segments[segment] += 1
        self.index[id] = segment, offset

    def delete(self, id, wait=True):
        if not self._active_txn:
            self.get_index(self.get_transaction_id())
            self._active_txn = True
        try:
            segment, offset = self.index.pop(id)
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.compact.add(segment)
            self.segments.setdefault(segment, 0)
        except KeyError:
            raise self.DoesNotExist(self.path)
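
    # Overwriting an existing key first logs a DELETE for the old copy and
    # then a fresh PUT; replaying the segments in order (as check() does)
    # therefore always converges on the latest value, and the superseded
    # segment is marked for compaction.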

    def preload(self, ids):
        """Preload objects (only applies to remote repositories)
        """


class LoggedIO(object):

    header_fmt = struct.Struct('<IIB')
    assert header_fmt.size == 9
    put_header_fmt = struct.Struct('<IIB32s')
    assert put_header_fmt.size == 41
    header_no_crc_fmt = struct.Struct('<IB')
    assert header_no_crc_fmt.size == 5
    crc_fmt = struct.Struct('<I')
    assert crc_fmt.size == 4

    _commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
    COMMIT = crc_fmt.pack(crc32(_commit)) + _commit
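
    # Wire format of a segment entry, all little-endian:
    #   <I crc32> <I size> <B tag>             - every entry (header_fmt)
    #   <I crc32> <I size> <B tag> <32s key>   - PUT and DELETE (put_header_fmt)
    # followed by the object data for PUT entries.  ``size`` is the total
    # entry size including the header, and the CRC covers everything after
    # the crc field itself.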

    def __init__(self, path, limit, segments_per_dir, capacity=100):
        self.path = path
        self.fds = LRUCache(capacity)
        self.segment = 0
        self.limit = limit
        self.segments_per_dir = segments_per_dir
        self.offset = 0
        self._write_fd = None

    def close(self):
        for segment in list(self.fds.keys()):
            self.fds.pop(segment).close()
        self.close_segment()
        self.fds = None  # Just to make sure we're disabled

    def segment_iterator(self, reverse=False):
        for dirpath, dirs, filenames in os.walk(os.path.join(self.path, 'data')):
            dirs.sort(key=int, reverse=reverse)
            filenames = sorted((filename for filename in filenames if filename.isdigit()), key=int, reverse=reverse)
            for filename in filenames:
                yield int(filename), os.path.join(dirpath, filename)

    def get_segments_transaction_id(self, index_transaction_id=0):
        """Verify that the transaction id is consistent with the index transaction id
        """
        for segment, filename in self.segment_iterator(reverse=True):
            if index_transaction_id is not None and segment < index_transaction_id:
                # The index is newer than any committed transaction found
                return -1
            if self.is_committed_segment(filename):
                return segment
        return None
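
    # Walking the segments newest-first, the first one that ends in a COMMIT
    # tag is the segments' transaction id.  Hitting a segment older than the
    # index without finding a commit returns the sentinel -1, which can never
    # match an index transaction id and so triggers CheckNeeded in
    # Repository.get_transaction_id().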

    def cleanup(self, transaction_id):
        """Delete segment files left by aborted transactions
        """
        self.segment = transaction_id + 1
        for segment, filename in self.segment_iterator(reverse=True):
            if segment > transaction_id:
                os.unlink(filename)
            else:
                break

    def is_committed_segment(self, filename):
        """Check if segment ends with a COMMIT tag
        """
        with open(filename, 'rb') as fd:
            try:
                fd.seek(-self.header_fmt.size, os.SEEK_END)
            except Exception as e:
                # return False if segment file is empty or too small
                if e.errno == errno.EINVAL:
                    return False
                raise e
            return fd.read(self.header_fmt.size) == self.COMMIT

    def segment_filename(self, segment):
        return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))

    def get_write_fd(self, no_new=False):
        if not no_new and self.offset and self.offset > self.limit:
            self.close_segment()
        if not self._write_fd:
            if self.segment % self.segments_per_dir == 0:
                dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
                if not os.path.exists(dirname):
                    os.mkdir(dirname)
            self._write_fd = open(self.segment_filename(self.segment), 'ab')
            self._write_fd.write(MAGIC)
            self.offset = 8
        return self._write_fd

    def get_fd(self, segment):
        try:
            return self.fds[segment]
        except KeyError:
            fd = open(self.segment_filename(segment), 'rb')
            self.fds[segment] = fd
            return fd

    def delete_segment(self, segment):
        try:
            os.unlink(self.segment_filename(segment))
        except OSError:
            pass

    def iter_objects(self, segment, lookup=None, include_data=False):
        fd = self.get_fd(segment)
        fd.seek(0)
        if fd.read(8) != MAGIC:
            raise IntegrityError('Invalid segment header')
        offset = 8
        header = fd.read(self.header_fmt.size)
        while header:
            crc, size, tag = self.header_fmt.unpack(header)
            if size > MAX_OBJECT_SIZE:
                raise IntegrityError('Invalid segment object size')
            rest = fd.read(size - self.header_fmt.size)
            if crc32(rest, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
                raise IntegrityError('Segment checksum mismatch')
            if tag not in (TAG_PUT, TAG_DELETE, TAG_COMMIT):
                raise IntegrityError('Invalid segment entry header')
            key = None
            if tag in (TAG_PUT, TAG_DELETE):
                key = rest[:32]
            if not lookup or lookup(tag, key):
                if include_data:
                    yield tag, key, rest[32:]
                else:
                    yield tag, key, offset
            offset += size
            header = fd.read(self.header_fmt.size)
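
    # Yields (tag, key, offset) tuples by default, or (tag, key, data) when
    # include_data is set; the optional lookup(tag, key) callback lets callers
    # such as compact_segments() skip entries they are not interested in.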

    def recover_segment(self, segment, filename):
        self.fds.pop(segment).close()
        # FIXME: save a copy of the original file
        with open(filename, 'rb') as fd:
            data = memoryview(fd.read())
        os.rename(filename, filename + '.beforerecover')
        print('attempting to recover ' + filename, file=sys.stderr)
        with open(filename, 'wb') as fd:
            fd.write(MAGIC)
            while len(data) >= self.header_fmt.size:
                crc, size, tag = self.header_fmt.unpack(data[:self.header_fmt.size])
                if size > len(data):
                    data = data[1:]
                    continue
                if crc32(data[4:size]) & 0xffffffff != crc:
                    data = data[1:]
                    continue
                fd.write(data[:size])
                data = data[size:]
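
    # Recovery slides a one byte window over the damaged file and keeps every
    # span that parses as an entry header with a matching CRC, writing the
    # survivors into a fresh segment file; the original is kept as
    # <filename>.beforerecover.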

    def read(self, segment, offset, id):
        if segment == self.segment and self._write_fd:
            self._write_fd.flush()
        fd = self.get_fd(segment)
        fd.seek(offset)
        header = fd.read(self.put_header_fmt.size)
        crc, size, tag, key = self.put_header_fmt.unpack(header)
        if size > MAX_OBJECT_SIZE:
            raise IntegrityError('Invalid segment object size')
        data = fd.read(size - self.put_header_fmt.size)
        if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
            raise IntegrityError('Segment checksum mismatch')
        if tag != TAG_PUT or id != key:
            raise IntegrityError('Invalid segment entry header')
        return data

    def write_put(self, id, data):
        size = len(data) + self.put_header_fmt.size
        fd = self.get_write_fd()
        offset = self.offset
        header = self.header_no_crc_fmt.pack(size, TAG_PUT)
        crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
        fd.write(b''.join((crc, header, id, data)))
        self.offset += size
        return self.segment, offset

    def write_delete(self, id):
        fd = self.get_write_fd()
        header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
        crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
        fd.write(b''.join((crc, header, id)))
        self.offset += self.put_header_fmt.size
        return self.segment

    def write_commit(self):
        fd = self.get_write_fd(no_new=True)
        header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
        crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
        fd.write(b''.join((crc, header)))
        self.close_segment()

    def close_segment(self):
        if self._write_fd:
            self.segment += 1
            self.offset = 0
            os.fsync(self._write_fd)
            self._write_fd.close()
            self._write_fd = None
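

# Minimal usage sketch: exercises put/get/delete through a full commit cycle.
# Assumption: this is run in the package context (e.g. ``python -m
# attic.repository``) so the relative imports above resolve.
if __name__ == '__main__':
    import tempfile
    from hashlib import sha256

    with tempfile.TemporaryDirectory() as tmp:
        repository = Repository(os.path.join(tmp, 'repo'), create=True)
        data = b'example object data'
        key = sha256(data).digest()  # object ids are 32 byte keys
        repository.put(key, data)
        repository.commit()
        assert repository.get(key) == data
        repository.delete(key)
        repository.commit()
        repository.close()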