from configparser import RawConfigParser
from binascii import hexlify
import errno
import os
import re
import shutil
import struct
import sys
import time
from zlib import crc32

from .hashindex import NSIndex
from .helpers import Error, IntegrityError, read_msgpack, write_msgpack, unhexlify, UpgradableLock
from .lrucache import LRUCache

MAX_OBJECT_SIZE = 20 * 1024 * 1024
MAGIC = b'ATTICSEG'
TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2
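# A segment file starts with MAGIC and then holds a sequence of log entries:
# TAG_PUT stores a key/value pair, TAG_DELETE marks a key as removed and
# TAG_COMMIT terminates a transaction.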


class Repository(object):
    """Filesystem based transactional key value store

    On disk layout:
    dir/README
    dir/config
    dir/data/<X / SEGMENTS_PER_DIR>/<X>
    dir/index.X
    dir/hints.X
    """
    DEFAULT_MAX_SEGMENT_SIZE = 5 * 1024 * 1024
    DEFAULT_SEGMENTS_PER_DIR = 10000

    class DoesNotExist(Error):
        """Repository {} does not exist"""

    class AlreadyExists(Error):
        """Repository {} already exists"""

    class InvalidRepository(Error):
        """{} is not a valid repository"""

    class CheckNeeded(Error):
        '''Inconsistency detected. Please run "attic check {}"'''

    def __init__(self, path, create=False):
        self.path = path
        self.io = None
        self.lock = None
        self.index = None
        if create:
            self.create(path)
        self.open(path)

    def __del__(self):
        self.close()

    def create(self, path):
        """Create a new empty repository at `path`
        """
        if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
            raise self.AlreadyExists(path)
        if not os.path.exists(path):
            os.mkdir(path)
        with open(os.path.join(path, 'README'), 'w') as fd:
            fd.write('This is an Attic repository\n')
        os.mkdir(os.path.join(path, 'data'))
        config = RawConfigParser()
        config.add_section('repository')
        config.set('repository', 'version', '1')
        config.set('repository', 'segments_per_dir', self.DEFAULT_SEGMENTS_PER_DIR)
        config.set('repository', 'max_segment_size', self.DEFAULT_MAX_SEGMENT_SIZE)
        config.set('repository', 'id', hexlify(os.urandom(32)).decode('ascii'))
        with open(os.path.join(path, 'config'), 'w') as fd:
            config.write(fd)
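
    # The newest index.<N> file identifies the last committed transaction;
    # N is the number of the segment that contains its COMMIT entry.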
    def get_index_transaction_id(self):
        indices = sorted((int(name[6:]) for name in os.listdir(self.path) if name.startswith('index.') and name[6:].isdigit()))
        if indices:
            return indices[-1]
        else:
            return None

    def open(self, path):
        self.path = path
        if not os.path.isdir(path):
            raise self.DoesNotExist(path)
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
            raise self.InvalidRepository(path)
        self.lock = UpgradableLock(os.path.join(path, 'config'))
        self.max_segment_size = self.config.getint('repository', 'max_segment_size')
        self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
        self.id = unhexlify(self.config.get('repository', 'id').strip())
        self.rollback()

    def close(self):
        if self.lock:
            if self.io:
                self.io.close()
                self.io = None
            self.lock.release()
            self.lock = None

    def commit(self):
        """Commit transaction

        Write a commit tag to the current segment, compact sparse segments,
        write the new index and then start a fresh transaction.
        """
        self.io.write_commit()
        self.compact_segments()
        self.write_index()
        self.rollback()
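
    # `head` is the transaction id of the index to load; None creates a fresh,
    # empty index for the very first transaction of a repository.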
    def open_index(self, head, read_only=False):
        if head is None:
            self.lock.upgrade()
            self.index = NSIndex.create(os.path.join(self.path, 'index.tmp').encode('utf-8'))
            self.segments = {}
            self.compact = set()
        else:
            if read_only:
                self.index = NSIndex(os.path.join(self.path, 'index.%d' % head).encode('utf-8'), readonly=True)
            else:
                self.lock.upgrade()
                self.io.cleanup()
                shutil.copy(os.path.join(self.path, 'index.%d' % head),
                            os.path.join(self.path, 'index.tmp'))
                self.index = NSIndex(os.path.join(self.path, 'index.tmp').encode('utf-8'))
            hints = read_msgpack(os.path.join(self.path, 'hints.%d' % head))
            if hints[b'version'] != 1:
                raise ValueError('Unknown hints file version: %d' % hints[b'version'])
            self.segments = hints[b'segments']
            self.compact = set(hints[b'compact'])
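
    # hints.<transaction_id> stores the per-segment count of live objects and
    # the set of sparse segments, so compaction bookkeeping survives across
    # transactions without rescanning every segment.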
    def write_index(self):
        hints = {b'version': 1,
                 b'segments': self.segments,
                 b'compact': list(self.compact)}
        write_msgpack(os.path.join(self.path, 'hints.%d' % self.io.head), hints)
        self.index.flush()
        os.rename(os.path.join(self.path, 'index.tmp'),
                  os.path.join(self.path, 'index.%d' % self.io.head))
        # Remove old indices
        current = '.%d' % self.io.head
        for name in os.listdir(self.path):
            if not name.startswith('index.') and not name.startswith('hints.'):
                continue
            if name.endswith(current):
                continue
            os.unlink(os.path.join(self.path, name))
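
    # Compaction copies every still-referenced PUT entry out of the sparse
    # segments into the current write segment, updates the index to the new
    # locations and finally deletes the emptied segment files.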
    def compact_segments(self):
        """Compact sparse segments by copying data into new segments
        """
        if not self.compact:
            return
        def lookup(tag, key):
            return tag == TAG_PUT and self.index.get(key, (-1, -1))[0] == segment
        segments = self.segments
        for segment in sorted(self.compact):
            if segments[segment] > 0:
                for tag, key, data in self.io.iter_objects(segment, lookup, include_data=True):
                    new_segment, offset = self.io.write_put(key, data)
                    self.index[key] = new_segment, offset
                    segments.setdefault(new_segment, 0)
                    segments[new_segment] += 1
                    segments[segment] -= 1
                assert segments[segment] == 0
        self.io.write_commit()
        for segment in self.compact:
            assert self.segments.pop(segment) == 0
            self.io.delete_segment(segment)
        self.compact = set()

    def recover(self, path):
        """Recover missing index by replaying logs"""
        start = self.get_index_transaction_id()
        self.open_index(start)
        for segment, filename in self.io.segment_iterator():
            if start is not None and segment <= start:
                continue
            self.segments[segment] = 0
            for tag, key, offset in self.io.iter_objects(segment):
                if tag == TAG_PUT:
                    try:
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                        self.compact.add(segment)
                    except KeyError:
                        pass
            if self.segments[segment] == 0:
                self.compact.add(segment)
        if self.io.head is not None:
            self.write_index()

    def check(self, progress=False):
        """Check repository consistency

        This method verifies all segment checksums and makes sure
        the index is consistent with the data stored in the segments.
        """
        if not self.index:
            self.open_index(self.io.head, read_only=True)
        progress_time = None
        error_found = False
        def report_progress(msg, error=False):
            nonlocal error_found
            if error:
                error_found = True
            if error or progress:
                print(msg, file=sys.stderr)
        seen = set()
        for segment, filename in self.io.segment_iterator():
            if progress:
                if int(time.time()) != progress_time:
                    progress_time = int(time.time())
                    report_progress('Checking segment {}/{}'.format(segment, self.io.head))
            try:
                objects = list(self.io.iter_objects(segment))
            except (IntegrityError, struct.error):
                report_progress('Error reading segment {}'.format(segment), error=True)
                objects = []
            for tag, key, offset in objects:
                if tag == TAG_PUT:
                    if key in seen:
                        report_progress('Key found in more than one segment. Segment={}, key={}'.format(segment, hexlify(key)), error=True)
                    seen.add(key)
                    if self.index.get(key, (0, 0)) != (segment, offset):
                        report_progress('Index vs segment header mismatch. Segment={}, key={}'.format(segment, hexlify(key)), error=True)
                elif tag == TAG_COMMIT:
                    continue
                else:
                    report_progress('Unexpected tag {} in segment {}'.format(tag, segment), error=True)
        if len(self.index) != len(seen):
            report_progress('Index object count mismatch. {} != {}'.format(len(self.index), len(seen)), error=True)
        if not error_found:
            report_progress('Check complete, no errors found.')
        return not error_found

    def rollback(self):
        """Reset the in-memory state to the last committed transaction
        """
        if self.io:
            self.io.close()
        self.io = None
        self.index = None
        self._active_txn = False
        self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir, self.get_index_transaction_id())

    def __len__(self):
        if not self.index:
            self.open_index(self.io.head, read_only=True)
        return len(self.index)

    def get(self, id_):
        if not self.index:
            self.open_index(self.io.head, read_only=True)
        try:
            segment, offset = self.index[id_]
            return self.io.read(segment, offset, id_)
        except KeyError:
            raise self.DoesNotExist(self.path)

    def get_many(self, ids, is_preloaded=False):
        for id_ in ids:
            yield self.get(id_)
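
    # put() and delete() start a write transaction lazily.  Overwriting an
    # existing key also appends a DELETE entry and marks the key's old segment
    # as sparse so that compaction can reclaim the stale copy.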
    def put(self, id, data, wait=True):
        if not self._active_txn:
            self._active_txn = True
            self.open_index(self.io.head)
        try:
            segment, _ = self.index[id]
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.segments.setdefault(segment, 0)
            self.compact.add(segment)
        except KeyError:
            pass
        segment, offset = self.io.write_put(id, data)
        self.segments.setdefault(segment, 0)
        self.segments[segment] += 1
        self.index[id] = segment, offset

    def delete(self, id, wait=True):
        if not self._active_txn:
            self._active_txn = True
            self.open_index(self.io.head)
        try:
            segment, offset = self.index.pop(id)
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.compact.add(segment)
            self.segments.setdefault(segment, 0)
        except KeyError:
            raise self.DoesNotExist(self.path)

    def preload(self, ids):
        """Preload objects (only applies to remote repositories)
        """


class LoggedIO(object):

    header_fmt = struct.Struct('<IIB')
    assert header_fmt.size == 9
    put_header_fmt = struct.Struct('<IIB32s')
    assert put_header_fmt.size == 41
    header_no_crc_fmt = struct.Struct('<IB')
    assert header_no_crc_fmt.size == 5
    crc_fmt = struct.Struct('<I')
    assert crc_fmt.size == 4

    _commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
    COMMIT = crc_fmt.pack(crc32(_commit)) + _commit
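    # COMMIT is the exact byte sequence of a commit entry (crc + size + tag);
    # is_committed_segment() recognises a committed segment by checking that
    # the file ends with these bytes.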

    def __init__(self, path, limit, segments_per_dir, latest_index, capacity=100):
        self.path = path
        self.fds = LRUCache(capacity)
        self.segment = None
        self.limit = limit
        self.segments_per_dir = segments_per_dir
        self.offset = 0
        self._write_fd = None
        self.head = None
        self.verify_segments_head(latest_index)

    def close(self):
        for segment in list(self.fds.keys()):
            self.fds.pop(segment).close()
        self.close_segment()
        self.fds = None  # Just to make sure we're disabled

    def segment_iterator(self, reverse=False):
        for dirpath, dirs, filenames in os.walk(os.path.join(self.path, 'data')):
            dirs.sort(key=int, reverse=reverse)
            filenames = sorted((filename for filename in filenames if filename.isdigit()), key=int, reverse=reverse)
            for filename in filenames:
                yield int(filename), os.path.join(dirpath, filename)
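
    # Cross-check the newest committed segment against the id of the newest
    # index; any mismatch means the repository needs "attic check".  On
    # success, self.head is the last committed segment and self.segment the
    # next segment to be written.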
    def verify_segments_head(self, latest_index):
        """Verify that the transaction id is consistent with the index transaction id
        """
        self.segment = 0
        for segment, filename in self.segment_iterator(reverse=True):
            if latest_index is None or segment < latest_index:
                # The index is newer than any committed transaction found
                raise Repository.CheckNeeded()
            if self.is_committed_segment(filename):
                if segment > latest_index:
                    # The committed transaction found is newer than the index
                    raise Repository.CheckNeeded()
                self.head = segment
                self.segment = self.head + 1
                break
        else:
            if latest_index is not None:
                # An index has been found but no committed transaction
                raise Repository.CheckNeeded()

    def cleanup(self):
        """Delete segment files left by aborted transactions
        """
        for segment, filename in self.segment_iterator(reverse=True):
            if segment > self.head:
                os.unlink(filename)
            else:
                break

    def is_committed_segment(self, filename):
        """Check if segment ends with a COMMIT tag
        """
        with open(filename, 'rb') as fd:
            try:
                fd.seek(-self.header_fmt.size, os.SEEK_END)
            except OSError as e:
                # Return False if the segment file is empty or too small
                if e.errno == errno.EINVAL:
                    return False
                raise
            return fd.read(self.header_fmt.size) == self.COMMIT

    def segment_filename(self, segment):
        return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))

    def get_write_fd(self, no_new=False):
        if not no_new and self.offset and self.offset > self.limit:
            self.close_segment()
        if not self._write_fd:
            if self.segment % self.segments_per_dir == 0:
                dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
                if not os.path.exists(dirname):
                    os.mkdir(dirname)
            self._write_fd = open(self.segment_filename(self.segment), 'ab')
            self._write_fd.write(MAGIC)
            self.offset = 8
        return self._write_fd

    def get_fd(self, segment):
        try:
            return self.fds[segment]
        except KeyError:
            fd = open(self.segment_filename(segment), 'rb')
            self.fds[segment] = fd
            return fd

    def delete_segment(self, segment):
        try:
            os.unlink(self.segment_filename(segment))
        except OSError:
            pass
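
    # Each entry's CRC covers everything after the CRC field itself: the size,
    # the tag and, for PUT/DELETE entries, the 32 byte key (plus the data for
    # PUT entries).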
    def iter_objects(self, segment, lookup=None, include_data=False):
        fd = self.get_fd(segment)
        fd.seek(0)
        if fd.read(8) != MAGIC:
            raise IntegrityError('Invalid segment header')
        offset = 8
        header = fd.read(self.header_fmt.size)
        while header:
            crc, size, tag = self.header_fmt.unpack(header)
            if size > MAX_OBJECT_SIZE:
                raise IntegrityError('Invalid segment object size')
            rest = fd.read(size - self.header_fmt.size)
            if crc32(rest, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
                raise IntegrityError('Segment checksum mismatch')
            if tag not in (TAG_PUT, TAG_DELETE, TAG_COMMIT):
                raise IntegrityError('Invalid segment entry header')
            key = None
            if tag in (TAG_PUT, TAG_DELETE):
                key = rest[:32]
            if not lookup or lookup(tag, key):
                if include_data:
                    yield tag, key, rest[32:]
                else:
                    yield tag, key, offset
            offset += size
            header = fd.read(self.header_fmt.size)

    def read(self, segment, offset, id):
        if segment == self.segment:
            self._write_fd.flush()
        fd = self.get_fd(segment)
        fd.seek(offset)
        header = fd.read(self.put_header_fmt.size)
        crc, size, tag, key = self.put_header_fmt.unpack(header)
        if size > MAX_OBJECT_SIZE:
            raise IntegrityError('Invalid segment object size')
        data = fd.read(size - self.put_header_fmt.size)
        if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
            raise IntegrityError('Segment checksum mismatch')
        if tag != TAG_PUT or id != key:
            raise IntegrityError('Invalid segment entry header')
        return data
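
    # The write_* methods append entries in exactly the crc + header (+ key)
    # (+ data) layout that iter_objects() and read() expect.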
    def write_put(self, id, data):
        size = len(data) + self.put_header_fmt.size
        fd = self.get_write_fd()
        offset = self.offset
        header = self.header_no_crc_fmt.pack(size, TAG_PUT)
        crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
        fd.write(b''.join((crc, header, id, data)))
        self.offset += size
        return self.segment, offset

    def write_delete(self, id):
        fd = self.get_write_fd()
        header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
        crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
        fd.write(b''.join((crc, header, id)))
        self.offset += self.put_header_fmt.size
        return self.segment

    def write_commit(self):
        fd = self.get_write_fd(no_new=True)
        header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
        crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
        fd.write(b''.join((crc, header)))
        self.head = self.segment
        self.close_segment()

    def close_segment(self):
        if self._write_fd:
            self.segment += 1
            self.offset = 0
            os.fsync(self._write_fd)
            self._write_fd.close()
            self._write_fd = None