repository.py

from configparser import RawConfigParser
from binascii import hexlify
import fcntl
import os
import re
import shutil
import struct
import tempfile
import unittest
from zlib import crc32

from .hashindex import NSIndex
from .helpers import IntegrityError, read_msgpack, write_msgpack, unhexlify
from .lrucache import LRUCache

MAX_OBJECT_SIZE = 20 * 1024 * 1024

TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2


class Repository(object):
    """Filesystem based transactional key value store

    On disk layout:
    dir/README
    dir/config
    dir/data/<X / SEGMENTS_PER_DIR>/<X>
    dir/index.X
    dir/hints.X
    """

    DEFAULT_MAX_SEGMENT_SIZE = 5 * 1024 * 1024
    DEFAULT_SEGMENTS_PER_DIR = 10000

    class DoesNotExist(KeyError):
        """Requested key does not exist"""

    class AlreadyExists(KeyError):
        """Repository already exists"""

    def __init__(self, path, create=False):
        self.io = None
        self.lock_fd = None
        if create:
            self.create(path)
        self.open(path)

    def __del__(self):
        self.close()

    def create(self, path):
        """Create a new empty repository at `path`
        """
        if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
            raise self.AlreadyExists(path)
        if not os.path.exists(path):
            os.mkdir(path)
        with open(os.path.join(path, 'README'), 'w') as fd:
            fd.write('This is a DARC repository\n')
        os.mkdir(os.path.join(path, 'data'))
        config = RawConfigParser()
        config.add_section('repository')
        config.set('repository', 'version', '1')
        # RawConfigParser.set() only accepts string values
        config.set('repository', 'segments_per_dir', str(self.DEFAULT_SEGMENTS_PER_DIR))
        config.set('repository', 'max_segment_size', str(self.DEFAULT_MAX_SEGMENT_SIZE))
        config.set('repository', 'id', hexlify(os.urandom(32)).decode('ascii'))
        with open(os.path.join(path, 'config'), 'w') as fd:
            config.write(fd)

    def open(self, path):
        self.head = None
        self.path = path
        if not os.path.isdir(path):
            raise self.DoesNotExist(path)
        self.lock_fd = open(os.path.join(path, 'README'), 'r+')
        fcntl.flock(self.lock_fd, fcntl.LOCK_EX)
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if self.config.getint('repository', 'version') != 1:
            raise Exception('%s does not look like a darc repository' % path)
        self.max_segment_size = self.config.getint('repository', 'max_segment_size')
        self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
        self.id = unhexlify(self.config.get('repository', 'id').strip())
        self.rollback()

    def close(self):
        if self.lock_fd:
            self.rollback()
            self.lock_fd.close()
            self.lock_fd = None

    def commit(self, rollback=True):
        """Commit transaction
        """
        self.io.write_commit()
        self.compact_segments()
        self.write_index()
        self.rollback()
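
    # A minimal round trip (sketch; the path and payload are made up, and
    # the 32-byte key mirrors the fixed key size used by LoggedIO):
    #
    #   repository = Repository('/tmp/repo', create=True)
    #   key = os.urandom(32)
    #   repository.put(key, b'payload')
    #   repository.commit()
    #   assert repository.get(key) == b'payload'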

    def _available_indices(self, reverse=False):
        names = [int(name[6:]) for name in os.listdir(self.path) if re.match(r'index\.\d+', name)]
        names.sort(reverse=reverse)
        return names

    def open_index(self, head, read_only=False):
        if head is None:
            self.index = NSIndex.create(os.path.join(self.path, 'index.tmp').encode('utf-8'))
            self.segments = {}
            self.compact = set()
        else:
            if read_only:
                self.index = NSIndex(os.path.join(self.path, 'index.%d' % head).encode('utf-8'))
            else:
                shutil.copy(os.path.join(self.path, 'index.%d' % head),
                            os.path.join(self.path, 'index.tmp'))
                self.index = NSIndex(os.path.join(self.path, 'index.tmp').encode('utf-8'))
            hints = read_msgpack(os.path.join(self.path, 'hints.%d' % head))
            if hints[b'version'] != 1:
                raise ValueError('Unknown hints file version: %d' % hints[b'version'])
            self.segments = hints[b'segments']
            self.compact = set(hints[b'compact'])

    def write_index(self):
        hints = {b'version': 1,
                 b'segments': self.segments,
                 b'compact': list(self.compact)}
        write_msgpack(os.path.join(self.path, 'hints.%d' % self.io.head), hints)
        self.index.flush()
        os.rename(os.path.join(self.path, 'index.tmp'),
                  os.path.join(self.path, 'index.%d' % self.io.head))
        # Remove old indices
        current = '.%d' % self.io.head
        for name in os.listdir(self.path):
            if not name.startswith('index.') and not name.startswith('hints.'):
                continue
            if name.endswith(current):
                continue
            os.unlink(os.path.join(self.path, name))

    def compact_segments(self):
        """Compact sparse segments by copying data into new segments
        """
        if not self.compact:
            return
        def lookup(tag, key):
            # `segment` is closed over from the loop below; only entries
            # that are still current according to the index are copied
            return tag == TAG_PUT and self.index.get(key, (-1, -1))[0] == segment
        segments = self.segments
        for segment in sorted(self.compact):
            if segments[segment] > 0:
                for tag, key, data in self.io.iter_objects(segment, lookup, include_data=True):
                    new_segment, offset = self.io.write_put(key, data)
                    self.index[key] = new_segment, offset
                    segments.setdefault(new_segment, 0)
                    segments[new_segment] += 1
                    segments[segment] -= 1
                assert segments[segment] == 0
        self.io.write_commit()
        for segment in self.compact:
            assert self.segments.pop(segment) == 0
            self.io.delete_segment(segment)
        self.compact = set()
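
    # Bookkeeping sketch (numbers are hypothetical): with self.segments ==
    # {3: 1, 7: 4} and self.compact == {3}, the one live object left in
    # segment 3 is rewritten into the current write segment, segment 3's
    # count drops to 0, and its file is deleted after the commit record.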

    def recover(self, path):
        """Recover missing index by replaying logs"""
        start = None
        available = self._available_indices()
        if available:
            start = available[-1]
        self.open_index(start)
        for segment, filename in self.io._segment_names():
            if start is not None and segment <= start:
                continue
            self.segments[segment] = 0
            for tag, key, offset in self.io.iter_objects(segment):
                if tag == TAG_PUT:
                    try:
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                        self.compact.add(segment)
                    except KeyError:
                        pass
            if self.segments[segment] == 0:
                self.compact.add(segment)
        if self.io.head is not None:
            self.write_index()

    def rollback(self):
        """Discard uncommitted changes and reload the last committed state
        """
        self._active_txn = False
        if self.io:
            self.io.close()
        self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)
        if self.io.head is not None and not os.path.exists(os.path.join(self.path, 'index.%d' % self.io.head)):
            self.recover(self.path)
        self.open_index(self.io.head, read_only=True)

    def _len(self):
        return len(self.index)

    def get(self, id):
        try:
            segment, offset = self.index[id]
            return self.io.read(segment, offset, id)
        except KeyError:
            raise self.DoesNotExist

    def get_many(self, ids, peek=None):
        for id in ids:
            yield self.get(id)

    def put(self, id, data, wait=True):
        if not self._active_txn:
            self._active_txn = True
            self.open_index(self.io.head)
        try:
            segment, _ = self.index[id]
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.segments.setdefault(segment, 0)
            self.compact.add(segment)
        except KeyError:
            pass
        segment, offset = self.io.write_put(id, data)
        self.segments.setdefault(segment, 0)
        self.segments[segment] += 1
        self.index[id] = segment, offset

    def delete(self, id, wait=True):
        if not self._active_txn:
            self._active_txn = True
            self.open_index(self.io.head)
        try:
            segment, offset = self.index.pop(id)
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.compact.add(segment)
            self.segments.setdefault(segment, 0)
        except KeyError:
            raise self.DoesNotExist

    def add_callback(self, cb, data):
        cb(None, None, data)


class LoggedIO(object):

    header_fmt = struct.Struct('<IIB')
    assert header_fmt.size == 9
    put_header_fmt = struct.Struct('<IIB32s')
    assert put_header_fmt.size == 41
    header_no_crc_fmt = struct.Struct('<IB')
    assert header_no_crc_fmt.size == 5
    crc_fmt = struct.Struct('<I')
    assert crc_fmt.size == 4

    _commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
    COMMIT = crc_fmt.pack(crc32(_commit)) + _commit
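
    # Entry layout implied by the structs above and the CRC calculations in
    # write_put/write_delete/write_commit (all fields little-endian):
    #   PUT:    crc32(4) | size(4) | tag(1) | key(32) | data
    #   DELETE: crc32(4) | size(4) | tag(1) | key(32)
    #   COMMIT: crc32(4) | size(4) | tag(1)
    # `size` counts the whole entry including the 9-byte header; the CRC
    # covers everything after the crc field itself.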

    def __init__(self, path, limit, segments_per_dir, capacity=100):
        self.path = path
        self.fds = LRUCache(capacity)
        self.segment = None
        self.limit = limit
        self.segments_per_dir = segments_per_dir
        self.offset = 0
        self._write_fd = None
        self.head = None
        self.cleanup()

    def close(self):
        for segment in list(self.fds.keys()):
            self.fds.pop(segment).close()
        self.close_segment()
        self.fds = None  # Just to make sure we're disabled

    def _segment_names(self, reverse=False):
        for dirpath, dirs, filenames in os.walk(os.path.join(self.path, 'data')):
            dirs.sort(key=int, reverse=reverse)
            filenames.sort(key=int, reverse=reverse)
            for filename in filenames:
                yield int(filename), os.path.join(dirpath, filename)

    def cleanup(self):
        """Delete segment files left by aborted transactions
        """
        self.head = None
        self.segment = 0
        for segment, filename in self._segment_names(reverse=True):
            if self.is_complete_segment(filename):
                self.head = segment
                self.segment = self.head + 1
                return
            else:
                os.unlink(filename)

    def is_complete_segment(self, filename):
        with open(filename, 'rb') as fd:
            fd.seek(-self.header_fmt.size, 2)
            return fd.read(self.header_fmt.size) == self.COMMIT

    def segment_filename(self, segment):
        return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))
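
    # For example (illustrative numbers): with segments_per_dir=10000,
    # segment_filename(12345) returns '<path>/data/1/12345'.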

    def get_write_fd(self, no_new=False):
        if not no_new and self.offset and self.offset > self.limit:
            self.close_segment()
        if not self._write_fd:
            if self.segment % self.segments_per_dir == 0:
                dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
                if not os.path.exists(dirname):
                    os.mkdir(dirname)
            self._write_fd = open(self.segment_filename(self.segment), 'ab')
            self._write_fd.write(b'DSEGMENT')
            self.offset = 8
        return self._write_fd

    def get_fd(self, segment):
        try:
            return self.fds[segment]
        except KeyError:
            fd = open(self.segment_filename(segment), 'rb')
            self.fds[segment] = fd
            return fd

    def delete_segment(self, segment):
        try:
            os.unlink(self.segment_filename(segment))
        except OSError:
            pass

    def iter_objects(self, segment, lookup=None, include_data=False):
        fd = self.get_fd(segment)
        fd.seek(0)
        if fd.read(8) != b'DSEGMENT':
            raise IntegrityError('Invalid segment header')
        offset = 8
        header = fd.read(self.header_fmt.size)
        while header:
            crc, size, tag = self.header_fmt.unpack(header)
            if size > MAX_OBJECT_SIZE:
                raise IntegrityError('Invalid segment object size')
            rest = fd.read(size - self.header_fmt.size)
            if crc32(rest, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
                raise IntegrityError('Segment checksum mismatch')
            if tag not in (TAG_PUT, TAG_DELETE, TAG_COMMIT):
                raise IntegrityError('Invalid segment entry header')
            key = None
            if tag in (TAG_PUT, TAG_DELETE):
                key = rest[:32]
            if not lookup or lookup(tag, key):
                if include_data:
                    yield tag, key, rest[32:]
                else:
                    yield tag, key, offset
            offset += size
            header = fd.read(self.header_fmt.size)

    def read(self, segment, offset, id):
        if segment == self.segment:
            self._write_fd.flush()
        fd = self.get_fd(segment)
        fd.seek(offset)
        header = fd.read(self.put_header_fmt.size)
        crc, size, tag, key = self.put_header_fmt.unpack(header)
        if size > MAX_OBJECT_SIZE:
            raise IntegrityError('Invalid segment object size')
        data = fd.read(size - self.put_header_fmt.size)
        if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
            raise IntegrityError('Segment checksum mismatch')
        if tag != TAG_PUT or id != key:
            raise IntegrityError('Invalid segment entry header')
        return data

    def write_put(self, id, data):
        size = len(data) + self.put_header_fmt.size
        fd = self.get_write_fd()
        offset = self.offset
        header = self.header_no_crc_fmt.pack(size, TAG_PUT)
        crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
        fd.write(b''.join((crc, header, id, data)))
        self.offset += size
        return self.segment, offset

    def write_delete(self, id):
        fd = self.get_write_fd()
        header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
        crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
        fd.write(b''.join((crc, header, id)))
        self.offset += self.put_header_fmt.size
        return self.segment

    def write_commit(self):
        fd = self.get_write_fd(no_new=True)
        header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
        crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
        fd.write(b''.join((crc, header)))
        self.head = self.segment
        self.close_segment()

    def close_segment(self):
        if self._write_fd:
            self.segment += 1
            self.offset = 0
            # Flush Python's buffer before fsync; otherwise buffered data
            # would only reach the OS on close(), after the fsync
            self._write_fd.flush()
            os.fsync(self._write_fd)
            self._write_fd.close()
            self._write_fd = None