# archive.py

from datetime import datetime, timedelta, timezone
from getpass import getuser
from itertools import groupby
import errno
import shutil
import tempfile
from attic.key import key_factory
from attic.remote import cache_if_remote
import msgpack
import os
import socket
import stat
import sys
import time
from io import BytesIO
from attic import xattr
from attic.platform import acl_get, acl_set
from attic.chunker import chunkify
from attic.hashindex import ChunkIndex
from attic.helpers import Error, uid2user, user2uid, gid2group, group2gid, \
    Manifest, Statistics, decode_dict, st_mtime_ns, make_path_safe, StableDict

ITEMS_BUFFER = 1024 * 1024
CHUNK_MIN = 1024
WINDOW_SIZE = 0xfff
CHUNK_MASK = 0xffff

utime_supports_fd = os.utime in getattr(os, 'supports_fd', {})
has_mtime_ns = sys.version >= '3.3'
has_lchmod = hasattr(os, 'lchmod')
has_lchflags = hasattr(os, 'lchflags')
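
# Note on the constants above: chunkify() (from attic.chunker) does
# content-defined chunking with a rolling hash over a WINDOW_SIZE-byte
# window, cutting a chunk wherever the masked hash value hits a fixed
# pattern (CHUNK_MASK), but never before CHUNK_MIN bytes. Illustrative
# sketch only; process() is hypothetical and 0 stands in for the keyed
# chunk seed:
#
#     with open('example.dat', 'rb') as fd:
#         for chunk in chunkify(fd, WINDOW_SIZE, CHUNK_MASK, CHUNK_MIN, 0):
#             process(bytes(chunk))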


class DownloadPipeline:

    def __init__(self, repository, key):
        self.repository = repository
        self.key = key

    def unpack_many(self, ids, filter=None, preload=False):
        unpacker = msgpack.Unpacker(use_list=False)
        for data in self.fetch_many(ids):
            unpacker.feed(data)
            items = [decode_dict(item, (b'path', b'source', b'user', b'group')) for item in unpacker]
            if filter:
                items = [item for item in items if filter(item)]
            if preload:
                for item in items:
                    if b'chunks' in item:
                        self.repository.preload([c[0] for c in item[b'chunks']])
            for item in items:
                yield item

    def fetch_many(self, ids, is_preloaded=False):
        for id_, data in zip(ids, self.repository.get_many(ids, is_preloaded=is_preloaded)):
            yield self.key.decrypt(id_, data)
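
# Illustrative DownloadPipeline use (a sketch; assumes an open repository
# and matching key, with `ids` holding item-stream chunk ids taken from
# archive metadata):
#
#     pipeline = DownloadPipeline(repository, key)
#     for item in pipeline.unpack_many(ids, preload=True):
#         print(item[b'path'])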


class ChunkBuffer:
    BUFFER_SIZE = 1 * 1024 * 1024

    def __init__(self, key):
        self.buffer = BytesIO()
        self.packer = msgpack.Packer(unicode_errors='surrogateescape')
        self.chunks = []
        self.key = key

    def add(self, item):
        self.buffer.write(self.packer.pack(StableDict(item)))
        if self.is_full():
            self.flush()

    def write_chunk(self, chunk):
        raise NotImplementedError

    def flush(self, flush=False):
        if self.buffer.tell() == 0:
            return
        self.buffer.seek(0)
        chunks = list(bytes(s) for s in chunkify(self.buffer, WINDOW_SIZE, CHUNK_MASK, CHUNK_MIN, self.key.chunk_seed))
        self.buffer.seek(0)
        self.buffer.truncate(0)
        # Leave the last partial chunk in the buffer unless flush is True
        end = None if flush or len(chunks) == 1 else -1
        for chunk in chunks[:end]:
            self.chunks.append(self.write_chunk(chunk))
        if end == -1:
            self.buffer.write(chunks[-1])

    def is_full(self):
        return self.buffer.tell() > self.BUFFER_SIZE


class CacheChunkBuffer(ChunkBuffer):

    def __init__(self, cache, key, stats):
        super(CacheChunkBuffer, self).__init__(key)
        self.cache = cache
        self.stats = stats

    def write_chunk(self, chunk):
        id_, _, _ = self.cache.add_chunk(self.key.id_hash(chunk), chunk, self.stats)
        return id_
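
# ChunkBuffer is abstract over storage: write_chunk() must be supplied by a
# subclass or assigned on the instance. CacheChunkBuffer stores chunks via
# the cache; ArchiveChecker.rebuild_refcounts() below instead assigns its
# own add_callback to write_chunk to re-encrypt and re-count item chunks.
# Minimal sketch of the pattern (NullChunkBuffer is hypothetical):
#
#     class NullChunkBuffer(ChunkBuffer):
#         def write_chunk(self, chunk):
#             return self.key.id_hash(chunk)  # keep only the id, drop the data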


class Archive:

    class DoesNotExist(Error):
        """Archive {} does not exist"""

    class AlreadyExists(Error):
        """Archive {} already exists"""

    def __init__(self, repository, key, manifest, name, cache=None, create=False,
                 checkpoint_interval=300, numeric_owner=False):
        self.cwd = os.getcwd()
        self.key = key
        self.repository = repository
        self.cache = cache
        self.manifest = manifest
        self.hard_links = {}
        self.stats = Statistics()
        self.name = name
        self.checkpoint_interval = checkpoint_interval
        self.numeric_owner = numeric_owner
        self.items_buffer = CacheChunkBuffer(self.cache, self.key, self.stats)
        self.pipeline = DownloadPipeline(self.repository, self.key)
        if create:
            if name in manifest.archives:
                raise self.AlreadyExists(name)
            self.last_checkpoint = time.time()
            i = 0
            while True:
                self.checkpoint_name = '%s.checkpoint%s' % (name, i and ('.%d' % i) or '')
                if self.checkpoint_name not in manifest.archives:
                    break
                i += 1
        else:
            if name not in self.manifest.archives:
                raise self.DoesNotExist(name)
            info = self.manifest.archives[name]
            self.load(info[b'id'])

    def load(self, id):
        self.id = id
        data = self.key.decrypt(self.id, self.repository.get(self.id))
        self.metadata = msgpack.unpackb(data)
        if self.metadata[b'version'] != 1:
            raise Exception('Unknown archive metadata version')
        decode_dict(self.metadata, (b'name', b'hostname', b'username', b'time'))
        self.metadata[b'cmdline'] = [arg.decode('utf-8', 'surrogateescape') for arg in self.metadata[b'cmdline']]
        self.name = self.metadata[b'name']

    @property
    def ts(self):
        """Timestamp of archive creation in UTC"""
        t, f = self.metadata[b'time'].split('.', 1)
        return datetime.strptime(t, '%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) + timedelta(seconds=float('.' + f))

    def __repr__(self):
        return 'Archive(%r)' % self.name

    def iter_items(self, filter=None, preload=False):
        for item in self.pipeline.unpack_many(self.metadata[b'items'], filter=filter, preload=preload):
            yield item

    def add_item(self, item):
        self.items_buffer.add(item)
        if time.time() - self.last_checkpoint > self.checkpoint_interval:
            self.write_checkpoint()
            self.last_checkpoint = time.time()

    def write_checkpoint(self):
        self.save(self.checkpoint_name)
        del self.manifest.archives[self.checkpoint_name]
        self.cache.chunk_decref(self.id, self.stats)

    def save(self, name=None):
        name = name or self.name
        if name in self.manifest.archives:
            raise self.AlreadyExists(name)
        self.items_buffer.flush(flush=True)
        metadata = StableDict({
            'version': 1,
            'name': name,
            'items': self.items_buffer.chunks,
            'cmdline': sys.argv,
            'hostname': socket.gethostname(),
            'username': getuser(),
            'time': datetime.utcnow().isoformat(),
        })
        data = msgpack.packb(metadata, unicode_errors='surrogateescape')
        self.id = self.key.id_hash(data)
        self.cache.add_chunk(self.id, data, self.stats)
        self.manifest.archives[name] = {'id': self.id, 'time': metadata['time']}
        self.manifest.write()
        self.repository.commit()
        self.cache.commit()
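
    # Typical create/save flow (illustrative sketch; repository, key,
    # manifest and cache are assumed to be opened elsewhere, e.g. by the
    # command-line front end):
    #
    #     archive = Archive(repository, key, manifest, 'my-backup',
    #                       cache=cache, create=True)
    #     archive.process_file('some/file', os.lstat('some/file'), cache)
    #     archive.save()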

    def calc_stats(self, cache):
        def add(id):
            count, size, csize = self.cache.chunks[id]
            stats.update(size, csize, count == 1)
            self.cache.chunks[id] = count - 1, size, csize

        def add_file_chunks(chunks):
            for id, _, _ in chunks:
                add(id)
        # This function is a bit evil since it abuses the cache to calculate
        # the stats. The cache transaction must be rolled back afterwards
        unpacker = msgpack.Unpacker(use_list=False)
        cache.begin_txn()
        stats = Statistics()
        add(self.id)
        for id, chunk in zip(self.metadata[b'items'], self.repository.get_many(self.metadata[b'items'])):
            add(id)
            unpacker.feed(self.key.decrypt(id, chunk))
            for item in unpacker:
                if b'chunks' in item:
                    stats.nfiles += 1
                    add_file_chunks(item[b'chunks'])
        cache.rollback()
        return stats
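
    # calc_stats() works by decrementing reference counts in the cache's
    # chunk index: a chunk whose count is 1 when visited is referenced only
    # by this archive, so its size is attributed to the archive's unique
    # size. The decrements are destructive, hence the begin_txn()/rollback()
    # pair that restores the index afterwards.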

    def extract_item(self, item, restore_attrs=True, dry_run=False):
        if dry_run:
            if b'chunks' in item:
                for _ in self.pipeline.fetch_many([c[0] for c in item[b'chunks']], is_preloaded=True):
                    pass
            return

        dest = self.cwd
        if item[b'path'].startswith('/') or item[b'path'].startswith('..'):
            raise Exception('Path should be relative and local')
        path = os.path.join(dest, item[b'path'])
        # Attempt to remove existing files, ignore errors on failure
        try:
            st = os.lstat(path)
            if stat.S_ISDIR(st.st_mode):
                os.rmdir(path)
            else:
                os.unlink(path)
        except OSError:
            pass
        mode = item[b'mode']
        if stat.S_ISDIR(mode):
            if not os.path.exists(path):
                os.makedirs(path)
            if restore_attrs:
                self.restore_attrs(path, item)
        elif stat.S_ISREG(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            # Hard link?
            if b'source' in item:
                source = os.path.join(dest, item[b'source'])
                if os.path.exists(path):
                    os.unlink(path)
                os.link(source, path)
            else:
                with open(path, 'wb') as fd:
                    ids = [c[0] for c in item[b'chunks']]
                    for data in self.pipeline.fetch_many(ids, is_preloaded=True):
                        fd.write(data)
                    fd.flush()
                    self.restore_attrs(path, item, fd=fd.fileno())
        elif stat.S_ISFIFO(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            os.mkfifo(path)
            self.restore_attrs(path, item)
        elif stat.S_ISLNK(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            source = item[b'source']
            if os.path.exists(path):
                os.unlink(path)
            os.symlink(source, path)
            self.restore_attrs(path, item, symlink=True)
        elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
            os.mknod(path, item[b'mode'], item[b'rdev'])
            self.restore_attrs(path, item)
        else:
            raise Exception('Unknown archive item type %r' % item[b'mode'])

    def restore_attrs(self, path, item, symlink=False, fd=None):
        xattrs = item.get(b'xattrs')
        if xattrs:
            for k, v in xattrs.items():
                try:
                    xattr.setxattr(fd or path, k, v, follow_symlinks=False)
                except OSError as e:
                    if e.errno != errno.ENOTSUP:
                        raise
        uid = gid = None
        if not self.numeric_owner:
            uid = user2uid(item[b'user'])
            gid = group2gid(item[b'group'])
        uid = item[b'uid'] if uid is None else uid
        gid = item[b'gid'] if gid is None else gid
        # This code is a bit of a mess due to OS-specific differences
        try:
            if fd:
                os.fchown(fd, uid, gid)
            else:
                os.lchown(path, uid, gid)
        except OSError:
            pass
        if fd:
            os.fchmod(fd, item[b'mode'])
        elif not symlink:
            os.chmod(path, item[b'mode'])
        elif has_lchmod:  # Not available on Linux
            os.lchmod(path, item[b'mode'])
        if fd and utime_supports_fd:  # Python >= 3.3
            os.utime(fd, None, ns=(item[b'mtime'], item[b'mtime']))
        elif utime_supports_fd:  # Python >= 3.3
            os.utime(path, None, ns=(item[b'mtime'], item[b'mtime']), follow_symlinks=False)
        elif not symlink:
            os.utime(path, (item[b'mtime'] / 10**9, item[b'mtime'] / 10**9))
        acl_set(path, item, self.numeric_owner)
        # Only available on OS X and FreeBSD
        if has_lchflags and b'bsdflags' in item:
            try:
                os.lchflags(path, item[b'bsdflags'])
            except OSError:
                pass

    def delete(self, stats):
        unpacker = msgpack.Unpacker(use_list=False)
        for items_id, data in zip(self.metadata[b'items'], self.repository.get_many(self.metadata[b'items'])):
            unpacker.feed(self.key.decrypt(items_id, data))
            self.cache.chunk_decref(items_id, stats)
            for item in unpacker:
                if b'chunks' in item:
                    for chunk_id, size, csize in item[b'chunks']:
                        self.cache.chunk_decref(chunk_id, stats)
        self.cache.chunk_decref(self.id, stats)
        del self.manifest.archives[self.name]

    def stat_attrs(self, st, path):
        item = {
            b'mode': st.st_mode,
            b'uid': st.st_uid, b'user': uid2user(st.st_uid),
            b'gid': st.st_gid, b'group': gid2group(st.st_gid),
            b'mtime': st_mtime_ns(st),
        }
        if self.numeric_owner:
            item[b'user'] = item[b'group'] = None
        xattrs = xattr.get_all(path, follow_symlinks=False)
        if xattrs:
            item[b'xattrs'] = StableDict(xattrs)
        if has_lchflags and st.st_flags:
            item[b'bsdflags'] = st.st_flags
        item[b'acl'] = acl_get(path, item, self.numeric_owner)
        return item

    def process_item(self, path, st):
        item = {b'path': make_path_safe(path)}
        item.update(self.stat_attrs(st, path))
        self.add_item(item)

    def process_dev(self, path, st):
        item = {b'path': make_path_safe(path), b'rdev': st.st_rdev}
        item.update(self.stat_attrs(st, path))
        self.add_item(item)

    def process_symlink(self, path, st):
        source = os.readlink(path)
        item = {b'path': make_path_safe(path), b'source': source}
        item.update(self.stat_attrs(st, path))
        self.add_item(item)

    def process_file(self, path, st, cache):
        safe_path = make_path_safe(path)
        # Is it a hard link?
        if st.st_nlink > 1:
            source = self.hard_links.get((st.st_ino, st.st_dev))
            if (st.st_ino, st.st_dev) in self.hard_links:
                item = self.stat_attrs(st, path)
                item.update({b'path': safe_path, b'source': source})
                self.add_item(item)
                return
            else:
                self.hard_links[st.st_ino, st.st_dev] = safe_path
        path_hash = self.key.id_hash(os.path.join(self.cwd, path).encode('utf-8', 'surrogateescape'))
        ids = cache.file_known_and_unchanged(path_hash, st)
        chunks = None
        if ids is not None:
            # Make sure all ids are available
            for id_ in ids:
                if not cache.seen_chunk(id_):
                    break
            else:
                chunks = [cache.chunk_incref(id_, self.stats) for id_ in ids]
        # Only chunkify the file if needed
        if chunks is None:
            with open(path, 'rb') as fd:
                chunks = []
                for chunk in chunkify(fd, WINDOW_SIZE, CHUNK_MASK, CHUNK_MIN, self.key.chunk_seed):
                    chunks.append(cache.add_chunk(self.key.id_hash(chunk), chunk, self.stats))
            cache.memorize_file(path_hash, st, [c[0] for c in chunks])
        item = {b'path': safe_path, b'chunks': chunks}
        item.update(self.stat_attrs(st, path))
        self.stats.nfiles += 1
        self.add_item(item)
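
    # process_file() fast path: file_known_and_unchanged() consults the files
    # cache under path_hash; if the stat data still matches, the previously
    # stored chunk ids are reused and chunk_incref() merely bumps reference
    # counts, so unchanged files are never re-read or re-chunked.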

    @staticmethod
    def list_archives(repository, key, manifest, cache=None):
        for name, info in manifest.archives.items():
            yield Archive(repository, key, manifest, name, cache=cache)


class RobustUnpacker:
    """A restartable/robust version of the streaming msgpack unpacker
    """
    item_keys = [msgpack.packb(name) for name in ('path', 'mode', 'source', 'chunks', 'rdev', 'xattrs', 'user', 'group', 'uid', 'gid', 'mtime')]

    def __init__(self, validator):
        super(RobustUnpacker, self).__init__()
        self.validator = validator
        self._buffered_data = []
        self._resync = False
        self._unpacker = msgpack.Unpacker(object_hook=StableDict)

    def resync(self):
        self._buffered_data = []
        self._resync = True

    def feed(self, data):
        if self._resync:
            self._buffered_data.append(data)
        else:
            self._unpacker.feed(data)

    def __iter__(self):
        return self

    def __next__(self):
        if self._resync:
            data = b''.join(self._buffered_data)
            while self._resync:
                if not data:
                    raise StopIteration
                # Abort early if the data does not look like a serialized dict
                if len(data) < 2 or ((data[0] & 0xf0) != 0x80) or ((data[1] & 0xe0) != 0xa0):
                    data = data[1:]
                    continue
                # Make sure it looks like an item dict
                for pattern in self.item_keys:
                    if data[1:].startswith(pattern):
                        break
                else:
                    data = data[1:]
                    continue
                self._unpacker = msgpack.Unpacker(object_hook=StableDict)
                self._unpacker.feed(data)
                try:
                    item = next(self._unpacker)
                    if self.validator(item):
                        self._resync = False
                        return item
                # Ignore exceptions that might be raised when feeding
                # msgpack with invalid data
                except (TypeError, ValueError, StopIteration):
                    pass
                data = data[1:]
        else:
            return next(self._unpacker)
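
# Illustrative RobustUnpacker use (a sketch; `stream` is a hypothetical
# iterable of (damaged, chunk) pairs, where chunk is a decrypted piece of
# the item stream):
#
#     unpacker = RobustUnpacker(lambda item: isinstance(item, dict) and b'path' in item)
#     for damaged, chunk in stream:
#         if damaged:
#             unpacker.resync()   # buffer data until a plausible item dict reappears
#             continue
#         unpacker.feed(chunk)
#         for item in unpacker:
#             handle(item)        # handle() is hypothetical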


class ArchiveChecker:

    def __init__(self):
        self.error_found = False
        self.possibly_superseded = set()
        self.tmpdir = tempfile.mkdtemp()

    def __del__(self):
        shutil.rmtree(self.tmpdir)

    def check(self, repository, repair=False):
        self.report_progress('Starting archive consistency check...')
        self.repair = repair
        self.repository = repository
        self.init_chunks()
        self.key = self.identify_key(repository)
        if Manifest.MANIFEST_ID not in self.chunks:
            self.manifest = self.rebuild_manifest()
        else:
            self.manifest, _ = Manifest.load(repository, key=self.key)
        self.rebuild_refcounts()
        self.verify_chunks()
        if not self.error_found:
            self.report_progress('Archive consistency check complete, no problems found.')
        return self.repair or not self.error_found

    def init_chunks(self):
        """Fetch a list of all object keys from repository
        """
        # Explicitly set the initial hash table capacity to avoid performance issues
        # due to hash table "resonance"
        capacity = int(len(self.repository) * 1.2)
        self.chunks = ChunkIndex.create(os.path.join(self.tmpdir, 'chunks').encode('utf-8'), capacity=capacity)
        marker = None
        while True:
            result = self.repository.list(limit=10000, marker=marker)
            if not result:
                break
            marker = result[-1]
            for id_ in result:
                self.chunks[id_] = (0, 0, 0)

    def report_progress(self, msg, error=False):
        if error:
            self.error_found = True
        print(msg, file=sys.stderr if error else sys.stdout)

    def identify_key(self, repository):
        cdata = repository.get(next(self.chunks.iteritems())[0])
        return key_factory(repository, cdata)

    def rebuild_manifest(self):
        """Rebuild the manifest object if it is missing

        Iterates through all objects in the repository looking for archive metadata blocks.
        """
        self.report_progress('Rebuilding missing manifest, this might take some time...', error=True)
        manifest = Manifest(self.key, self.repository)
        for chunk_id, _ in self.chunks.iteritems():
            cdata = self.repository.get(chunk_id)
            data = self.key.decrypt(chunk_id, cdata)
            # Some basic sanity checks of the payload before feeding it into msgpack
            if len(data) < 2 or ((data[0] & 0xf0) != 0x80) or ((data[1] & 0xe0) != 0xa0):
                continue
            if b'cmdline' not in data or b'\xa7version\x01' not in data:
                continue
            try:
                archive = msgpack.unpackb(data)
            except Exception:
                continue
            if isinstance(archive, dict) and b'items' in archive and b'cmdline' in archive:
                self.report_progress('Found archive ' + archive[b'name'].decode('utf-8'), error=True)
                manifest.archives[archive[b'name'].decode('utf-8')] = {b'id': chunk_id, b'time': archive[b'time']}
        self.report_progress('Manifest rebuild complete', error=True)
        return manifest

    def rebuild_refcounts(self):
        """Rebuild object reference counts by walking the metadata

        Missing and/or incorrect data is repaired when detected
        """
        # Exclude the manifest from chunks
        del self.chunks[Manifest.MANIFEST_ID]

        def mark_as_possibly_superseded(id_):
            if self.chunks.get(id_, (0,))[0] == 0:
                self.possibly_superseded.add(id_)

        def add_callback(chunk):
            id_ = self.key.id_hash(chunk)
            cdata = self.key.encrypt(chunk)
            add_reference(id_, len(chunk), len(cdata), cdata)
            return id_

        def add_reference(id_, size, csize, cdata=None):
            try:
                count, _, _ = self.chunks[id_]
                self.chunks[id_] = count + 1, size, csize
            except KeyError:
                assert cdata is not None
                self.chunks[id_] = 1, size, csize
                if self.repair:
                    self.repository.put(id_, cdata)

        def verify_file_chunks(item):
            """Verifies that all file chunks are present

            Missing file chunks will be replaced with new chunks of the same
            length containing all zeros.
            """
            offset = 0
            chunk_list = []
            for chunk_id, size, csize in item[b'chunks']:
                if chunk_id not in self.chunks:
                    # If a file chunk is missing, create an all empty replacement chunk
                    self.report_progress('{}: Missing file chunk detected (Byte {}-{})'.format(item[b'path'].decode('utf-8', 'surrogateescape'), offset, offset + size), error=True)
                    data = bytes(size)
                    chunk_id = self.key.id_hash(data)
                    cdata = self.key.encrypt(data)
                    csize = len(cdata)
                    add_reference(chunk_id, size, csize, cdata)
                else:
                    add_reference(chunk_id, size, csize)
                chunk_list.append((chunk_id, size, csize))
                offset += size
            item[b'chunks'] = chunk_list

        def robust_iterator(archive):
            """Iterates through all archive items

            Missing item chunks will be skipped and the msgpack stream will be restarted
            """
            unpacker = RobustUnpacker(lambda item: isinstance(item, dict) and b'path' in item)
            _state = 0

            def missing_chunk_detector(chunk_id):
                nonlocal _state
                if _state % 2 != int(chunk_id not in self.chunks):
                    _state += 1
                return _state
            for state, items in groupby(archive[b'items'], missing_chunk_detector):
                items = list(items)
                if state % 2:
                    self.report_progress('Archive metadata damage detected', error=True)
                    continue
                if state > 0:
                    unpacker.resync()
                for chunk_id, cdata in zip(items, repository.get_many(items)):
                    unpacker.feed(self.key.decrypt(chunk_id, cdata))
                    for item in unpacker:
                        yield item

        repository = cache_if_remote(self.repository)
        num_archives = len(self.manifest.archives)
        for i, (name, info) in enumerate(list(self.manifest.archives.items()), 1):
            self.report_progress('Analyzing archive {} ({}/{})'.format(name, i, num_archives))
            archive_id = info[b'id']
            if archive_id not in self.chunks:
                self.report_progress('Archive metadata block is missing', error=True)
                del self.manifest.archives[name]
                continue
            mark_as_possibly_superseded(archive_id)
            cdata = self.repository.get(archive_id)
            data = self.key.decrypt(archive_id, cdata)
            archive = StableDict(msgpack.unpackb(data))
            if archive[b'version'] != 1:
                raise Exception('Unknown archive metadata version')
            decode_dict(archive, (b'name', b'hostname', b'username', b'time'))  # fixme: argv
            items_buffer = ChunkBuffer(self.key)
            items_buffer.write_chunk = add_callback
            for item in robust_iterator(archive):
                if b'chunks' in item:
                    verify_file_chunks(item)
                items_buffer.add(item)
            items_buffer.flush(flush=True)
            for previous_item_id in archive[b'items']:
                mark_as_possibly_superseded(previous_item_id)
            archive[b'items'] = items_buffer.chunks
            data = msgpack.packb(archive, unicode_errors='surrogateescape')
            new_archive_id = self.key.id_hash(data)
            cdata = self.key.encrypt(data)
            add_reference(new_archive_id, len(data), len(cdata), cdata)
            info[b'id'] = new_archive_id

    def verify_chunks(self):
        unused = set()
        for id_, (count, size, csize) in self.chunks.iteritems():
            if count == 0:
                unused.add(id_)
        orphaned = unused - self.possibly_superseded
        if orphaned:
            self.report_progress('{} orphaned objects found'.format(len(orphaned)), error=True)
        if self.repair:
            for id_ in unused:
                self.repository.delete(id_)
            self.manifest.write()
            self.repository.commit()
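
# Illustrative checker entry point (a sketch; assumes an open Repository
# instance). With repair=False the check is read-only; with repair=True
# missing file chunks are replaced by zero-filled chunks and the manifest
# and refcounts are rewritten:
#
#     checker = ArchiveChecker()
#     ok = checker.check(repository, repair=False)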