archive.py

from __future__ import with_statement
from datetime import datetime, timedelta
from getpass import getuser
import msgpack
import os
import socket
import stat
import sys
from cStringIO import StringIO
from xattr import xattr, XATTR_NOFOLLOW

from . import NS_ARCHIVE_METADATA, NS_CHUNK
from ._speedups import chunkify
from .helpers import uid2user, user2uid, gid2group, group2gid, IntegrityError, \
    Counter, encode_filename

CHUNK_SIZE = 64 * 1024
WINDOW_SIZE = 4096

have_lchmod = hasattr(os, 'lchmod')
linux = sys.platform == 'linux2'

class Archive(object):

    class DoesNotExist(Exception):
        pass

    def __init__(self, store, key, name=None, cache=None):
        self.key = key
        self.store = store
        self.cache = cache
        self.items = StringIO()
        self.items_ids = []
        self.hard_links = {}
        if name:
            self.load(self.key.archive_hash(name))

    def load(self, id):
        self.id = id
        try:
            data, self.hash = self.key.decrypt(self.store.get(NS_ARCHIVE_METADATA, self.id))
        except self.store.DoesNotExist:
            raise self.DoesNotExist
        self.metadata = msgpack.unpackb(data)
        assert self.metadata['version'] == 1

    @property
    def ts(self):
        """Timestamp of archive creation in UTC"""
        t, f = self.metadata['time'].split('.', 1)
        return datetime.strptime(t, '%Y-%m-%dT%H:%M:%S') + timedelta(seconds=float('.' + f))
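
    # The 'time' metadata field is stored as datetime.utcnow().isoformat()
    # (see save() below). A worked example with a hypothetical value:
    # '2011-07-30T12:42:23.075673' splits into '2011-07-30T12:42:23' and
    # '075673', so ts returns
    # datetime(2011, 7, 30, 12, 42, 23) + timedelta(seconds=0.075673).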

    def iter_items(self, callback):
        unpacker = msgpack.Unpacker()
        counter = Counter(0)

        def cb(chunk, error, id):
            if error:
                raise error
            assert not error
            counter.dec()
            data, items_hash = self.key.decrypt(chunk)
            assert self.key.id_hash(data) == id
            unpacker.feed(data)
            for item in unpacker:
                callback(item)

        for id, size, csize in self.metadata['items']:
            # Limit the number of concurrent items requests to 10
            self.store.flush_rpc(counter, 10)
            counter.inc()
            self.store.get(NS_CHUNK, id, callback=cb, callback_data=id)
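
    # Hypothetical usage sketch (the callback name and the trailing
    # flush_rpc() call are assumptions, not taken from this file):
    #
    #   def print_item(item):
    #       print item['path']
    #
    #   archive.iter_items(print_item)
    #   archive.store.flush_rpc()  # wait for the remaining async get() callbacks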

    def add_item(self, item):
        self.items.write(msgpack.packb(item))
        if self.items.tell() > 1024 * 1024:
            self.flush_items()

    def flush_items(self, flush=False):
        if self.items.tell() == 0:
            return
        self.items.seek(0)
        chunks = list(str(s) for s in chunkify(self.items, CHUNK_SIZE, WINDOW_SIZE, self.key.chunk_seed))
        self.items.seek(0)
        self.items.truncate()
        for chunk in chunks[:-1]:
            self.items_ids.append(self.cache.add_chunk(self.key.id_hash(chunk), chunk))
        if flush or len(chunks) == 1:
            self.items_ids.append(self.cache.add_chunk(self.key.id_hash(chunks[-1]), chunks[-1]))
        else:
            # Keep the last, still growing chunk buffered so later items can extend it
            self.items.write(chunks[-1])

    def save(self, name, cache):
        self.id = self.key.archive_hash(name)
        self.flush_items(flush=True)
        metadata = {
            'version': 1,
            'name': name,
            'items': self.items_ids,
            'cmdline': sys.argv,
            'hostname': socket.gethostname(),
            'username': getuser(),
            'time': datetime.utcnow().isoformat(),
        }
        data, self.hash = self.key.encrypt(msgpack.packb(metadata))
        self.store.put(NS_ARCHIVE_METADATA, self.id, data)
        self.store.commit()
        cache.commit()
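
    # A minimal creation sketch using only methods defined in this class
    # (store, key and cache construction is omitted and assumed to exist):
    #
    #   archive = Archive(store, key, cache=cache)
    #   st = os.lstat('/etc/hosts')
    #   archive.process_file('/etc/hosts', st, cache)
    #   archive.save('my-backup', cache)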

    def stats(self, cache):
        # This function is a bit evil since it abuses the cache to calculate
        # the stats. The cache transaction must be rolled back afterwards
        def cb(chunk, error, id):
            assert not error
            data, items_hash = self.key.decrypt(chunk)
            assert self.key.id_hash(data) == id
            unpacker.feed(data)
            for item in unpacker:
                try:
                    for id, size, csize in item['chunks']:
                        count, _, _ = self.cache.chunks[id]
                        stats['osize'] += size
                        stats['csize'] += csize
                        if count == 1:
                            stats['usize'] += csize
                        self.cache.chunks[id] = count - 1, size, csize
                except KeyError:
                    pass

        unpacker = msgpack.Unpacker()
        cache.begin_txn()
        stats = {'osize': 0, 'csize': 0, 'usize': 0}
        for id, size, csize in self.metadata['items']:
            stats['osize'] += size
            stats['csize'] += csize
            if self.cache.seen_chunk(id) == 1:
                stats['usize'] += csize
            self.store.get(NS_CHUNK, id, callback=cb, callback_data=id)
            self.cache.chunk_decref(id)
        self.store.flush_rpc()
        cache.rollback()
        return stats

    def extract_item(self, item, dest=None, start_cb=None, restore_attrs=True):
        dest = dest or os.getcwdu()
        dir_stat_queue = []
        assert item['path'][0] not in ('/', '\\', ':')
        path = os.path.join(dest, encode_filename(item['path']))
        mode = item['mode']
        if stat.S_ISDIR(mode):
            if not os.path.exists(path):
                os.makedirs(path)
            if restore_attrs:
                self.restore_attrs(path, item)
        elif stat.S_ISFIFO(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            os.mkfifo(path)
            self.restore_attrs(path, item)
        elif stat.S_ISLNK(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            source = item['source']
            if os.path.exists(path):
                os.unlink(path)
            os.symlink(source, path)
            self.restore_attrs(path, item, symlink=True)
        elif stat.S_ISREG(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            # Hard link?
            if 'source' in item:
                source = os.path.join(dest, item['source'])
                if os.path.exists(path):
                    os.unlink(path)
                os.link(source, path)
            else:
                def extract_cb(chunk, error, (id, i)):
                    if i == 0:
                        state['fd'] = open(path, 'wb')
                        start_cb(item)
                    assert not error
                    data, hash = self.key.decrypt(chunk)
                    if self.key.id_hash(data) != id:
                        raise IntegrityError('chunk hash did not match')
                    state['fd'].write(data)
                    if i == n - 1:
                        state['fd'].close()
                        self.restore_attrs(path, item)

                state = {}
                n = len(item['chunks'])
                # 0 chunks indicates an empty (0 bytes) file
                if n == 0:
                    open(path, 'wb').close()
                    start_cb(item)
                    self.restore_attrs(path, item)
                else:
                    for i, (id, size, csize) in enumerate(item['chunks']):
                        self.store.get(NS_CHUNK, id, callback=extract_cb, callback_data=(id, i))
        else:
            raise Exception('Unknown archive item type %r' % item['mode'])
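
    # Extraction is asynchronous as well: extract_item() only queues chunk
    # requests for regular files, so a hypothetical caller would combine it
    # with iter_items() and flush the store afterwards (a sketch; everything
    # except the methods shown above is an assumption):
    #
    #   def start(item):
    #       print 'Extracting', item['path']
    #
    #   archive.iter_items(lambda item: archive.extract_item(item, dest='/tmp/restore', start_cb=start))
    #   archive.store.flush_rpc()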

    def restore_attrs(self, path, item, symlink=False):
        xattrs = item.get('xattrs')
        if xattrs:
            xa = xattr(path, XATTR_NOFOLLOW)
            for k, v in xattrs.items():
                try:
                    xa.set(k, v)
                except (IOError, KeyError):
                    pass
        if have_lchmod:
            os.lchmod(path, item['mode'])
        elif not symlink:
            os.chmod(path, item['mode'])
        uid = user2uid(item['user']) or item['uid']
        gid = group2gid(item['group']) or item['gid']
        try:
            os.lchown(path, uid, gid)
        except OSError:
            pass
        if not symlink:
            # FIXME: We should really call futimes here (c extension required)
            os.utime(path, (item['mtime'], item['mtime']))

    def verify_file(self, item, start, result):
        def verify_chunk(chunk, error, (id, i)):
            if error:
                raise error
            if i == 0:
                start(item)
            data, hash = self.key.decrypt(chunk)
            if self.key.id_hash(data) != id:
                result(item, False)
            elif i == n - 1:
                result(item, True)

        n = len(item['chunks'])
        if n == 0:
            start(item)
            result(item, True)
        else:
            for i, (id, size, csize) in enumerate(item['chunks']):
                self.store.get(NS_CHUNK, id, callback=verify_chunk, callback_data=(id, i))

    def delete(self, cache):
        def callback(chunk, error, id):
            assert not error
            data, items_hash = self.key.decrypt(chunk)
            if self.key.id_hash(data) != id:
                raise IntegrityError('Chunk checksum mismatch')
            unpacker.feed(data)
            for item in unpacker:
                try:
                    for chunk_id, size, csize in item['chunks']:
                        self.cache.chunk_decref(chunk_id)
                except KeyError:
                    pass
            self.cache.chunk_decref(id)

        unpacker = msgpack.Unpacker()
        for id, size, csize in self.metadata['items']:
            self.store.get(NS_CHUNK, id, callback=callback, callback_data=id)
        self.store.flush_rpc()
        self.store.delete(NS_ARCHIVE_METADATA, self.id)
        self.store.commit()
        cache.commit()

    def stat_attrs(self, st, path):
        item = {
            'mode': st.st_mode,
            'uid': st.st_uid, 'user': uid2user(st.st_uid),
            'gid': st.st_gid, 'group': gid2group(st.st_gid),
            'mtime': st.st_mtime,
        }
        try:
            xa = xattr(path, XATTR_NOFOLLOW)
            xattrs = {}
            for key in xa:
                # Only store the user namespace on Linux
                if linux and not key.startswith('user'):
                    continue
                xattrs[key] = xa[key]
            if xattrs:
                item['xattrs'] = xattrs
        except IOError:
            pass
        return item

    def process_dir(self, path, st):
        item = {'path': path.lstrip('/\\:')}
        item.update(self.stat_attrs(st, path))
        self.add_item(item)

    def process_fifo(self, path, st):
        item = {'path': path.lstrip('/\\:')}
        item.update(self.stat_attrs(st, path))
        self.add_item(item)

    def process_symlink(self, path, st):
        source = os.readlink(path)
        item = {'path': path.lstrip('/\\:'), 'source': source}
        item.update(self.stat_attrs(st, path))
        self.add_item(item)

    def process_file(self, path, st, cache):
        safe_path = path.lstrip('/\\:')
        # Is it a hard link?
        if st.st_nlink > 1:
            source = self.hard_links.get((st.st_ino, st.st_dev))
            if (st.st_ino, st.st_dev) in self.hard_links:
                item = self.stat_attrs(st, path)
                item.update({'path': safe_path, 'source': source})
                self.add_item(item)
                return
            else:
                self.hard_links[st.st_ino, st.st_dev] = safe_path
        path_hash = self.key.id_hash(path)
        ids = cache.file_known_and_unchanged(path_hash, st)
        chunks = None
        if ids is not None:
            # Make sure all ids are available
            for id in ids:
                if not cache.seen_chunk(id):
                    break
            else:
                chunks = [cache.chunk_incref(id) for id in ids]
        # Only chunkify the file if needed
        if chunks is None:
            with open(path, 'rb') as fd:
                chunks = []
                for chunk in chunkify(fd, CHUNK_SIZE, WINDOW_SIZE,
                                      self.key.chunk_seed):
                    chunks.append(cache.add_chunk(self.key.id_hash(chunk), chunk))
            ids = [id for id, _, _ in chunks]
            cache.memorize_file(path_hash, st, ids)
        item = {'path': safe_path, 'chunks': chunks}
        item.update(self.stat_attrs(st, path))
        self.add_item(item)

    @staticmethod
    def list_archives(store, key, cache=None):
        for id in list(store.list(NS_ARCHIVE_METADATA)):
            archive = Archive(store, key, cache=cache)
            archive.load(id)
            yield archive
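
# A hypothetical enumeration sketch (store and key setup omitted): each yielded
# Archive already has its metadata loaded, so the name and timestamp are available.
#
#   for archive in Archive.list_archives(store, key):
#       print archive.metadata['name'], archive.ts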