archive.py 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284
  1. from datetime import datetime
  2. from getpass import getuser
  3. import msgpack
  4. import os
  5. import socket
  6. import stat
  7. import sys
  8. from xattr import xattr, XATTR_NOFOLLOW
  9. from . import NS_ARCHIVE_METADATA, NS_ARCHIVE_ITEMS, NS_ARCHIVE_CHUNKS, NS_CHUNK
  10. from ._speedups import chunkify
  11. from .helpers import uid2user, user2uid, gid2group, group2gid, IntegrityError
# Target chunk size and rolling-hash window used by the chunker (see
# process_file, which passes both to chunkify()).
CHUNK_SIZE = 64 * 1024
WINDOW_SIZE = 4096
# os.lchmod exists only on some platforms (e.g. BSD/OS X); restore_attrs
# falls back to os.chmod when it is missing.
have_lchmod = hasattr(os, 'lchmod')
# Python 2 reports Linux as 'linux2'; used to filter xattr namespaces
# in stat_attrs.
linux = sys.platform == 'linux2'
class Archive(object):
    """A named backup archive stored in an encrypted key/value store.

    Holds the archive metadata, the item list and the chunk list, and
    knows how to create, load, extract, verify and delete archives.
    """

    class DoesNotExist(Exception):
        # Raised by load() when the requested archive id is not in the store.
        pass
  19. def __init__(self, store, keychain, name=None):
  20. self.keychain = keychain
  21. self.store = store
  22. self.items = []
  23. self.chunks = []
  24. self.chunk_idx = {}
  25. self.hard_links = {}
  26. if name:
  27. self.load(self.keychain.id_hash(name))
    def load(self, id):
        """Load and decrypt the metadata record for archive *id*.

        Raises Archive.DoesNotExist when the store has no metadata entry
        for this id.
        """
        self.id = id
        try:
            data, self.hash = self.keychain.decrypt(self.store.get(NS_ARCHIVE_METADATA, self.id))
        except self.store.DoesNotExist:
            raise self.DoesNotExist
        self.metadata = msgpack.unpackb(data)
        # NOTE(review): assert is stripped under -O; an explicit raise would
        # be safer for on-disk format validation.
        assert self.metadata['version'] == 1
    def get_items(self):
        """Fetch and decrypt this archive's chunk list and item list.

        Verifies that the hashes recorded in the (already loaded) metadata
        match the decrypted payloads, then populates self.chunks, self.items
        and self.chunk_idx.
        """
        data, chunks_hash = self.keychain.decrypt(self.store.get(NS_ARCHIVE_CHUNKS, self.id))
        chunks = msgpack.unpackb(data)
        assert chunks['version'] == 1
        assert self.metadata['chunks_hash'] == chunks_hash
        self.chunks = chunks['chunks']
        data, items_hash = self.keychain.decrypt(self.store.get(NS_ARCHIVE_ITEMS, self.id))
        items = msgpack.unpackb(data)
        assert items['version'] == 1
        assert self.metadata['items_hash'] == items_hash
        self.items = items['items']
        # When reading, chunk_idx maps list index -> chunk id: items reference
        # chunks by their position in the chunk list (see extract_item).  This
        # is the inverse of the id -> index mapping built while writing.
        for i, chunk in enumerate(self.chunks):
            self.chunk_idx[i] = chunk[0]
    def save(self, name):
        """Encrypt and store chunk list, item list and metadata, then commit.

        The archive id is derived from *name* via the keychain's id hash.
        """
        self.id = self.keychain.id_hash(name)
        chunks = {'version': 1, 'chunks': self.chunks}
        data, chunks_hash = self.keychain.encrypt_create(msgpack.packb(chunks))
        self.store.put(NS_ARCHIVE_CHUNKS, self.id, data)
        items = {'version': 1, 'items': self.items}
        # NOTE(review): the chunk list uses encrypt_create while items and
        # metadata use encrypt_read -- presumably two different keys with
        # different access levels; confirm the asymmetry is intentional.
        data, items_hash = self.keychain.encrypt_read(msgpack.packb(items))
        self.store.put(NS_ARCHIVE_ITEMS, self.id, data)
        metadata = {
            'version': 1,
            'name': name,
            'chunks_hash': chunks_hash,
            'items_hash': items_hash,
            'cmdline': sys.argv,
            'hostname': socket.gethostname(),
            'username': getuser(),
            'time': datetime.utcnow().isoformat(),
        }
        data, self.hash = self.keychain.encrypt_read(msgpack.packb(metadata))
        self.store.put(NS_ARCHIVE_METADATA, self.id, data)
        # Single commit makes the archive visible atomically.
        self.store.commit()
  70. def stats(self, cache):
  71. self.get_items()
  72. osize = csize = usize = 0
  73. for item in self.items:
  74. if stat.S_ISREG(item['mode']) and not 'source' in item:
  75. osize += item['size']
  76. for id, size in self.chunks:
  77. csize += size
  78. if cache.seen_chunk(id) == 1:
  79. usize += size
  80. return osize, csize, usize
    def extract_item(self, item, dest=None):
        """Restore a single archive *item* beneath directory *dest*.

        Dispatches on the item's mode: directory, fifo, symlink, hard link
        or regular file. Raises for unknown item types.
        """
        # NOTE(review): os.getcwdu() is Python 2 only.
        dest = dest or os.getcwdu()
        # NOTE(review): dir_stat_queue is never used in this method.
        dir_stat_queue = []
        # Stored paths must be relative (process_* strips '/', '\\', ':').
        assert item['path'][0] not in ('/', '\\', ':')
        path = os.path.join(dest, item['path'].decode('utf-8'))
        mode = item['mode']
        if stat.S_ISDIR(mode):
            if not os.path.exists(path):
                os.makedirs(path)
            self.restore_attrs(path, item)
        elif stat.S_ISFIFO(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            os.mkfifo(path)
            self.restore_attrs(path, item)
        elif stat.S_ISLNK(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            source = item['source']
            # Remove any existing entry so os.symlink cannot fail with EEXIST.
            if os.path.exists(path):
                os.unlink(path)
            os.symlink(source, path)
            self.restore_attrs(path, item, symlink=True)
        elif stat.S_ISREG(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            # Hard link?
            if 'source' in item:
                source = os.path.join(dest, item['source'])
                if os.path.exists(path):
                    os.unlink(path)
                os.link(source, path)
            else:
                with open(path, 'wb') as fd:
                    # Items reference chunks by index into the chunk list;
                    # chunk_idx maps that index to the chunk id (get_items).
                    for chunk in item['chunks']:
                        id = self.chunk_idx[chunk]
                        try:
                            data, hash = self.keychain.decrypt(self.store.get(NS_CHUNK, id))
                            if self.keychain.id_hash(data) != id:
                                raise IntegrityError('chunk id did not match')
                            fd.write(data)
                        # keychain.decrypt raises ValueError on a bad
                        # checksum (presumably -- see verify_file).
                        # NOTE(review): the IntegrityError raised above is
                        # not caught here and propagates to the caller.
                        except ValueError:
                            raise Exception('Invalid chunk checksum')
                self.restore_attrs(path, item)
        else:
            raise Exception('Unknown archive item type %r' % item['mode'])
    def restore_attrs(self, path, item, symlink=False):
        """Apply xattrs, mode, ownership and timestamps from *item* to *path*.

        With symlink=True, operations that would follow the link (chmod
        without lchmod, utime) are skipped.
        """
        xattrs = item.get('xattrs')
        if xattrs:
            xa = xattr(path, XATTR_NOFOLLOW)
            for k, v in xattrs.items():
                try:
                    xa.set(k, v)
                # NOTE(review): presumably the xattr module raises KeyError
                # for unsupported/invalid attributes -- confirm.
                except KeyError:
                    pass
        if have_lchmod:
            os.lchmod(path, item['mode'])
        elif not symlink:
            # Without lchmod, chmod on a symlink would follow the link.
            os.chmod(path, item['mode'])
        # Prefer the stored user/group *name*; fall back to the numeric ids
        # when the name does not resolve on this system.
        # NOTE(review): the `or` fallback also triggers when the name maps
        # to uid/gid 0 (falsy) -- benign only if the stored ids match.
        uid = user2uid(item['user']) or item['uid']
        gid = group2gid(item['group']) or item['gid']
        try:
            os.lchown(path, uid, gid)
        except OSError:
            # Best effort: typically EPERM when not running as root.
            pass
        if not symlink:
            # FIXME: We should really call futimes here (c extension required)
            os.utime(path, (item['atime'], item['mtime']))
  149. def verify_file(self, item):
  150. for chunk in item['chunks']:
  151. id = self.chunk_idx[chunk]
  152. try:
  153. data, hash = self.keychain.decrypt(self.store.get(NS_CHUNK, id))
  154. if self.keychain.id_hash(data) != id:
  155. raise IntegrityError('chunk id did not match')
  156. except IntegrityError:
  157. return False
  158. return True
    def delete(self, cache):
        """Remove this archive from the store and release its chunk refs."""
        # Need the chunk list before deleting the store entries.
        self.get_items()
        self.store.delete(NS_ARCHIVE_CHUNKS, self.id)
        self.store.delete(NS_ARCHIVE_ITEMS, self.id)
        self.store.delete(NS_ARCHIVE_METADATA, self.id)
        # Drop this archive's reference on every chunk it used.
        for id, size in self.chunks:
            cache.chunk_decref(id)
        self.store.commit()
        cache.save()
    def stat_attrs(self, st, path):
        """Build an item dict with ownership/permission/time/xattr metadata.

        :param st: os.stat_result for *path*
        :param path: filesystem path, used only to read extended attributes
        """
        item = {
            'mode': st.st_mode,
            'uid': st.st_uid, 'user': uid2user(st.st_uid),
            'gid': st.st_gid, 'group': gid2group(st.st_gid),
            'atime': st.st_atime, 'mtime': st.st_mtime,
        }
        try:
            xa = xattr(path, XATTR_NOFOLLOW)
            xattrs = {}
            for key in xa:
                # Only store the user namespace on Linux
                # NOTE(review): startswith('user') also matches names that
                # merely begin with "user"; 'user.' may be the intended
                # namespace prefix -- confirm.
                if linux and not key.startswith('user'):
                    continue
                xattrs[key] = xa[key]
            if xattrs:
                item['xattrs'] = xattrs
        except IOError:
            # Filesystem without xattr support (or unreadable attrs): skip.
            pass
        return item
  188. def process_dir(self, path, st):
  189. item = {'path': path.lstrip('/\\:')}
  190. item.update(self.stat_attrs(st, path))
  191. self.items.append(item)
  192. def process_fifo(self, path, st):
  193. item = {'path': path.lstrip('/\\:')}
  194. item.update(self.stat_attrs(st, path))
  195. self.items.append(item)
  196. def process_symlink(self, path, st):
  197. source = os.readlink(path)
  198. item = {'path': path.lstrip('/\\:'), 'source': source}
  199. item.update(self.stat_attrs(st, path))
  200. self.items.append(item)
  201. def process_file(self, path, st, cache):
  202. safe_path = path.lstrip('/\\:')
  203. # Is it a hard link?
  204. if st.st_nlink > 1:
  205. source = self.hard_links.get((st.st_ino, st.st_dev))
  206. if (st.st_ino, st.st_dev) in self.hard_links:
  207. self.items.append({'path': path, 'source': source})
  208. return
  209. else:
  210. self.hard_links[st.st_ino, st.st_dev] = safe_path
  211. path_hash = self.keychain.id_hash(path.encode('utf-8'))
  212. ids, size = cache.file_known_and_unchanged(path_hash, st)
  213. if ids is not None:
  214. # Make sure all ids are available
  215. for id in ids:
  216. if not cache.seen_chunk(id):
  217. ids = None
  218. break
  219. else:
  220. chunks = [self.process_chunk2(id, cache) for id in ids]
  221. # Only chunkify the file if needed
  222. if ids is None:
  223. fd = open(path, 'rb')
  224. with open(path, 'rb') as fd:
  225. size = 0
  226. ids = []
  227. chunks = []
  228. for chunk in chunkify(fd, CHUNK_SIZE, WINDOW_SIZE,
  229. self.keychain.get_chunkify_seed()):
  230. id = self.keychain.id_hash(chunk)
  231. ids.append(id)
  232. try:
  233. chunks.append(self.chunk_idx[id])
  234. except KeyError:
  235. chunks.append(self.process_chunk(id, chunk, cache))
  236. size += len(chunk)
  237. cache.memorize_file_chunks(path_hash, st, ids)
  238. item = {'path': safe_path, 'chunks': chunks, 'size': size}
  239. item.update(self.stat_attrs(st, path))
  240. self.items.append(item)
  241. def process_chunk2(self, id, cache):
  242. try:
  243. return self.chunk_idx[id]
  244. except KeyError:
  245. idx = len(self.chunks)
  246. id, size = cache.chunk_incref(id)
  247. self.chunks.append((id, size))
  248. self.chunk_idx[id] = idx
  249. return idx
  250. def process_chunk(self, id, data, cache):
  251. idx = len(self.chunks)
  252. id, size = cache.add_chunk(id, data)
  253. self.chunks.append((id, size))
  254. self.chunk_idx[id] = idx
  255. return idx
  256. @staticmethod
  257. def list_archives(store, keychain):
  258. for id in list(store.list(NS_ARCHIVE_METADATA)):
  259. archive = Archive(store, keychain)
  260. archive.load(id)
  261. yield archive