# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
import math
import threading
import hashlib
import time
import logging
import binascii

from boto.compat import Queue
try:
    # queue.Empty is raised by the timed get() in TransferThread.run().
    from Queue import Empty  # Python 2
except ImportError:
    from queue import Empty  # Python 3

from boto.glacier.utils import DEFAULT_PART_SIZE, minimum_part_size, \
    chunk_hashes, tree_hash, bytes_to_hex
from boto.glacier.exceptions import UploadArchiveError, \
    DownloadArchiveError, \
    TreeHashDoesNotMatchError

_END_SENTINEL = object()
log = logging.getLogger('boto.glacier.concurrent')


class ConcurrentTransferer(object):
    def __init__(self, part_size=DEFAULT_PART_SIZE, num_threads=10):
        self._part_size = part_size
        self._num_threads = num_threads
        self._threads = []

    def _calculate_required_part_size(self, total_size):
        min_part_size_required = minimum_part_size(total_size)
        if self._part_size >= min_part_size_required:
            part_size = self._part_size
        else:
            part_size = min_part_size_required
            log.debug("The part size specified (%s) is smaller than "
                      "the minimum required part size. Using a part "
                      "size of: %s", self._part_size, part_size)
        total_parts = int(math.ceil(total_size / float(part_size)))
        return total_parts, part_size
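
    # Worked example (illustrative; Glacier multipart uploads are limited
    # to 10,000 parts, which is what minimum_part_size enforces): with the
    # default 4 MB part size, a 40 GiB archive would need 10,240 parts, so
    # minimum_part_size returns the next valid power-of-two size, 8 MB,
    # and this method returns (5120, 8 * 1024 * 1024).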

    def _shutdown_threads(self):
        log.debug("Shutting down threads.")
        for thread in self._threads:
            thread.should_continue = False
        for thread in self._threads:
            thread.join()
        log.debug("Threads have exited.")

    def _add_work_items_to_queue(self, total_parts, worker_queue, part_size):
        log.debug("Adding work items to queue.")
        for i in range(total_parts):
            worker_queue.put((i, part_size))
        # One sentinel per worker thread, so that every thread eventually
        # dequeues a shutdown marker and exits cleanly.
        for i in range(self._num_threads):
            worker_queue.put(_END_SENTINEL)


class ConcurrentUploader(ConcurrentTransferer):
    """Concurrently upload an archive to glacier.

    This class uses a thread pool to concurrently upload an archive
    to glacier using the multipart upload API.

    The threadpool is completely managed by this class and is
    transparent to the users of this class.

    """
    def __init__(self, api, vault_name, part_size=DEFAULT_PART_SIZE,
                 num_threads=10):
        """
        :type api: :class:`boto.glacier.layer1.Layer1`
        :param api: A layer1 glacier object.

        :type vault_name: str
        :param vault_name: The name of the vault.

        :type part_size: int
        :param part_size: The size, in bytes, of the parts to use when
            uploading the archive. The part size must be a megabyte
            multiplied by a power of two.

        :type num_threads: int
        :param num_threads: The number of threads to spawn for the thread
            pool. The number of threads controls how many parts are
            uploaded concurrently.

        """
        super(ConcurrentUploader, self).__init__(part_size, num_threads)
        self._api = api
        self._vault_name = vault_name

    def upload(self, filename, description=None):
        """Concurrently create an archive.

        The part_size value specified when the class was constructed
        will be used *unless* it is smaller than the minimum required
        part size needed for the size of the given file. In that case,
        the part size used will be the minimum part size required
        to properly upload the given file.

        :type filename: str
        :param filename: The name of the file to upload.

        :type description: str
        :param description: The description of the archive.

        :rtype: str
        :return: The archive id of the newly created archive.

        """
        total_size = os.stat(filename).st_size
        total_parts, part_size = self._calculate_required_part_size(total_size)
        hash_chunks = [None] * total_parts
        worker_queue = Queue()
        result_queue = Queue()
        response = self._api.initiate_multipart_upload(self._vault_name,
                                                       part_size,
                                                       description)
        upload_id = response['UploadId']
        # The basic idea is to add the chunks (the offsets, not the actual
        # contents) to a work queue, start up a thread pool, let the threads
        # crank through the items in the work queue, and then place their
        # results in a result queue which we use to complete the multipart
        # upload.
        self._add_work_items_to_queue(total_parts, worker_queue, part_size)
        self._start_upload_threads(result_queue, upload_id,
                                   worker_queue, filename)
        try:
            self._wait_for_upload_threads(hash_chunks, result_queue,
                                          total_parts)
        except UploadArchiveError as e:
            log.debug("An error occurred while uploading an archive, "
                      "aborting multipart upload.")
            self._api.abort_multipart_upload(self._vault_name, upload_id)
            raise e
        log.debug("Completing upload.")
        response = self._api.complete_multipart_upload(
            self._vault_name, upload_id, bytes_to_hex(tree_hash(hash_chunks)),
            total_size)
        log.debug("Upload finished.")
        return response['ArchiveId']

    def _wait_for_upload_threads(self, hash_chunks, result_queue,
                                 total_parts):
        for _ in range(total_parts):
            result = result_queue.get()
            if isinstance(result, Exception):
                log.debug("An error was found in the result queue, "
                          "terminating threads: %s", result)
                self._shutdown_threads()
                raise UploadArchiveError("An error occurred while uploading "
                                         "an archive: %s" % result)
            # Each unit of work returns the tree hash for the given part
            # number, which we use at the end to compute the tree hash of
            # the entire archive.
            part_number, tree_sha256 = result
            hash_chunks[part_number] = tree_sha256
        self._shutdown_threads()

    def _start_upload_threads(self, result_queue, upload_id, worker_queue,
                              filename):
        log.debug("Starting threads.")
        for _ in range(self._num_threads):
            thread = UploadWorkerThread(self._api, self._vault_name, filename,
                                        upload_id, worker_queue, result_queue)
            # Stagger thread startup slightly.
            time.sleep(0.2)
            thread.start()
            self._threads.append(thread)
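
    # Example usage (a minimal sketch; it assumes valid AWS credentials
    # and an existing vault named 'myvault'):
    #
    #     from boto.glacier.layer1 import Layer1
    #
    #     api = Layer1()
    #     uploader = ConcurrentUploader(api, 'myvault',
    #                                   part_size=8 * 1024 * 1024,
    #                                   num_threads=4)
    #     archive_id = uploader.upload('backup.tar.gz', 'nightly backup')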


class TransferThread(threading.Thread):
    def __init__(self, worker_queue, result_queue):
        super(TransferThread, self).__init__()
        self._worker_queue = worker_queue
        self._result_queue = result_queue
        # This value can be set externally by other objects
        # to indicate that the thread should be shut down.
        self.should_continue = True

    def run(self):
        while self.should_continue:
            try:
                work = self._worker_queue.get(timeout=1)
            except Empty:
                continue
            if work is _END_SENTINEL:
                self._cleanup()
                return
            result = self._process_chunk(work)
            self._result_queue.put(result)
        self._cleanup()

    def _process_chunk(self, work):
        # Hook for subclasses: transfer one part and return the result
        # (or an Exception instance on failure).
        pass

    def _cleanup(self):
        # Hook for subclasses: release per-thread resources on shutdown.
        pass
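

# A minimal subclass sketch (hypothetical, for illustration only): override
# _process_chunk to handle one (part_number, part_size) work item and
# _cleanup to release any per-thread resources:
#
#     class NoOpWorkerThread(TransferThread):
#         def _process_chunk(self, work):
#             part_number, part_size = work
#             return (part_number, None)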


class UploadWorkerThread(TransferThread):
    def __init__(self, api, vault_name, filename, upload_id,
                 worker_queue, result_queue, num_retries=5,
                 time_between_retries=5,
                 retry_exceptions=Exception):
        super(UploadWorkerThread, self).__init__(worker_queue, result_queue)
        self._api = api
        self._vault_name = vault_name
        self._filename = filename
        self._fileobj = open(filename, 'rb')
        self._upload_id = upload_id
        self._num_retries = num_retries
        self._time_between_retries = time_between_retries
        self._retry_exceptions = retry_exceptions

    def _process_chunk(self, work):
        result = None
        for i in range(self._num_retries + 1):
            try:
                result = self._upload_chunk(work)
                break
            except self._retry_exceptions as e:
                log.error("Exception caught uploading part number %s for "
                          "vault %s, attempt: (%s / %s), filename: %s, "
                          "exception: %s, msg: %s",
                          work[0], self._vault_name, i + 1,
                          self._num_retries + 1, self._filename,
                          e.__class__, e)
                time.sleep(self._time_between_retries)
                result = e
        return result

    def _upload_chunk(self, work):
        part_number, part_size = work
        start_byte = part_number * part_size
        self._fileobj.seek(start_byte)
        contents = self._fileobj.read(part_size)
        linear_hash = hashlib.sha256(contents).hexdigest()
        tree_hash_bytes = tree_hash(chunk_hashes(contents))
        byte_range = (start_byte, start_byte + len(contents) - 1)
        log.debug("Uploading chunk %s of size %s", part_number, part_size)
        response = self._api.upload_part(self._vault_name, self._upload_id,
                                         linear_hash,
                                         bytes_to_hex(tree_hash_bytes),
                                         byte_range, contents)
        # Reading the response allows the connection to be reused.
        response.read()
        return (part_number, tree_hash_bytes)

    def _cleanup(self):
        self._fileobj.close()


class ConcurrentDownloader(ConcurrentTransferer):
    """
    Concurrently download an archive from glacier.

    This class uses a thread pool to concurrently download an archive
    from glacier.

    The threadpool is completely managed by this class and is
    transparent to the users of this class.

    """
    def __init__(self, job, part_size=DEFAULT_PART_SIZE,
                 num_threads=10):
        """
        :param job: A layer2 job object for the archive retrieval.

        :param part_size: The size, in bytes, of the parts to use when
            downloading the archive. The part size must be a megabyte
            multiplied by a power of two.

        :param num_threads: The number of threads to spawn for the thread
            pool.

        """
        super(ConcurrentDownloader, self).__init__(part_size, num_threads)
        self._job = job

    def download(self, filename):
        """
        Concurrently download an archive.

        :type filename: str
        :param filename: The filename to download the archive to.

        """
        total_size = self._job.archive_size
        total_parts, part_size = self._calculate_required_part_size(total_size)
        worker_queue = Queue()
        result_queue = Queue()
        self._add_work_items_to_queue(total_parts, worker_queue, part_size)
        self._start_download_threads(result_queue, worker_queue)
        try:
            self._wait_for_download_threads(filename, result_queue,
                                            total_parts)
        except DownloadArchiveError as e:
            log.debug("An error occurred while downloading an archive: %s", e)
            raise e
        log.debug("Download completed.")

    def _wait_for_download_threads(self, filename, result_queue, total_parts):
        """
        Wait until the result_queue has been filled with all the downloaded
        parts, which indicates that all part downloads have completed, and
        write the downloaded parts to the given file.

        :param filename: The file to which the downloaded parts are written.

        :param result_queue: The queue from which completed part results
            are read.

        :param total_parts: The total number of parts expected.
        """
        hash_chunks = [None] * total_parts
        with open(filename, "wb") as f:
            for _ in range(total_parts):
                result = result_queue.get()
                if isinstance(result, Exception):
                    log.debug("An error was found in the result queue, "
                              "terminating threads: %s", result)
                    self._shutdown_threads()
                    raise DownloadArchiveError(
                        "An error occurred while downloading "
                        "an archive: %s" % result)
                part_number, part_size, actual_hash, data = result
                hash_chunks[part_number] = actual_hash
                start_byte = part_number * part_size
                f.seek(start_byte)
                f.write(data)
                f.flush()
        final_hash = bytes_to_hex(tree_hash(hash_chunks))
        log.debug("Verifying final tree hash of archive, expecting: %s, "
                  "actual: %s", self._job.sha256_treehash, final_hash)
        if self._job.sha256_treehash != final_hash:
            self._shutdown_threads()
            raise TreeHashDoesNotMatchError(
                "Tree hash for entire archive does not match, "
                "expected: %s, got: %s" % (self._job.sha256_treehash,
                                           final_hash))
        self._shutdown_threads()

    def _start_download_threads(self, result_queue, worker_queue):
        log.debug("Starting threads.")
        for _ in range(self._num_threads):
            thread = DownloadWorkerThread(self._job, worker_queue,
                                          result_queue)
            time.sleep(0.2)
            thread.start()
            self._threads.append(thread)
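
    # Example usage (a minimal sketch; it assumes a completed
    # archive-retrieval job, e.g. obtained through the layer2 interface):
    #
    #     from boto.glacier.layer2 import Layer2
    #
    #     vault = Layer2().get_vault('myvault')
    #     job = vault.get_job(job_id)  # job_id from retrieve_archive()
    #     downloader = ConcurrentDownloader(job, num_threads=4)
    #     downloader.download('restored.tar.gz')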


class DownloadWorkerThread(TransferThread):
    def __init__(self, job,
                 worker_queue, result_queue,
                 num_retries=5,
                 time_between_retries=5,
                 retry_exceptions=Exception):
        """
        Individual download thread that downloads parts of the archive
        from Glacier. The parts to download are read from the worker
        queue.

        :param job: Glacier job object.

        :param worker_queue: A queue of (part_number, part_size) tuples
            describing the parts to download.

        :param result_queue: A queue onto which each downloaded part is
            placed as a (part_number, part_size, tree hash, data) tuple,
            or an Exception if the download ultimately failed.
        """
        super(DownloadWorkerThread, self).__init__(worker_queue, result_queue)
        self._job = job
        self._num_retries = num_retries
        self._time_between_retries = time_between_retries
        self._retry_exceptions = retry_exceptions

    def _process_chunk(self, work):
        """
        Attempt to download a part of the archive from Glacier,
        retrying on failure.

        :param work: A (part_number, part_size) tuple.
        """
        result = None
        for _ in range(self._num_retries + 1):
            try:
                result = self._download_chunk(work)
                break
            except self._retry_exceptions as e:
                log.error("Exception caught downloading part number %s for "
                          "job %s: %s", work[0], self._job, e)
                time.sleep(self._time_between_retries)
                result = e
        return result

    def _download_chunk(self, work):
        """
        Download a chunk of the archive from Glacier, verify its tree
        hash, and return the part number, part size, tree hash, and data.

        :param work: A (part_number, part_size) tuple.
        """
        part_number, part_size = work
        start_byte = part_number * part_size
        byte_range = (start_byte, start_byte + part_size - 1)
        log.debug("Downloading chunk %s of size %s", part_number, part_size)
        response = self._job.get_output(byte_range)
        data = response.read()
        actual_hash = bytes_to_hex(tree_hash(chunk_hashes(data)))
        if response['TreeHash'] != actual_hash:
            raise TreeHashDoesNotMatchError(
                "Tree hash for part number %s does not match, "
                "expected: %s, got: %s" % (part_number, response['TreeHash'],
                                           actual_hash))
        return (part_number, part_size, binascii.unhexlify(actual_hash), data)
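

# Tree-hash sketch (illustrative; chunk_hashes and tree_hash come from
# boto.glacier.utils): Glacier's tree hash is computed by SHA-256 hashing
# each 1 MB chunk of the data, then repeatedly hashing concatenated pairs
# of digests until a single root digest remains. Verifying a downloaded
# file by hand would look roughly like:
#
#     with open('restored.tar.gz', 'rb') as f:
#         root = bytes_to_hex(tree_hash(chunk_hashes(f.read())))
#     assert root == job.sha256_treehash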