stream_encoder_mt.c

///////////////////////////////////////////////////////////////////////////////
//
/// \file       stream_encoder_mt.c
/// \brief      Multithreaded .xz Stream encoder
//
//  Author:     Lasse Collin
//
//  This file has been put into the public domain.
//  You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////

#include "filter_encoder.h"
#include "easy_preset.h"
#include "block_encoder.h"
#include "block_buffer_encoder.h"
#include "index_encoder.h"
#include "outqueue.h"


/// Maximum supported block size. This makes it simpler to prevent integer
/// overflows if we are given an unusually large block size.
#define BLOCK_SIZE_MAX (UINT64_MAX / LZMA_THREADS_MAX)

typedef enum {
	/// Waiting for work.
	THR_IDLE,

	/// Encoding is in progress.
	THR_RUN,

	/// Encoding is in progress but no more input data will
	/// be read.
	THR_FINISH,

	/// The main thread wants the thread to stop whatever it was doing
	/// but not exit.
	THR_STOP,

	/// The main thread wants the thread to exit. We could use
	/// cancellation but since there's THR_STOP anyway, this is lazier.
	THR_EXIT,

} worker_state;
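// How the states are used in this file: the main thread moves an idle
// thread to THR_RUN in get_thread(), to THR_FINISH in stream_encode_in()
// once a Block has received all of its input, and to THR_STOP or THR_EXIT
// in threads_stop() and threads_end(). A worker returns itself to THR_IDLE
// in worker_start() after finishing a Block or after being stopped.
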
typedef struct lzma_stream_coder_s lzma_stream_coder;

typedef struct worker_thread_s worker_thread;

struct worker_thread_s {
	worker_state state;

	/// Input buffer of coder->block_size bytes. The main thread will
	/// put new input into this and update in_size accordingly. Once
	/// no more input is coming, state will be set to THR_FINISH.
	uint8_t *in;

	/// Amount of data available in the input buffer. This is modified
	/// only by the main thread.
	size_t in_size;

	/// Output buffer for this thread. This is set by the main
	/// thread every time a new Block is started with this thread
	/// structure.
	lzma_outbuf *outbuf;

	/// Pointer to the main structure is needed when putting this
	/// thread back to the stack of free threads.
	lzma_stream_coder *coder;

	/// The allocator is set by the main thread. Since a copy of the
	/// pointer is kept here, the application must not change the
	/// allocator before calling lzma_end().
	const lzma_allocator *allocator;

	/// Amount of uncompressed data that has already been compressed.
	uint64_t progress_in;

	/// Amount of compressed data that is ready.
	uint64_t progress_out;

	/// Block encoder
	lzma_next_coder block_encoder;

	/// Compression options for this Block
	lzma_block block_options;

	/// Filter chain for this thread. By copying the filters array
	/// to each thread it is possible to change the filter chain
	/// between Blocks using lzma_filters_update().
	lzma_filter filters[LZMA_FILTERS_MAX + 1];

	/// Next structure in the stack of free worker threads.
	worker_thread *next;

	mythread_mutex mutex;
	mythread_cond cond;

	/// The ID of this thread is used to join the thread
	/// when it's not needed anymore.
	mythread thread_id;
};

struct lzma_stream_coder_s {
	enum {
		SEQ_STREAM_HEADER,
		SEQ_BLOCK,
		SEQ_INDEX,
		SEQ_STREAM_FOOTER,
	} sequence;

	/// Start a new Block every block_size bytes of input unless
	/// LZMA_FULL_FLUSH or LZMA_FULL_BARRIER is used earlier.
	size_t block_size;

	/// The filter chain to use for the next Block.
	/// This can be updated using lzma_filters_update()
	/// after LZMA_FULL_BARRIER or LZMA_FULL_FLUSH.
	lzma_filter filters[LZMA_FILTERS_MAX + 1];

	/// A copy of filters[] will be put here when attempting to get
	/// a new worker thread. This will be copied to a worker thread
	/// when a thread becomes free and then this cache is marked as
	/// empty by setting [0].id = LZMA_VLI_UNKNOWN. Without this cache
	/// the filter options from filters[] would get uselessly copied
	/// multiple times (allocated and freed) when waiting for a new free
	/// worker thread.
	///
	/// This is freed if filters[] is updated via lzma_filters_update().
	lzma_filter filters_cache[LZMA_FILTERS_MAX + 1];


	/// Index to hold sizes of the Blocks
	lzma_index *index;

	/// Index encoder
	lzma_next_coder index_encoder;


	/// Stream Flags for encoding the Stream Header and Stream Footer.
	lzma_stream_flags stream_flags;

	/// Buffer to hold Stream Header and Stream Footer.
	uint8_t header[LZMA_STREAM_HEADER_SIZE];

	/// Read position in header[]
	size_t header_pos;


	/// Output buffer queue for compressed data
	lzma_outq outq;

	/// How much memory to allocate for each lzma_outbuf.buf
	size_t outbuf_alloc_size;


	/// Maximum wait time if we cannot use all the input and cannot
	/// fill the output buffer. This is in milliseconds.
	uint32_t timeout;


	/// Error code from a worker thread
	lzma_ret thread_error;

	/// Array of allocated thread-specific structures
	worker_thread *threads;

	/// Number of structures in "threads" above. This is also the
	/// number of threads that will be created at maximum.
	uint32_t threads_max;

	/// Number of thread structures that have been initialized, and
	/// thus the number of worker threads actually created so far.
	uint32_t threads_initialized;

	/// Stack of free threads. When a thread finishes, it puts itself
	/// back into this stack. This starts as empty because threads
	/// are created only when actually needed.
	worker_thread *threads_free;

	/// The most recent worker thread to which the main thread writes
	/// the new input from the application.
	worker_thread *thr;


	/// Amount of uncompressed data in Blocks that have already
	/// been finished.
	uint64_t progress_in;

	/// Amount of compressed data in Stream Header + Blocks that
	/// have already been finished.
	uint64_t progress_out;


	mythread_mutex mutex;
	mythread_cond cond;
};
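// Locking summary, derived from the code below: coder->mutex protects
// thread_error, threads_free, the coder-level progress counters, and the
// readability state of the output queue, while each worker_thread's own
// mutex protects its state, in_size, and per-thread progress counters.
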
/// Tell the main thread that something has gone wrong.
static void
worker_error(worker_thread *thr, lzma_ret ret)
{
	assert(ret != LZMA_OK);
	assert(ret != LZMA_STREAM_END);

	mythread_sync(thr->coder->mutex) {
		if (thr->coder->thread_error == LZMA_OK)
			thr->coder->thread_error = ret;

		mythread_cond_signal(&thr->coder->cond);
	}

	return;
}

static worker_state
worker_encode(worker_thread *thr, size_t *out_pos, worker_state state)
{
	assert(thr->progress_in == 0);
	assert(thr->progress_out == 0);

	// Set the Block options.
	thr->block_options = (lzma_block){
		.version = 0,
		.check = thr->coder->stream_flags.check,
		.compressed_size = thr->outbuf->allocated,
		.uncompressed_size = thr->coder->block_size,
		.filters = thr->filters,
	};

	// Calculate maximum size of the Block Header. This amount is
	// reserved in the beginning of the buffer so that Block Header
	// along with Compressed Size and Uncompressed Size can be
	// written there.
	lzma_ret ret = lzma_block_header_size(&thr->block_options);
	if (ret != LZMA_OK) {
		worker_error(thr, ret);
		return THR_STOP;
	}

	// Initialize the Block encoder.
	ret = lzma_block_encoder_init(&thr->block_encoder,
			thr->allocator, &thr->block_options);
	if (ret != LZMA_OK) {
		worker_error(thr, ret);
		return THR_STOP;
	}

	size_t in_pos = 0;
	size_t in_size = 0;

	*out_pos = thr->block_options.header_size;
	const size_t out_size = thr->outbuf->allocated;

	do {
		mythread_sync(thr->mutex) {
			// Store in_pos and *out_pos into *thr so that
			// an application may read them via
			// lzma_get_progress() to get progress information.
			//
			// NOTE: These aren't updated when the encoding
			// finishes. Instead, the final values are taken
			// later from thr->outbuf.
			thr->progress_in = in_pos;
			thr->progress_out = *out_pos;

			while (in_size == thr->in_size
					&& thr->state == THR_RUN)
				mythread_cond_wait(&thr->cond, &thr->mutex);

			state = thr->state;
			in_size = thr->in_size;
		}

		// Return if we were asked to stop or exit.
		if (state >= THR_STOP)
			return state;

		lzma_action action = state == THR_FINISH
				? LZMA_FINISH : LZMA_RUN;

		// Limit the amount of input given to the Block encoder
		// at once. This way this thread can react fairly quickly
		// if the main thread wants us to stop or exit.
		static const size_t in_chunk_max = 16384;
		size_t in_limit = in_size;
		if (in_size - in_pos > in_chunk_max) {
			in_limit = in_pos + in_chunk_max;
			action = LZMA_RUN;
		}

		ret = thr->block_encoder.code(
				thr->block_encoder.coder, thr->allocator,
				thr->in, &in_pos, in_limit, thr->outbuf->buf,
				out_pos, out_size, action);
	} while (ret == LZMA_OK && *out_pos < out_size);

	switch (ret) {
	case LZMA_STREAM_END:
		assert(state == THR_FINISH);

		// Encode the Block Header. By doing it after
		// the compression, we can store the Compressed Size
		// and Uncompressed Size fields.
		ret = lzma_block_header_encode(&thr->block_options,
				thr->outbuf->buf);
		if (ret != LZMA_OK) {
			worker_error(thr, ret);
			return THR_STOP;
		}

		break;

	case LZMA_OK:
		// The data was incompressible. Encode it using uncompressed
		// LZMA2 chunks.
		//
		// First wait until we have gotten all the input.
		mythread_sync(thr->mutex) {
			while (thr->state == THR_RUN)
				mythread_cond_wait(&thr->cond, &thr->mutex);

			state = thr->state;
			in_size = thr->in_size;
		}

		if (state >= THR_STOP)
			return state;

		// Do the encoding. This takes care of the Block Header too.
		*out_pos = 0;
		ret = lzma_block_uncomp_encode(&thr->block_options,
				thr->in, in_size, thr->outbuf->buf,
				out_pos, out_size);

		// It shouldn't fail.
		if (ret != LZMA_OK) {
			worker_error(thr, LZMA_PROG_ERROR);
			return THR_STOP;
		}

		break;

	default:
		worker_error(thr, ret);
		return THR_STOP;
	}

	// Set the size information that will be read by the main thread
	// to write the Index field.
	thr->outbuf->unpadded_size
			= lzma_block_unpadded_size(&thr->block_options);
	assert(thr->outbuf->unpadded_size != 0);
	thr->outbuf->uncompressed_size = thr->block_options.uncompressed_size;

	return THR_FINISH;
}

static MYTHREAD_RET_TYPE
worker_start(void *thr_ptr)
{
	worker_thread *thr = thr_ptr;
	worker_state state = THR_IDLE; // Init to silence a warning

	while (true) {
		// Wait for work.
		mythread_sync(thr->mutex) {
			while (true) {
				// The thread is already idle so if we are
				// requested to stop, just set the state.
				if (thr->state == THR_STOP) {
					thr->state = THR_IDLE;
					mythread_cond_signal(&thr->cond);
				}

				state = thr->state;
				if (state != THR_IDLE)
					break;

				mythread_cond_wait(&thr->cond, &thr->mutex);
			}
		}

		size_t out_pos = 0;

		assert(state != THR_IDLE);
		assert(state != THR_STOP);

		if (state <= THR_FINISH)
			state = worker_encode(thr, &out_pos, state);

		if (state == THR_EXIT)
			break;

		// Mark the thread as idle unless the main thread has
		// told us to exit. Signal is needed for the case
		// where the main thread is waiting for the threads to stop.
		mythread_sync(thr->mutex) {
			if (thr->state != THR_EXIT) {
				thr->state = THR_IDLE;
				mythread_cond_signal(&thr->cond);
			}
		}

		mythread_sync(thr->coder->mutex) {
			// If no errors occurred, make the encoded data
			// available to be copied out.
			if (state == THR_FINISH) {
				thr->outbuf->pos = out_pos;
				thr->outbuf->finished = true;
			}

			// Update the main progress info.
			thr->coder->progress_in
					+= thr->outbuf->uncompressed_size;
			thr->coder->progress_out += out_pos;
			thr->progress_in = 0;
			thr->progress_out = 0;

			// Return this thread to the stack of free threads.
			thr->next = thr->coder->threads_free;
			thr->coder->threads_free = thr;

			mythread_cond_signal(&thr->coder->cond);
		}
	}

	// Exiting, free the resources.
	lzma_filters_free(thr->filters, thr->allocator);

	mythread_mutex_destroy(&thr->mutex);
	mythread_cond_destroy(&thr->cond);

	lzma_next_end(&thr->block_encoder, thr->allocator);
	lzma_free(thr->in, thr->allocator);
	return MYTHREAD_RET_VALUE;
}

/// Make the threads stop but not exit. Optionally wait for them to stop.
static void
threads_stop(lzma_stream_coder *coder, bool wait_for_threads)
{
	// Tell the threads to stop.
	for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
		mythread_sync(coder->threads[i].mutex) {
			coder->threads[i].state = THR_STOP;
			mythread_cond_signal(&coder->threads[i].cond);
		}
	}

	if (!wait_for_threads)
		return;

	// Wait for the threads to settle in the idle state.
	for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
		mythread_sync(coder->threads[i].mutex) {
			while (coder->threads[i].state != THR_IDLE)
				mythread_cond_wait(&coder->threads[i].cond,
						&coder->threads[i].mutex);
		}
	}

	return;
}

/// Stop the threads and free the resources associated with them.
/// Wait until the threads have exited.
static void
threads_end(lzma_stream_coder *coder, const lzma_allocator *allocator)
{
	for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
		mythread_sync(coder->threads[i].mutex) {
			coder->threads[i].state = THR_EXIT;
			mythread_cond_signal(&coder->threads[i].cond);
		}
	}

	for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
		int ret = mythread_join(coder->threads[i].thread_id);
		assert(ret == 0);
		(void)ret;
	}

	lzma_free(coder->threads, allocator);
	return;
}

/// Initialize a new worker_thread structure and create a new thread.
static lzma_ret
initialize_new_thread(lzma_stream_coder *coder,
		const lzma_allocator *allocator)
{
	worker_thread *thr = &coder->threads[coder->threads_initialized];

	thr->in = lzma_alloc(coder->block_size, allocator);
	if (thr->in == NULL)
		return LZMA_MEM_ERROR;

	if (mythread_mutex_init(&thr->mutex))
		goto error_mutex;

	if (mythread_cond_init(&thr->cond))
		goto error_cond;

	thr->state = THR_IDLE;
	thr->allocator = allocator;
	thr->coder = coder;
	thr->progress_in = 0;
	thr->progress_out = 0;
	thr->block_encoder = LZMA_NEXT_CODER_INIT;
	thr->filters[0].id = LZMA_VLI_UNKNOWN;

	if (mythread_create(&thr->thread_id, &worker_start, thr))
		goto error_thread;

	++coder->threads_initialized;
	coder->thr = thr;

	return LZMA_OK;

error_thread:
	mythread_cond_destroy(&thr->cond);

error_cond:
	mythread_mutex_destroy(&thr->mutex);

error_mutex:
	lzma_free(thr->in, allocator);

	return LZMA_MEM_ERROR;
}

static lzma_ret
get_thread(lzma_stream_coder *coder, const lzma_allocator *allocator)
{
	// If there are no free output subqueues, there is no
	// point in trying to get a thread.
	if (!lzma_outq_has_buf(&coder->outq))
		return LZMA_OK;

	// That's also true if we cannot allocate memory for the output
	// buffer in the output queue.
	return_if_error(lzma_outq_prealloc_buf(&coder->outq, allocator,
			coder->outbuf_alloc_size));

	// Make a thread-specific copy of the filter chain. Put it in
	// the cache array first so that if we cannot get a new thread yet,
	// the allocation is ready when we try again.
	if (coder->filters_cache[0].id == LZMA_VLI_UNKNOWN)
		return_if_error(lzma_filters_copy(
			coder->filters, coder->filters_cache, allocator));

	// If there is a free structure on the stack, use it.
	mythread_sync(coder->mutex) {
		if (coder->threads_free != NULL) {
			coder->thr = coder->threads_free;
			coder->threads_free = coder->threads_free->next;
		}
	}

	if (coder->thr == NULL) {
		// If there are no uninitialized structures left, return.
		if (coder->threads_initialized == coder->threads_max)
			return LZMA_OK;

		// Initialize a new thread.
		return_if_error(initialize_new_thread(coder, allocator));
	}

	// Reset the parts of the thread state that have to be done
	// in the main thread.
	mythread_sync(coder->thr->mutex) {
		coder->thr->state = THR_RUN;
		coder->thr->in_size = 0;
		coder->thr->outbuf = lzma_outq_get_buf(&coder->outq, NULL);

		// Free the old thread-specific filter options and replace
		// them with the already-allocated new options from
		// coder->filters_cache[]. Then mark the cache as empty.
		lzma_filters_free(coder->thr->filters, allocator);
		memcpy(coder->thr->filters, coder->filters_cache,
				sizeof(coder->filters_cache));
		coder->filters_cache[0].id = LZMA_VLI_UNKNOWN;

		mythread_cond_signal(&coder->thr->cond);
	}

	return LZMA_OK;
}

static lzma_ret
stream_encode_in(lzma_stream_coder *coder, const lzma_allocator *allocator,
		const uint8_t *restrict in, size_t *restrict in_pos,
		size_t in_size, lzma_action action)
{
	while (*in_pos < in_size
			|| (coder->thr != NULL && action != LZMA_RUN)) {
		if (coder->thr == NULL) {
			// Get a new thread.
			const lzma_ret ret = get_thread(coder, allocator);
			if (coder->thr == NULL)
				return ret;
		}

		// Copy the input data to thread's buffer.
		size_t thr_in_size = coder->thr->in_size;
		lzma_bufcpy(in, in_pos, in_size, coder->thr->in,
				&thr_in_size, coder->block_size);

		// Tell the Block encoder to finish if
		//  - it has got block_size bytes of input; or
		//  - all input was used and LZMA_FINISH, LZMA_FULL_FLUSH,
		//    or LZMA_FULL_BARRIER was used.
		//
		// TODO: LZMA_SYNC_FLUSH and LZMA_SYNC_BARRIER.
		const bool finish = thr_in_size == coder->block_size
				|| (*in_pos == in_size && action != LZMA_RUN);

		bool block_error = false;

		mythread_sync(coder->thr->mutex) {
			if (coder->thr->state == THR_IDLE) {
				// Something has gone wrong with the Block
				// encoder. It has set coder->thread_error
				// which we will read a few lines later.
				block_error = true;
			} else {
				// Tell the Block encoder its new amount
				// of input and update the state if needed.
				coder->thr->in_size = thr_in_size;

				if (finish)
					coder->thr->state = THR_FINISH;

				mythread_cond_signal(&coder->thr->cond);
			}
		}

		if (block_error) {
			lzma_ret ret;

			mythread_sync(coder->mutex) {
				ret = coder->thread_error;
			}

			return ret;
		}

		if (finish)
			coder->thr = NULL;
	}

	return LZMA_OK;
}

/// Wait until more input can be consumed, more output can be read, or
/// an optional timeout is reached.
static bool
wait_for_work(lzma_stream_coder *coder, mythread_condtime *wait_abs,
		bool *has_blocked, bool has_input)
{
	if (coder->timeout != 0 && !*has_blocked) {
		// Every time when stream_encode_mt() is called via
		// lzma_code(), *has_blocked starts as false. We set it
		// to true here and calculate the absolute time when
		// we must return if there's nothing to do.
		//
		// This way if we block multiple times for short moments
		// less than "timeout" milliseconds, we will return once
		// "timeout" amount of time has passed since the *first*
		// blocking occurred. If the absolute time was calculated
		// again every time we block, "timeout" would effectively
		// be meaningless if we never consecutively block longer
		// than "timeout" ms.
		*has_blocked = true;
		mythread_condtime_set(wait_abs, &coder->cond, coder->timeout);
	}

	bool timed_out = false;

	mythread_sync(coder->mutex) {
		// There are four things that we wait for. If one of them
		// becomes possible, we return.
		//  - If there is input left, we need to get a free
		//    worker thread and an output buffer for it.
		//  - Data is ready to be read from the output queue.
		//  - A worker thread indicates an error.
		//  - A timeout occurs.
		while ((!has_input || coder->threads_free == NULL
					|| !lzma_outq_has_buf(&coder->outq))
				&& !lzma_outq_is_readable(&coder->outq)
				&& coder->thread_error == LZMA_OK
				&& !timed_out) {
			if (coder->timeout != 0)
				timed_out = mythread_cond_timedwait(
						&coder->cond, &coder->mutex,
						wait_abs) != 0;
			else
				mythread_cond_wait(&coder->cond,
						&coder->mutex);
		}
	}

	return timed_out;
}

static lzma_ret
stream_encode_mt(void *coder_ptr, const lzma_allocator *allocator,
		const uint8_t *restrict in, size_t *restrict in_pos,
		size_t in_size, uint8_t *restrict out,
		size_t *restrict out_pos, size_t out_size, lzma_action action)
{
	lzma_stream_coder *coder = coder_ptr;

	switch (coder->sequence) {
	case SEQ_STREAM_HEADER:
		lzma_bufcpy(coder->header, &coder->header_pos,
				sizeof(coder->header),
				out, out_pos, out_size);
		if (coder->header_pos < sizeof(coder->header))
			return LZMA_OK;

		coder->header_pos = 0;
		coder->sequence = SEQ_BLOCK;

	// Fall through

	case SEQ_BLOCK: {
		// Initialized to silence warnings.
		lzma_vli unpadded_size = 0;
		lzma_vli uncompressed_size = 0;
		lzma_ret ret = LZMA_OK;

		// These are for wait_for_work().
		bool has_blocked = false;
		mythread_condtime wait_abs;

		while (true) {
			mythread_sync(coder->mutex) {
				// Check for Block encoder errors.
				ret = coder->thread_error;
				if (ret != LZMA_OK) {
					assert(ret != LZMA_STREAM_END);
					break; // Break out of mythread_sync.
				}

				// Try to read compressed data to out[].
				ret = lzma_outq_read(&coder->outq, allocator,
						out, out_pos, out_size,
						&unpadded_size,
						&uncompressed_size);
			}

			if (ret == LZMA_STREAM_END) {
				// End of Block. Add it to the Index.
				ret = lzma_index_append(coder->index,
						allocator, unpadded_size,
						uncompressed_size);
				if (ret != LZMA_OK) {
					threads_stop(coder, false);
					return ret;
				}

				// If we didn't fill the output buffer yet,
				// try to read more data. Maybe the next
				// outbuf has been finished already too.
				if (*out_pos < out_size)
					continue;
			}

			if (ret != LZMA_OK) {
				// coder->thread_error was set.
				threads_stop(coder, false);
				return ret;
			}

			// Try to give uncompressed data to a worker thread.
			ret = stream_encode_in(coder, allocator,
					in, in_pos, in_size, action);
			if (ret != LZMA_OK) {
				threads_stop(coder, false);
				return ret;
			}

			// See if we should wait or return.
			//
			// TODO: LZMA_SYNC_FLUSH and LZMA_SYNC_BARRIER.
			if (*in_pos == in_size) {
				// LZMA_RUN: More data is probably coming
				// so return to let the caller fill the
				// input buffer.
				if (action == LZMA_RUN)
					return LZMA_OK;

				// LZMA_FULL_BARRIER: The same as with
				// LZMA_RUN but tell the caller that the
				// barrier was completed.
				if (action == LZMA_FULL_BARRIER)
					return LZMA_STREAM_END;

				// Finishing or flushing isn't completed until
				// all input data has been encoded and copied
				// to the output buffer.
				if (lzma_outq_is_empty(&coder->outq)) {
					// LZMA_FINISH: Continue to encode
					// the Index field.
					if (action == LZMA_FINISH)
						break;

					// LZMA_FULL_FLUSH: Return to tell
					// the caller that flushing was
					// completed.
					if (action == LZMA_FULL_FLUSH)
						return LZMA_STREAM_END;
				}
			}

			// Return if there is no output space left.
			// This check must be done after testing the input
			// buffer, because we might want to use a different
			// return code.
			if (*out_pos == out_size)
				return LZMA_OK;

			// Neither in nor out has been used completely.
			// Wait until there's something we can do.
			if (wait_for_work(coder, &wait_abs, &has_blocked,
					*in_pos < in_size))
				return LZMA_TIMED_OUT;
		}

		// All Blocks have been encoded and the threads have stopped.
		// Prepare to encode the Index field.
		return_if_error(lzma_index_encoder_init(
				&coder->index_encoder, allocator,
				coder->index));
		coder->sequence = SEQ_INDEX;

		// Update the progress info to take the Index and
		// Stream Footer into account. Those are very fast to encode
		// so in terms of progress information they can be thought
		// to be ready to be copied out.
		coder->progress_out += lzma_index_size(coder->index)
				+ LZMA_STREAM_HEADER_SIZE;
	}

	// Fall through

	case SEQ_INDEX: {
		// Call the Index encoder. It doesn't take any input, so
		// those pointers can be NULL.
		const lzma_ret ret = coder->index_encoder.code(
				coder->index_encoder.coder, allocator,
				NULL, NULL, 0,
				out, out_pos, out_size, LZMA_RUN);
		if (ret != LZMA_STREAM_END)
			return ret;

		// Encode the Stream Footer into coder->header.
		coder->stream_flags.backward_size
				= lzma_index_size(coder->index);
		if (lzma_stream_footer_encode(&coder->stream_flags,
				coder->header) != LZMA_OK)
			return LZMA_PROG_ERROR;

		coder->sequence = SEQ_STREAM_FOOTER;
	}

	// Fall through

	case SEQ_STREAM_FOOTER:
		lzma_bufcpy(coder->header, &coder->header_pos,
				sizeof(coder->header),
				out, out_pos, out_size);
		return coder->header_pos < sizeof(coder->header)
				? LZMA_OK : LZMA_STREAM_END;
	}

	assert(0);
	return LZMA_PROG_ERROR;
}

static void
stream_encoder_mt_end(void *coder_ptr, const lzma_allocator *allocator)
{
	lzma_stream_coder *coder = coder_ptr;

	// Threads must be killed before the output queue can be freed.
	threads_end(coder, allocator);
	lzma_outq_end(&coder->outq, allocator);

	lzma_filters_free(coder->filters, allocator);
	lzma_filters_free(coder->filters_cache, allocator);

	lzma_next_end(&coder->index_encoder, allocator);
	lzma_index_end(coder->index, allocator);

	mythread_cond_destroy(&coder->cond);
	mythread_mutex_destroy(&coder->mutex);

	lzma_free(coder, allocator);
	return;
}

static lzma_ret
stream_encoder_mt_update(void *coder_ptr, const lzma_allocator *allocator,
		const lzma_filter *filters,
		const lzma_filter *reversed_filters
			lzma_attribute((__unused__)))
{
	lzma_stream_coder *coder = coder_ptr;

	// Applications shouldn't attempt to change the options when
	// we are already encoding the Index or Stream Footer.
	if (coder->sequence > SEQ_BLOCK)
		return LZMA_PROG_ERROR;

	// For now the threaded encoder doesn't support changing
	// the options in the middle of a Block.
	if (coder->thr != NULL)
		return LZMA_PROG_ERROR;

	// Check if the filter chain seems mostly valid. See the comment
	// in stream_encoder_mt_init().
	if (lzma_raw_encoder_memusage(filters) == UINT64_MAX)
		return LZMA_OPTIONS_ERROR;

	// Make a copy to a temporary buffer first. This way the encoder
	// state stays unchanged if an error occurs in lzma_filters_copy().
	lzma_filter temp[LZMA_FILTERS_MAX + 1];
	return_if_error(lzma_filters_copy(filters, temp, allocator));

	// Free the options of the old chain as well as the cache.
	lzma_filters_free(coder->filters, allocator);
	lzma_filters_free(coder->filters_cache, allocator);

	// Copy the new filter chain in place.
	memcpy(coder->filters, temp, sizeof(temp));

	return LZMA_OK;
}

/// Options handling for lzma_stream_encoder_mt_init() and
/// lzma_stream_encoder_mt_memusage()
static lzma_ret
get_options(const lzma_mt *options, lzma_options_easy *opt_easy,
		const lzma_filter **filters, uint64_t *block_size,
		uint64_t *outbuf_size_max)
{
	// Validate some of the options.
	if (options == NULL)
		return LZMA_PROG_ERROR;

	if (options->flags != 0 || options->threads == 0
			|| options->threads > LZMA_THREADS_MAX)
		return LZMA_OPTIONS_ERROR;

	if (options->filters != NULL) {
		// Filter chain was given, use it as is.
		*filters = options->filters;
	} else {
		// Use a preset.
		if (lzma_easy_preset(opt_easy, options->preset))
			return LZMA_OPTIONS_ERROR;

		*filters = opt_easy->filters;
	}

	// Block size
	if (options->block_size > 0) {
		if (options->block_size > BLOCK_SIZE_MAX)
			return LZMA_OPTIONS_ERROR;

		*block_size = options->block_size;
	} else {
		// Determine the Block size from the filter chain.
		*block_size = lzma_mt_block_size(*filters);
		if (*block_size == 0)
			return LZMA_OPTIONS_ERROR;

		assert(*block_size <= BLOCK_SIZE_MAX);
	}

	// Calculate the maximum amount of output that a single output buffer
	// may need to hold. This is the same as the maximum total size of
	// a Block.
	*outbuf_size_max = lzma_block_buffer_bound64(*block_size);
	if (*outbuf_size_max == 0)
		return LZMA_MEM_ERROR;

	return LZMA_OK;
}

static void
get_progress(void *coder_ptr, uint64_t *progress_in, uint64_t *progress_out)
{
	lzma_stream_coder *coder = coder_ptr;

	// Lock coder->mutex to prevent finishing threads from moving their
	// progress info from the worker_thread structure to lzma_stream_coder.
	mythread_sync(coder->mutex) {
		*progress_in = coder->progress_in;
		*progress_out = coder->progress_out;

		for (size_t i = 0; i < coder->threads_initialized; ++i) {
			mythread_sync(coder->threads[i].mutex) {
				*progress_in += coder->threads[i].progress_in;
				*progress_out += coder->threads[i]
						.progress_out;
			}
		}
	}

	return;
}

static lzma_ret
stream_encoder_mt_init(lzma_next_coder *next, const lzma_allocator *allocator,
		const lzma_mt *options)
{
	lzma_next_coder_init(&stream_encoder_mt_init, next, allocator);

	// Get the filter chain.
	lzma_options_easy easy;
	const lzma_filter *filters;
	uint64_t block_size;
	uint64_t outbuf_size_max;
	return_if_error(get_options(options, &easy, &filters,
			&block_size, &outbuf_size_max));

#if SIZE_MAX < UINT64_MAX
	if (block_size > SIZE_MAX || outbuf_size_max > SIZE_MAX)
		return LZMA_MEM_ERROR;
#endif

	// Validate the filter chain so that we can give an error in this
	// function instead of delaying it to the first call to lzma_code().
	// The memory usage calculation verifies the filter chain as
	// a side effect so we take advantage of that. It's not a perfect
	// check though as raw encoder allows LZMA1 too but such problems
	// will be caught eventually with Block Header encoder.
	if (lzma_raw_encoder_memusage(filters) == UINT64_MAX)
		return LZMA_OPTIONS_ERROR;

	// Validate the Check ID.
	if ((unsigned int)(options->check) > LZMA_CHECK_ID_MAX)
		return LZMA_PROG_ERROR;

	if (!lzma_check_is_supported(options->check))
		return LZMA_UNSUPPORTED_CHECK;

	// Allocate and initialize the base structure if needed.
	lzma_stream_coder *coder = next->coder;
	if (coder == NULL) {
		coder = lzma_alloc(sizeof(lzma_stream_coder), allocator);
		if (coder == NULL)
			return LZMA_MEM_ERROR;

		next->coder = coder;

		// For the mutex and condition variable initializations
		// the error handling has to be done here because
		// stream_encoder_mt_end() doesn't know if they have
		// already been initialized or not.
		if (mythread_mutex_init(&coder->mutex)) {
			lzma_free(coder, allocator);
			next->coder = NULL;
			return LZMA_MEM_ERROR;
		}

		if (mythread_cond_init(&coder->cond)) {
			mythread_mutex_destroy(&coder->mutex);
			lzma_free(coder, allocator);
			next->coder = NULL;
			return LZMA_MEM_ERROR;
		}

		next->code = &stream_encode_mt;
		next->end = &stream_encoder_mt_end;
		next->get_progress = &get_progress;
		next->update = &stream_encoder_mt_update;

		coder->filters[0].id = LZMA_VLI_UNKNOWN;
		coder->filters_cache[0].id = LZMA_VLI_UNKNOWN;
		coder->index_encoder = LZMA_NEXT_CODER_INIT;
		coder->index = NULL;
		memzero(&coder->outq, sizeof(coder->outq));
		coder->threads = NULL;
		coder->threads_max = 0;
		coder->threads_initialized = 0;
	}

	// Basic initializations
	coder->sequence = SEQ_STREAM_HEADER;
	coder->block_size = (size_t)(block_size);
	coder->outbuf_alloc_size = (size_t)(outbuf_size_max);
	coder->thread_error = LZMA_OK;
	coder->thr = NULL;

	// Allocate the thread-specific base structures.
	assert(options->threads > 0);
	if (coder->threads_max != options->threads) {
		threads_end(coder, allocator);

		coder->threads = NULL;
		coder->threads_max = 0;

		coder->threads_initialized = 0;
		coder->threads_free = NULL;

		coder->threads = lzma_alloc(
				options->threads * sizeof(worker_thread),
				allocator);
		if (coder->threads == NULL)
			return LZMA_MEM_ERROR;

		coder->threads_max = options->threads;
	} else {
		// Reuse the old structures and threads. Tell the running
		// threads to stop and wait until they have stopped.
		threads_stop(coder, true);
	}

	// Output queue
	return_if_error(lzma_outq_init(&coder->outq, allocator,
			options->threads));

	// Timeout
	coder->timeout = options->timeout;

	// Free the old filter chain and the cache.
	lzma_filters_free(coder->filters, allocator);
	lzma_filters_free(coder->filters_cache, allocator);

	// Copy the new filter chain.
	return_if_error(lzma_filters_copy(
			filters, coder->filters, allocator));

	// Index
	lzma_index_end(coder->index, allocator);
	coder->index = lzma_index_init(allocator);
	if (coder->index == NULL)
		return LZMA_MEM_ERROR;

	// Stream Header
	coder->stream_flags.version = 0;
	coder->stream_flags.check = options->check;
	return_if_error(lzma_stream_header_encode(
			&coder->stream_flags, coder->header));
	coder->header_pos = 0;

	// Progress info
	coder->progress_in = 0;
	coder->progress_out = LZMA_STREAM_HEADER_SIZE;

	return LZMA_OK;
}

#ifdef HAVE_SYMBOL_VERSIONS_LINUX
// These are for compatibility with binaries linked against liblzma that
// has been patched with xz-5.2.2-compat-libs.patch from RHEL/CentOS 7.
// Actually that patch didn't create lzma_stream_encoder_mt@XZ_5.2.2
// but it has been added here anyway since someone might misread the
// RHEL patch and think both @XZ_5.1.2alpha and @XZ_5.2.2 exist.
LZMA_SYMVER_API("lzma_stream_encoder_mt@XZ_5.1.2alpha",
	lzma_ret, lzma_stream_encoder_mt_512a)(
		lzma_stream *strm, const lzma_mt *options)
		lzma_nothrow lzma_attr_warn_unused_result
		__attribute__((__alias__("lzma_stream_encoder_mt_52")));

LZMA_SYMVER_API("lzma_stream_encoder_mt@XZ_5.2.2",
	lzma_ret, lzma_stream_encoder_mt_522)(
		lzma_stream *strm, const lzma_mt *options)
		lzma_nothrow lzma_attr_warn_unused_result
		__attribute__((__alias__("lzma_stream_encoder_mt_52")));

LZMA_SYMVER_API("lzma_stream_encoder_mt@@XZ_5.2",
	lzma_ret, lzma_stream_encoder_mt_52)(
		lzma_stream *strm, const lzma_mt *options)
		lzma_nothrow lzma_attr_warn_unused_result;

#define lzma_stream_encoder_mt lzma_stream_encoder_mt_52
#endif


extern LZMA_API(lzma_ret)
lzma_stream_encoder_mt(lzma_stream *strm, const lzma_mt *options)
{
	lzma_next_strm_init(stream_encoder_mt_init, strm, options);

	strm->internal->supported_actions[LZMA_RUN] = true;
//	strm->internal->supported_actions[LZMA_SYNC_FLUSH] = true;
	strm->internal->supported_actions[LZMA_FULL_FLUSH] = true;
	strm->internal->supported_actions[LZMA_FULL_BARRIER] = true;
	strm->internal->supported_actions[LZMA_FINISH] = true;

	return LZMA_OK;
}

#ifdef HAVE_SYMBOL_VERSIONS_LINUX
LZMA_SYMVER_API("lzma_stream_encoder_mt_memusage@XZ_5.1.2alpha",
	uint64_t, lzma_stream_encoder_mt_memusage_512a)(
	const lzma_mt *options) lzma_nothrow lzma_attr_pure
	__attribute__((__alias__("lzma_stream_encoder_mt_memusage_52")));

LZMA_SYMVER_API("lzma_stream_encoder_mt_memusage@XZ_5.2.2",
	uint64_t, lzma_stream_encoder_mt_memusage_522)(
	const lzma_mt *options) lzma_nothrow lzma_attr_pure
	__attribute__((__alias__("lzma_stream_encoder_mt_memusage_52")));

LZMA_SYMVER_API("lzma_stream_encoder_mt_memusage@@XZ_5.2",
	uint64_t, lzma_stream_encoder_mt_memusage_52)(
	const lzma_mt *options) lzma_nothrow lzma_attr_pure;

#define lzma_stream_encoder_mt_memusage lzma_stream_encoder_mt_memusage_52
#endif

// This function name is a monster but it's consistent with the older
// monster names. :-( 31 chars is the max that C99 requires so in that
// sense it's not too long. ;-)
extern LZMA_API(uint64_t)
lzma_stream_encoder_mt_memusage(const lzma_mt *options)
{
	lzma_options_easy easy;
	const lzma_filter *filters;
	uint64_t block_size;
	uint64_t outbuf_size_max;

	if (get_options(options, &easy, &filters, &block_size,
			&outbuf_size_max) != LZMA_OK)
		return UINT64_MAX;

	// Memory usage of the input buffers
	const uint64_t inbuf_memusage = options->threads * block_size;

	// Memory usage of the filter encoders
	uint64_t filters_memusage = lzma_raw_encoder_memusage(filters);
	if (filters_memusage == UINT64_MAX)
		return UINT64_MAX;

	filters_memusage *= options->threads;

	// Memory usage of the output queue
	const uint64_t outq_memusage = lzma_outq_memusage(
			outbuf_size_max, options->threads);
	if (outq_memusage == UINT64_MAX)
		return UINT64_MAX;

	// Sum them with overflow checking.
	uint64_t total_memusage = LZMA_MEMUSAGE_BASE
			+ sizeof(lzma_stream_coder)
			+ options->threads * sizeof(worker_thread);

	if (UINT64_MAX - total_memusage < inbuf_memusage)
		return UINT64_MAX;

	total_memusage += inbuf_memusage;

	if (UINT64_MAX - total_memusage < filters_memusage)
		return UINT64_MAX;

	total_memusage += filters_memusage;

	if (UINT64_MAX - total_memusage < outq_memusage)
		return UINT64_MAX;

	return total_memusage + outq_memusage;
}
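

// The sketch below is not part of liblzma; it is an illustrative example of
// how an application might drive the encoder defined above through the
// public lzma_stream_encoder_mt() / lzma_code() API. The thread count,
// buffer sizes, and the use of stdin/stdout are arbitrary choices made for
// the example, not recommendations from this file.
#if 0
#include <lzma.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static lzma_ret
example_compress_stdin_to_stdout(void)
{
	// Multithreaded encoder options. block_size == 0 lets liblzma
	// derive the Block size from the filter chain (see get_options()
	// above); timeout == 0 means lzma_code() may block with no
	// time limit.
	lzma_mt mt = {
		.flags = 0,
		.threads = 4,
		.block_size = 0,
		.timeout = 0,
		.preset = LZMA_PRESET_DEFAULT,
		.filters = NULL, // Use the preset instead of a custom chain.
		.check = LZMA_CHECK_CRC64,
	};

	lzma_stream strm = LZMA_STREAM_INIT;
	lzma_ret ret = lzma_stream_encoder_mt(&strm, &mt);
	if (ret != LZMA_OK)
		return ret;

	uint8_t inbuf[BUFSIZ];
	uint8_t outbuf[BUFSIZ];
	lzma_action action = LZMA_RUN;

	strm.next_out = outbuf;
	strm.avail_out = sizeof(outbuf);

	while (true) {
		// Refill the input buffer; switch to LZMA_FINISH once
		// stdin gives a short read (a real program would also
		// check ferror() here).
		if (strm.avail_in == 0 && action == LZMA_RUN) {
			strm.next_in = inbuf;
			strm.avail_in = fread(inbuf, 1, sizeof(inbuf), stdin);
			if (strm.avail_in < sizeof(inbuf))
				action = LZMA_FINISH;
		}

		ret = lzma_code(&strm, action);

		// Flush the output buffer when it is full or when the
		// whole .xz Stream has been encoded.
		if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
			fwrite(outbuf, 1, sizeof(outbuf) - strm.avail_out,
					stdout);
			strm.next_out = outbuf;
			strm.avail_out = sizeof(outbuf);
		}

		if (ret == LZMA_STREAM_END)
			break;

		if (ret != LZMA_OK) {
			lzma_end(&strm);
			return ret;
		}
	}

	lzma_end(&strm);
	return LZMA_OK;
}
#endif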