
/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*---                                            compress.c ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
   This file is part of bzip2/libbzip2, a program and library for
   lossless, block-sorting data compression.

   bzip2/libbzip2 version 1.0.8 of 13 July 2019
   Copyright (C) 1996-2019 Julian Seward <jseward@acm.org>

   Please read the WARNING, DISCLAIMER and PATENTS sections in the
   README file.

   This program is released under the terms of the license contained
   in the file LICENSE.
   ------------------------------------------------------------------ */

/* CHANGES
   0.9.0    -- original version.
   0.9.0a/b -- no changes in this file.
   0.9.0c   -- changed setting of nGroups in sendMTFValues()
               so as to do a bit better on small files
*/

#include "bzlib_private.h"

/*---------------------------------------------------*/
/*--- Bit stream I/O                              ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
void BZ2_bsInitWrite ( EState* s )
{
   s->bsLive = 0;
   s->bsBuff = 0;
}

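/*--
   Bit-stream convention used below: the pending output bits live in
   the top 'bsLive' bits of the 32-bit accumulator 'bsBuff', most
   significant bit first.  Whole bytes are flushed from the top of the
   buffer into s->zbits as they fill up, so after bsNEEDW at most 7
   bits remain pending and a subsequent write of up to 25 bits always
   fits; the largest single write in this file is 24 bits (s->origPtr).
--*/
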
/*---------------------------------------------------*/
static
void bsFinishWrite ( EState* s )
{
   while (s->bsLive > 0) {
      s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24);
      s->numZ++;
      s->bsBuff <<= 8;
      s->bsLive -= 8;
   }
}

/*---------------------------------------------------*/
#define bsNEEDW(nz)                           \
{                                             \
   while (s->bsLive >= 8) {                   \
      s->zbits[s->numZ]                       \
         = (UChar)(s->bsBuff >> 24);          \
      s->numZ++;                              \
      s->bsBuff <<= 8;                        \
      s->bsLive -= 8;                         \
   }                                          \
}

/*---------------------------------------------------*/
static
__inline__
void bsW ( EState* s, Int32 n, UInt32 v )
{
   bsNEEDW ( n );
   s->bsBuff |= (v << (32 - s->bsLive - n));
   s->bsLive += n;
}

/*---------------------------------------------------*/
static
void bsPutUInt32 ( EState* s, UInt32 u )
{
   bsW ( s, 8, (u >> 24) & 0xffL );
   bsW ( s, 8, (u >> 16) & 0xffL );
   bsW ( s, 8, (u >>  8) & 0xffL );
   bsW ( s, 8,  u        & 0xffL );
}

/*---------------------------------------------------*/
static
void bsPutUChar ( EState* s, UChar c )
{
   bsW( s, 8, (UInt32)c );
}

/*---------------------------------------------------*/
/*--- The back end proper                         ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void makeMaps_e ( EState* s )
{
   Int32 i;
   s->nInUse = 0;
   for (i = 0; i < 256; i++)
      if (s->inUse[i]) {
         s->unseqToSeq[i] = s->nInUse;
         s->nInUse++;
      }
}

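/*--
   Note on the zero-run coding in generateMTFValues() below: a run of
   'r' consecutive zeroes from the MTF stage is not emitted literally.
   Instead r is written in a bijective base-2 code using the two
   symbols RUNA (digit value 1) and RUNB (digit value 2), least
   significant digit first.  For example:
      r = 1  ->  RUNA
      r = 2  ->  RUNB
      r = 3  ->  RUNA RUNA
      r = 4  ->  RUNB RUNA
      r = 5  ->  RUNA RUNB
   This is what the 'zPend' bookkeeping implements.
--*/
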
/*---------------------------------------------------*/
static
void generateMTFValues ( EState* s )
{
   UChar   yy[256];
   Int32   i, j;
   Int32   zPend;
   Int32   wr;
   Int32   EOB;

   /*
      After sorting (eg, here),
         s->arr1 [ 0 .. s->nblock-1 ] holds sorted order,
      and
         ((UChar*)s->arr2) [ 0 .. s->nblock-1 ]
      holds the original block data.

      The first thing to do is generate the MTF values,
      and put them in
         ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ].

      Because there are strictly fewer or equal MTF values
      than block values, ptr values in this area are overwritten
      with MTF values only when they are no longer needed.

      The final compressed bitstream is generated into the
      area starting at
         (UChar*) (&((UChar*)s->arr2)[s->nblock])

      These storage aliases are set up in bzCompressInit(),
      except for the last one, which is arranged in
      compressBlock().
   */
   UInt32* ptr   = s->ptr;
   UChar*  block = s->block;
   UInt16* mtfv  = s->mtfv;

   makeMaps_e ( s );
   EOB = s->nInUse+1;
   for (i = 0; i <= EOB; i++) s->mtfFreq[i] = 0;

   wr = 0;
   zPend = 0;
   for (i = 0; i < s->nInUse; i++) yy[i] = (UChar) i;

   for (i = 0; i < s->nblock; i++) {
      UChar ll_i;
      AssertD ( wr <= i, "generateMTFValues(1)" );
      j = ptr[i]-1; if (j < 0) j += s->nblock;
      ll_i = s->unseqToSeq[block[j]];
      AssertD ( ll_i < s->nInUse, "generateMTFValues(2a)" );

      if (yy[0] == ll_i) {
         zPend++;
      } else {

         if (zPend > 0) {
            zPend--;
            while (True) {
               if (zPend & 1) {
                  mtfv[wr] = BZ_RUNB; wr++;
                  s->mtfFreq[BZ_RUNB]++;
               } else {
                  mtfv[wr] = BZ_RUNA; wr++;
                  s->mtfFreq[BZ_RUNA]++;
               }
               if (zPend < 2) break;
               zPend = (zPend - 2) / 2;
            };
            zPend = 0;
         }
         {
            register UChar  rtmp;
            register UChar* ryy_j;
            register UChar  rll_i;
            rtmp  = yy[1];
            yy[1] = yy[0];
            ryy_j = &(yy[1]);
            rll_i = ll_i;
            while ( rll_i != rtmp ) {
               register UChar rtmp2;
               ryy_j++;
               rtmp2  = rtmp;
               rtmp   = *ryy_j;
               *ryy_j = rtmp2;
            };
            yy[0] = rtmp;
            j = ryy_j - &(yy[0]);
            mtfv[wr] = j+1; wr++; s->mtfFreq[j+1]++;
         }

      }
   }

   if (zPend > 0) {
      zPend--;
      while (True) {
         if (zPend & 1) {
            mtfv[wr] = BZ_RUNB; wr++;
            s->mtfFreq[BZ_RUNB]++;
         } else {
            mtfv[wr] = BZ_RUNA; wr++;
            s->mtfFreq[BZ_RUNA]++;
         }
         if (zPend < 2) break;
         zPend = (zPend - 2) / 2;
      };
      zPend = 0;
   }

   mtfv[wr] = EOB; wr++; s->mtfFreq[EOB]++;

   s->nMTF = wr;
}

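/*--
   sendMTFValues() below encodes the MTF/RLE stream with up to six
   Huffman tables.  The stream is processed in groups of BZ_G_SIZE
   (50) symbols; each group is coded with whichever table is cheapest
   for it, and that choice is recorded in the selector list.  The
   tables are refined over BZ_N_ITERS passes before the mapping table,
   selectors, code lengths and coded symbols are written out.
--*/
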
/*---------------------------------------------------*/
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15

static
void sendMTFValues ( EState* s )
{
   Int32 v, t, i, j, gs, ge, totc, bt, bc, iter;
   Int32 nSelectors, alphaSize, minLen, maxLen, selCtr;
   Int32 nGroups, nBytes;

   /*--
   UChar  len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   is a global since the decoder also needs it.

   Int32  code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   Int32  rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   are also globals only used in this proc.
   Made global to keep stack frame size small.
   --*/

   UInt16 cost[BZ_N_GROUPS];
   Int32  fave[BZ_N_GROUPS];

   UInt16* mtfv = s->mtfv;

   if (s->verbosity >= 3)
      VPrintf3( " %d in block, %d after MTF & 1-2 coding, "
                "%d+2 syms in use\n",
                s->nblock, s->nMTF, s->nInUse );

   alphaSize = s->nInUse+2;
   for (t = 0; t < BZ_N_GROUPS; t++)
      for (v = 0; v < alphaSize; v++)
         s->len[t][v] = BZ_GREATER_ICOST;

   /*--- Decide how many coding tables to use ---*/
   AssertH ( s->nMTF > 0, 3001 );
   if (s->nMTF < 200)  nGroups = 2; else
   if (s->nMTF < 600)  nGroups = 3; else
   if (s->nMTF < 1200) nGroups = 4; else
   if (s->nMTF < 2400) nGroups = 5; else
                       nGroups = 6;
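
   /*--
      The initial tables are built by splitting the MTF alphabet into
      nGroups contiguous ranges of roughly equal total frequency; each
      table gets a low cost (BZ_LESSER_ICOST) inside its range and a
      high cost (BZ_GREATER_ICOST) everywhere else.
   --*/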
   /*--- Generate an initial set of coding tables ---*/
   {
      Int32 nPart, remF, tFreq, aFreq;

      nPart = nGroups;
      remF  = s->nMTF;
      gs = 0;
      while (nPart > 0) {
         tFreq = remF / nPart;
         ge = gs-1;
         aFreq = 0;
         while (aFreq < tFreq && ge < alphaSize-1) {
            ge++;
            aFreq += s->mtfFreq[ge];
         }

         if (ge > gs
             && nPart != nGroups && nPart != 1
             && ((nGroups-nPart) % 2 == 1)) {
            aFreq -= s->mtfFreq[ge];
            ge--;
         }

         if (s->verbosity >= 3)
            VPrintf5( " initial group %d, [%d .. %d], "
                      "has %d syms (%4.1f%%)\n",
                      nPart, gs, ge, aFreq,
                      (100.0 * (float)aFreq) / (float)(s->nMTF) );

         for (v = 0; v < alphaSize; v++)
            if (v >= gs && v <= ge)
               s->len[nPart-1][v] = BZ_LESSER_ICOST; else
               s->len[nPart-1][v] = BZ_GREATER_ICOST;

         nPart--;
         gs = ge+1;
         remF -= aFreq;
      }
   }
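
   /*--
      Each pass below reassigns every group of symbols to the table
      that currently codes it most cheaply, accumulates per-table
      symbol frequencies, and then rebuilds each table's code lengths
      from those frequencies -- in effect a few rounds of clustering
      refinement on the tables.
   --*/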
   /*---
      Iterate up to BZ_N_ITERS times to improve the tables.
   ---*/
   for (iter = 0; iter < BZ_N_ITERS; iter++) {

      for (t = 0; t < nGroups; t++) fave[t] = 0;

      for (t = 0; t < nGroups; t++)
         for (v = 0; v < alphaSize; v++)
            s->rfreq[t][v] = 0;

      /*---
        Set up an auxiliary length table which is used to fast-track
        the common case (nGroups == 6).
      ---*/
      if (nGroups == 6) {
         for (v = 0; v < alphaSize; v++) {
            s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
            s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
            s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
         }
      }
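
      /*--
         Each len_pack entry set up above packs two 16-bit code lengths
         into one 32-bit word, so the 50-symbol fast path can accumulate
         the cost under all six tables with three additions per symbol.
      --*/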
      nSelectors = 0;
      totc = 0;
      gs = 0;
      while (True) {

         /*--- Set group start & end marks. --*/
         if (gs >= s->nMTF) break;
         ge = gs + BZ_G_SIZE - 1;
         if (ge >= s->nMTF) ge = s->nMTF-1;

         /*--
            Calculate the cost of this group as coded
            by each of the coding tables.
         --*/
         for (t = 0; t < nGroups; t++) cost[t] = 0;

         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            register UInt32 cost01, cost23, cost45;
            register UInt16 icv;
            cost01 = cost23 = cost45 = 0;

#           define BZ_ITER(nn)                \
               icv = mtfv[gs+(nn)];           \
               cost01 += s->len_pack[icv][0]; \
               cost23 += s->len_pack[icv][1]; \
               cost45 += s->len_pack[icv][2]; \

            BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
            BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
            BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
            BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
            BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
            BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
            BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
            BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
            BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
            BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);

#           undef BZ_ITER

            cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
            cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
            cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++) {
               UInt16 icv = mtfv[i];
               for (t = 0; t < nGroups; t++) cost[t] += s->len[t][icv];
            }
         }

         /*--
            Find the coding table which is best for this group,
            and record its identity in the selector table.
         --*/
         bc = 999999999; bt = -1;
         for (t = 0; t < nGroups; t++)
            if (cost[t] < bc) { bc = cost[t]; bt = t; };
         totc += bc;
         fave[bt]++;
         s->selector[nSelectors] = bt;
         nSelectors++;

         /*--
            Increment the symbol frequencies for the selected table.
         --*/
         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/

#           define BZ_ITUR(nn) s->rfreq[bt][ mtfv[gs+(nn)] ]++

            BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
            BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
            BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
            BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
            BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
            BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
            BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
            BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
            BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
            BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);

#           undef BZ_ITUR

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++)
               s->rfreq[bt][ mtfv[i] ]++;
         }

         gs = ge+1;
      }
      if (s->verbosity >= 3) {
         VPrintf2 ( " pass %d: size is %d, grp uses are ",
                    iter+1, totc/8 );
         for (t = 0; t < nGroups; t++)
            VPrintf1 ( "%d ", fave[t] );
         VPrintf0 ( "\n" );
      }

      /*--
        Recompute the tables based on the accumulated frequencies.
      --*/
      /* maxLen was changed from 20 to 17 in bzip2-1.0.3.  See
         comment in huffman.c for details. */
      for (t = 0; t < nGroups; t++)
         BZ2_hbMakeCodeLengths ( &(s->len[t][0]), &(s->rfreq[t][0]),
                                 alphaSize, 17 /*20*/ );
   }
   AssertH( nGroups < 8, 3002 );
   AssertH( nSelectors < 32768 &&
            nSelectors <= BZ_MAX_SELECTORS,
            3003 );

   /*--- Compute MTF values for the selectors. ---*/
   {
      UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
      for (i = 0; i < nGroups; i++) pos[i] = i;
      for (i = 0; i < nSelectors; i++) {
         ll_i = s->selector[i];
         j = 0;
         tmp = pos[j];
         while ( ll_i != tmp ) {
            j++;
            tmp2 = tmp;
            tmp = pos[j];
            pos[j] = tmp2;
         };
         pos[0] = tmp;
         s->selectorMtf[i] = j;
      }
   };
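
   /*--
      Example of the selector MTF coding above: selectors 0,0,1,0
      become MTF values 0,0,1,1 -- after table 1 is used it moves to
      the front, so the following use of table 0 is coded as 1.
   --*/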
   /*--- Assign actual codes for the tables. --*/
   for (t = 0; t < nGroups; t++) {
      minLen = 32;
      maxLen = 0;
      for (i = 0; i < alphaSize; i++) {
         if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
         if (s->len[t][i] < minLen) minLen = s->len[t][i];
      }
      AssertH ( !(maxLen > 17 /*20*/ ), 3004 );
      AssertH ( !(minLen < 1),          3005 );
      BZ2_hbAssignCodes ( &(s->code[t][0]), &(s->len[t][0]),
                          minLen, maxLen, alphaSize );
   }
   /*--- Transmit the mapping table. ---*/
   {
      Bool inUse16[16];
      for (i = 0; i < 16; i++) {
          inUse16[i] = False;
          for (j = 0; j < 16; j++)
             if (s->inUse[i * 16 + j]) inUse16[i] = True;
      }

      nBytes = s->numZ;
      for (i = 0; i < 16; i++)
         if (inUse16[i]) bsW(s,1,1); else bsW(s,1,0);

      for (i = 0; i < 16; i++)
         if (inUse16[i])
            for (j = 0; j < 16; j++) {
               if (s->inUse[i * 16 + j]) bsW(s,1,1); else bsW(s,1,0);
            }

      if (s->verbosity >= 3)
         VPrintf1( " bytes: mapping %d, ", s->numZ-nBytes );
   }
   /*--- Now the selectors. ---*/
   nBytes = s->numZ;
   bsW ( s, 3, nGroups );
   bsW ( s, 15, nSelectors );
   for (i = 0; i < nSelectors; i++) {
      for (j = 0; j < s->selectorMtf[i]; j++) bsW(s,1,1);
      bsW(s,1,0);
   }
   if (s->verbosity >= 3)
      VPrintf1( "selectors %d, ", s->numZ-nBytes );
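
   /*--
      The code lengths that follow are sent as deltas: each table's
      first length is written in 5 bits, then every symbol's length is
      reached from the running value by a sequence of '10' (increment)
      or '11' (decrement) bit pairs, terminated by a single '0' bit.
   --*/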
   /*--- Now the coding tables. ---*/
   nBytes = s->numZ;

   for (t = 0; t < nGroups; t++) {
      Int32 curr = s->len[t][0];
      bsW ( s, 5, curr );
      for (i = 0; i < alphaSize; i++) {
         while (curr < s->len[t][i]) { bsW(s,2,2); curr++; /* 10 */ };
         while (curr > s->len[t][i]) { bsW(s,2,3); curr--; /* 11 */ };
         bsW ( s, 1, 0 );
      }
   }

   if (s->verbosity >= 3)
      VPrintf1 ( "code lengths %d, ", s->numZ-nBytes );
   /*--- And finally, the block data proper ---*/
   nBytes = s->numZ;
   selCtr = 0;
   gs = 0;
   while (True) {
      if (gs >= s->nMTF) break;
      ge = gs + BZ_G_SIZE - 1;
      if (ge >= s->nMTF) ge = s->nMTF-1;
      AssertH ( s->selector[selCtr] < nGroups, 3006 );

      if (nGroups == 6 && 50 == ge-gs+1) {
         /*--- fast track the common case ---*/
         UInt16 mtfv_i;
         UChar* s_len_sel_selCtr
            = &(s->len[s->selector[selCtr]][0]);
         Int32* s_code_sel_selCtr
            = &(s->code[s->selector[selCtr]][0]);

#        define BZ_ITAH(nn)                      \
            mtfv_i = mtfv[gs+(nn)];              \
            bsW ( s,                             \
                  s_len_sel_selCtr[mtfv_i],      \
                  s_code_sel_selCtr[mtfv_i] )

         BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
         BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
         BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
         BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
         BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
         BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
         BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
         BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
         BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
         BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);

#        undef BZ_ITAH

      } else {
         /*--- slow version which correctly handles all situations ---*/
         for (i = gs; i <= ge; i++) {
            bsW ( s,
                  s->len  [s->selector[selCtr]] [mtfv[i]],
                  s->code [s->selector[selCtr]] [mtfv[i]] );
         }
      }

      gs = ge+1;
      selCtr++;
   }
   AssertH( selCtr == nSelectors, 3007 );

   if (s->verbosity >= 3)
      VPrintf1( "codes %d\n", s->numZ-nBytes );
}

/*---------------------------------------------------*/
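/*--
   BZ2_compressBlock() below emits one complete block: the four-byte
   stream header ('B', 'Z', 'h', '0' + blockSize100k) for the first
   block, then the six-byte block magic 0x314159265359, the block CRC,
   the (always zero) randomised bit, the 24-bit original pointer, and
   the output of generateMTFValues() / sendMTFValues().  After the
   last block come the stream footer 0x177245385090 and the combined
   CRC of all blocks.
--*/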
void BZ2_compressBlock ( EState* s, Bool is_last_block )
{
   if (s->nblock > 0) {

      BZ_FINALISE_CRC ( s->blockCRC );
      s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
      s->combinedCRC ^= s->blockCRC;
      if (s->blockNo > 1) s->numZ = 0;

      if (s->verbosity >= 2)
         VPrintf4( " block %d: crc = 0x%08x, "
                   "combined CRC = 0x%08x, size = %d\n",
                   s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );

      BZ2_blockSort ( s );
   }

   s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);

   /*-- If this is the first block, create the stream header. --*/
   if (s->blockNo == 1) {
      BZ2_bsInitWrite ( s );
      bsPutUChar ( s, BZ_HDR_B );
      bsPutUChar ( s, BZ_HDR_Z );
      bsPutUChar ( s, BZ_HDR_h );
      bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
   }

   if (s->nblock > 0) {

      bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
      bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
      bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );

      /*-- Now the block's CRC, so it is in a known place. --*/
      bsPutUInt32 ( s, s->blockCRC );

      /*--
         Now a single bit indicating (non-)randomisation.
         As of version 0.9.5, we use a better sorting algorithm
         which makes randomisation unnecessary.  So always set
         the randomised bit to 'no'.  Of course, the decoder
         still needs to be able to handle randomised blocks
         so as to maintain backwards compatibility with
         older versions of bzip2.
      --*/
      bsW(s,1,0);

      bsW ( s, 24, s->origPtr );
      generateMTFValues ( s );
      sendMTFValues ( s );
   }

   /*-- If this is the last block, add the stream trailer. --*/
   if (is_last_block) {

      bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
      bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
      bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
      bsPutUInt32 ( s, s->combinedCRC );
      if (s->verbosity >= 2)
         VPrintf1( " final combined CRC = 0x%08x\n ", s->combinedCRC );
      bsFinishWrite ( s );
   }
}

/*-------------------------------------------------------------*/
/*--- end                                        compress.c ---*/
/*-------------------------------------------------------------*/