quant_enc.c 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388
  1. // Copyright 2011 Google Inc. All Rights Reserved.
  2. //
  3. // Use of this source code is governed by a BSD-style license
  4. // that can be found in the COPYING file in the root of the source
  5. // tree. An additional intellectual property rights grant can be found
  6. // in the file PATENTS. All contributing project authors may
  7. // be found in the AUTHORS file in the root of the source tree.
  8. // -----------------------------------------------------------------------------
  9. //
  10. // Quantization
  11. //
  12. // Author: Skal (pascal.massimino@gmail.com)
  13. #include <assert.h>
  14. #include <math.h>
  15. #include <stdlib.h> // for abs()
  16. #include "../dsp/quant.h"
  17. #include "./vp8i_enc.h"
  18. #include "./cost_enc.h"
  19. #define DO_TRELLIS_I4 1
  20. #define DO_TRELLIS_I16 1 // not a huge gain, but ok at low bitrate.
  21. #define DO_TRELLIS_UV 0 // disable trellis for UV. Risky. Not worth.
  22. #define USE_TDISTO 1
  23. #define MID_ALPHA 64 // neutral value for susceptibility
  24. #define MIN_ALPHA 30 // lowest usable value for susceptibility
  25. #define MAX_ALPHA 100 // higher meaningful value for susceptibility
  26. #define SNS_TO_DQ 0.9 // Scaling constant between the sns value and the QP
  27. // power-law modulation. Must be strictly less than 1.
  28. // number of non-zero coeffs below which we consider the block very flat
  29. // (and apply a penalty to complex predictions)
  30. #define FLATNESS_LIMIT_I16 0 // I16 mode (special case)
  31. #define FLATNESS_LIMIT_I4 3 // I4 mode
  32. #define FLATNESS_LIMIT_UV 2 // UV mode
  33. #define FLATNESS_PENALTY 140 // roughly ~1bit per block
  34. #define MULT_8B(a, b) (((a) * (b) + 128) >> 8)
  35. #define RD_DISTO_MULT 256 // distortion multiplier (equivalent of lambda)
  36. // #define DEBUG_BLOCK
  37. //------------------------------------------------------------------------------
  38. #if defined(DEBUG_BLOCK)
  39. #include <stdio.h>
  40. #include <stdlib.h>
// Debug-only helper (compiled only when DEBUG_BLOCK is defined): dumps the
// current macroblock to stdout — source vs. reconstructed pixels with their
// absolute differences, the RD score fields, and the quantized levels.
static void PrintBlockInfo(const VP8EncIterator* const it,
                           const VP8ModeScore* const rd) {
  int i, j;
  const int is_i16 = (it->mb_->type_ == 1);  // macroblock coded as Intra16?
  // input (source) and output (reconstructed) samples of the macroblock
  const uint8_t* const y_in = it->yuv_in_ + Y_OFF_ENC;
  const uint8_t* const y_out = it->yuv_out_ + Y_OFF_ENC;
  const uint8_t* const uv_in = it->yuv_in_ + U_OFF_ENC;
  const uint8_t* const uv_out = it->yuv_out_ + U_OFF_ENC;
  printf("SOURCE / OUTPUT / ABS DELTA\n");
  // 16x16 luma: source block, reconstructed block, per-pixel |delta|
  for (j = 0; j < 16; ++j) {
    for (i = 0; i < 16; ++i) printf("%3d ", y_in[i + j * BPS]);
    printf(" ");
    for (i = 0; i < 16; ++i) printf("%3d ", y_out[i + j * BPS]);
    printf(" ");
    for (i = 0; i < 16; ++i) {
      printf("%1d ", abs(y_in[i + j * BPS] - y_out[i + j * BPS]));
    }
    printf("\n");
  }
  printf("\n");  // newline before the U/V block
  // U and V are stored side by side in the cache: columns [0,8) are U,
  // columns [8,16) are V.
  for (j = 0; j < 8; ++j) {
    for (i = 0; i < 8; ++i) printf("%3d ", uv_in[i + j * BPS]);
    printf(" ");
    for (i = 8; i < 16; ++i) printf("%3d ", uv_in[i + j * BPS]);
    printf(" ");
    for (i = 0; i < 8; ++i) printf("%3d ", uv_out[i + j * BPS]);
    printf(" ");
    for (i = 8; i < 16; ++i) printf("%3d ", uv_out[i + j * BPS]);
    printf(" ");
    for (i = 0; i < 8; ++i) {
      printf("%1d ", abs(uv_out[i + j * BPS] - uv_in[i + j * BPS]));
    }
    printf(" ");
    for (i = 8; i < 16; ++i) {
      printf("%1d ", abs(uv_out[i + j * BPS] - uv_in[i + j * BPS]));
    }
    printf("\n");
  }
  // rate-distortion bookkeeping for this macroblock
  printf("\nD:%d SD:%d R:%d H:%d nz:0x%x score:%d\n",
         (int)rd->D, (int)rd->SD, (int)rd->R, (int)rd->H, (int)rd->nz,
         (int)rd->score);
  if (is_i16) {
    printf("Mode: %d\n", rd->mode_i16);
    printf("y_dc_levels:");
    for (i = 0; i < 16; ++i) printf("%3d ", rd->y_dc_levels[i]);
    printf("\n");
  } else {
    printf("Modes[16]: ");
    for (i = 0; i < 16; ++i) printf("%d ", rd->modes_i4[i]);
    printf("\n");
  }
  printf("y_ac_levels:\n");
  for (j = 0; j < 16; ++j) {
    // for I16 blocks, level #0 is stored in y_dc_levels, so skip it here
    for (i = is_i16 ? 1 : 0; i < 16; ++i) {
      printf("%4d ", rd->y_ac_levels[j][i]);
    }
    printf("\n");
  }
  printf("\n");
  printf("uv_levels (mode=%d):\n", rd->mode_uv);
  for (j = 0; j < 8; ++j) {
    for (i = 0; i < 16; ++i) {
      printf("%4d ", rd->uv_levels[j][i]);
    }
    printf("\n");
  }
}
  108. #endif // DEBUG_BLOCK
  109. //------------------------------------------------------------------------------
  110. static WEBP_INLINE int clip(int v, int m, int M) {
  111. return v < m ? m : v > M ? M : v;
  112. }
// Zigzag scan order: maps scan position -> raster index inside a 4x4 block.
static const uint8_t kZigzag[16] = {
  0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
};

// DC quantizer steps, indexed by quantizer index in [0..127]
// (used to seed q_[0] of the matrices in SetupMatrices()).
static const uint8_t kDcTable[128] = {
  4, 5, 6, 7, 8, 9, 10, 10,
  11, 12, 13, 14, 15, 16, 17, 17,
  18, 19, 20, 20, 21, 21, 22, 22,
  23, 23, 24, 25, 25, 26, 27, 28,
  29, 30, 31, 32, 33, 34, 35, 36,
  37, 37, 38, 39, 40, 41, 42, 43,
  44, 45, 46, 46, 47, 48, 49, 50,
  51, 52, 53, 54, 55, 56, 57, 58,
  59, 60, 61, 62, 63, 64, 65, 66,
  67, 68, 69, 70, 71, 72, 73, 74,
  75, 76, 76, 77, 78, 79, 80, 81,
  82, 83, 84, 85, 86, 87, 88, 89,
  91, 93, 95, 96, 98, 100, 101, 102,
  104, 106, 108, 110, 112, 114, 116, 118,
  122, 124, 126, 128, 130, 132, 134, 136,
  138, 140, 143, 145, 148, 151, 154, 157
};

// AC quantizer steps, indexed by quantizer index in [0..127]
// (seeds q_[1] of the y1_ and uv_ matrices).
static const uint16_t kAcTable[128] = {
  4, 5, 6, 7, 8, 9, 10, 11,
  12, 13, 14, 15, 16, 17, 18, 19,
  20, 21, 22, 23, 24, 25, 26, 27,
  28, 29, 30, 31, 32, 33, 34, 35,
  36, 37, 38, 39, 40, 41, 42, 43,
  44, 45, 46, 47, 48, 49, 50, 51,
  52, 53, 54, 55, 56, 57, 58, 60,
  62, 64, 66, 68, 70, 72, 74, 76,
  78, 80, 82, 84, 86, 88, 90, 92,
  94, 96, 98, 100, 102, 104, 106, 108,
  110, 112, 114, 116, 119, 122, 125, 128,
  131, 134, 137, 140, 143, 146, 149, 152,
  155, 158, 161, 164, 167, 170, 173, 177,
  181, 185, 189, 193, 197, 201, 205, 209,
  213, 217, 221, 225, 229, 234, 239, 245,
  249, 254, 259, 264, 269, 274, 279, 284
};

// Alternate AC quantizer steps, used to seed q_[1] of the y2_ matrix
// (see SetupMatrices()).
static const uint16_t kAcTable2[128] = {
  8, 8, 9, 10, 12, 13, 15, 17,
  18, 20, 21, 23, 24, 26, 27, 29,
  31, 32, 34, 35, 37, 38, 40, 41,
  43, 44, 46, 48, 49, 51, 52, 54,
  55, 57, 58, 60, 62, 63, 65, 66,
  68, 69, 71, 72, 74, 75, 77, 79,
  80, 82, 83, 85, 86, 88, 89, 93,
  96, 99, 102, 105, 108, 111, 114, 117,
  120, 124, 127, 130, 133, 136, 139, 142,
  145, 148, 151, 155, 158, 161, 164, 167,
  170, 173, 176, 179, 184, 189, 193, 198,
  203, 207, 212, 217, 221, 226, 230, 235,
  240, 244, 249, 254, 258, 263, 268, 274,
  280, 286, 292, 299, 305, 311, 317, 323,
  330, 336, 342, 348, 354, 362, 370, 379,
  385, 393, 401, 409, 416, 424, 432, 440
};

// Quantization rounding biases, fed to BIAS() in ExpandMatrix().
// First index is the matrix type passed to ExpandMatrix() (0=y1_, 1=y2_,
// 2=uv_), second index is DC (0) vs AC (1).
static const uint8_t kBiasMatrices[3][2] = {  // [luma-ac,luma-dc,chroma][dc,ac]
  { 96, 110 }, { 96, 108 }, { 110, 115 }
};

// Sharpening by (slightly) raising the hi-frequency coeffs.
// Hack-ish but helpful for mid-bitrate range. Use with care.
#define SHARPEN_BITS 11  // number of descaling bits for sharpening bias
// Per-coefficient sharpening strength, larger for higher frequencies.
static const uint8_t kFreqSharpening[16] = {
  0, 30, 60, 90,
  30, 60, 90, 90,
  60, 90, 90, 90,
  90, 90, 90, 90
};
  182. //------------------------------------------------------------------------------
  183. // Initialize quantization parameters in VP8Matrix
  184. // Returns the average quantizer
  185. static int ExpandMatrix(VP8Matrix* const m, int type) {
  186. int i, sum;
  187. for (i = 0; i < 2; ++i) {
  188. const int is_ac_coeff = (i > 0);
  189. const int bias = kBiasMatrices[type][is_ac_coeff];
  190. m->iq_[i] = (1 << QFIX) / m->q_[i];
  191. m->bias_[i] = BIAS(bias);
  192. // zthresh_ is the exact value such that QUANTDIV(coeff, iQ, B) is:
  193. // * zero if coeff <= zthresh
  194. // * non-zero if coeff > zthresh
  195. m->zthresh_[i] = ((1 << QFIX) - 1 - m->bias_[i]) / m->iq_[i];
  196. }
  197. for (i = 2; i < 16; ++i) {
  198. m->q_[i] = m->q_[1];
  199. m->iq_[i] = m->iq_[1];
  200. m->bias_[i] = m->bias_[1];
  201. m->zthresh_[i] = m->zthresh_[1];
  202. }
  203. for (sum = 0, i = 0; i < 16; ++i) {
  204. if (type == 0) { // we only use sharpening for AC luma coeffs
  205. m->sharpen_[i] = (kFreqSharpening[i] * m->q_[i]) >> SHARPEN_BITS;
  206. } else {
  207. m->sharpen_[i] = 0;
  208. }
  209. sum += m->q_[i];
  210. }
  211. return (sum + 8) >> 4;
  212. }
  213. static void CheckLambdaValue(int* const v) { if (*v < 1) *v = 1; }
  214. static void SetupMatrices(VP8Encoder* enc) {
  215. int i;
  216. const int tlambda_scale =
  217. (enc->method_ >= 4) ? enc->config_->sns_strength
  218. : 0;
  219. const int num_segments = enc->segment_hdr_.num_segments_;
  220. for (i = 0; i < num_segments; ++i) {
  221. VP8SegmentInfo* const m = &enc->dqm_[i];
  222. const int q = m->quant_;
  223. int q_i4, q_i16, q_uv;
  224. m->y1_.q_[0] = kDcTable[clip(q + enc->dq_y1_dc_, 0, 127)];
  225. m->y1_.q_[1] = kAcTable[clip(q, 0, 127)];
  226. m->y2_.q_[0] = kDcTable[ clip(q + enc->dq_y2_dc_, 0, 127)] * 2;
  227. m->y2_.q_[1] = kAcTable2[clip(q + enc->dq_y2_ac_, 0, 127)];
  228. m->uv_.q_[0] = kDcTable[clip(q + enc->dq_uv_dc_, 0, 117)];
  229. m->uv_.q_[1] = kAcTable[clip(q + enc->dq_uv_ac_, 0, 127)];
  230. q_i4 = ExpandMatrix(&m->y1_, 0);
  231. q_i16 = ExpandMatrix(&m->y2_, 1);
  232. q_uv = ExpandMatrix(&m->uv_, 2);
  233. m->lambda_i4_ = (3 * q_i4 * q_i4) >> 7;
  234. m->lambda_i16_ = (3 * q_i16 * q_i16);
  235. m->lambda_uv_ = (3 * q_uv * q_uv) >> 6;
  236. m->lambda_mode_ = (1 * q_i4 * q_i4) >> 7;
  237. m->lambda_trellis_i4_ = (7 * q_i4 * q_i4) >> 3;
  238. m->lambda_trellis_i16_ = (q_i16 * q_i16) >> 2;
  239. m->lambda_trellis_uv_ = (q_uv * q_uv) << 1;
  240. m->tlambda_ = (tlambda_scale * q_i4) >> 5;
  241. // none of these constants should be < 1
  242. CheckLambdaValue(&m->lambda_i4_);
  243. CheckLambdaValue(&m->lambda_i16_);
  244. CheckLambdaValue(&m->lambda_uv_);
  245. CheckLambdaValue(&m->lambda_mode_);
  246. CheckLambdaValue(&m->lambda_trellis_i4_);
  247. CheckLambdaValue(&m->lambda_trellis_i16_);
  248. CheckLambdaValue(&m->lambda_trellis_uv_);
  249. CheckLambdaValue(&m->tlambda_);
  250. m->min_disto_ = 20 * m->y1_.q_[0]; // quantization-aware min disto
  251. m->max_edge_ = 0;
  252. m->i4_penalty_ = 1000 * q_i4 * q_i4;
  253. }
  254. }
  255. //------------------------------------------------------------------------------
  256. // Initialize filtering parameters
  257. // Very small filter-strength values have close to no visual effect. So we can
  258. // save a little decoding-CPU by turning filtering off for these.
#define FSTRENGTH_CUTOFF 2
// Derives each segment's loop-filter strength from its AC quantizer step
// and complexity, then records the global filter-header parameters.
static void SetupFilterStrength(VP8Encoder* const enc) {
  int i;
  // level0 is in [0..500]. Using '-f 50' as filter_strength is mid-filtering.
  const int level0 = 5 * enc->config_->filter_strength;
  for (i = 0; i < NUM_MB_SEGMENTS; ++i) {
    VP8SegmentInfo* const m = &enc->dqm_[i];
    // We focus on the quantization of AC coeffs.
    const int qstep = kAcTable[clip(m->quant_, 0, 127)] >> 2;
    const int base_strength =
        VP8FilterStrengthFromDelta(enc->filter_hdr_.sharpness_, qstep);
    // Segments with lower complexity ('beta') will be less filtered.
    const int f = base_strength * level0 / (256 + m->beta_);
    // clamp to [0..63]; strengths below FSTRENGTH_CUTOFF are zeroed out
    m->fstrength_ = (f < FSTRENGTH_CUTOFF) ? 0 : (f > 63) ? 63 : f;
  }
  // We record the initial strength (mainly for the case of 1-segment only).
  enc->filter_hdr_.level_ = enc->dqm_[0].fstrength_;
  enc->filter_hdr_.simple_ = (enc->config_->filter_type == 0);
  // NOTE(review): 'sharpness_' is read in the loop above and only refreshed
  // from the config here — ordering preserved as-is; confirm intentional.
  enc->filter_hdr_.sharpness_ = enc->config_->filter_sharpness;
}
  279. //------------------------------------------------------------------------------
  280. // Note: if you change the values below, remember that the max range
  281. // allowed by the syntax for DQ_UV is [-16,16].
  282. #define MAX_DQ_UV (6)
  283. #define MIN_DQ_UV (-4)
  284. // We want to emulate jpeg-like behaviour where the expected "good" quality
  285. // is around q=75. Internally, our "good" middle is around c=50. So we
  286. // map accordingly using linear piece-wise function
  287. static double QualityToCompression(double c) {
  288. const double linear_c = (c < 0.75) ? c * (2. / 3.) : 2. * c - 1.;
  289. // The file size roughly scales as pow(quantizer, 3.). Actually, the
  290. // exponent is somewhere between 2.8 and 3.2, but we're mostly interested
  291. // in the mid-quant range. So we scale the compressibility inversely to
  292. // this power-law: quant ~= compression ^ 1/3. This law holds well for
  293. // low quant. Finer modeling for high-quant would make use of kAcTable[]
  294. // more explicitly.
  295. const double v = pow(linear_c, 1 / 3.);
  296. return v;
  297. }
  298. static double QualityToJPEGCompression(double c, double alpha) {
  299. // We map the complexity 'alpha' and quality setting 'c' to a compression
  300. // exponent empirically matched to the compression curve of libjpeg6b.
  301. // On average, the WebP output size will be roughly similar to that of a
  302. // JPEG file compressed with same quality factor.
  303. const double amin = 0.30;
  304. const double amax = 0.85;
  305. const double exp_min = 0.4;
  306. const double exp_max = 0.9;
  307. const double slope = (exp_min - exp_max) / (amax - amin);
  308. // Linearly interpolate 'expn' from exp_min to exp_max
  309. // in the [amin, amax] range.
  310. const double expn = (alpha > amax) ? exp_min
  311. : (alpha < amin) ? exp_max
  312. : exp_max + slope * (alpha - amin);
  313. const double v = pow(c, expn);
  314. return v;
  315. }
  316. static int SegmentsAreEquivalent(const VP8SegmentInfo* const S1,
  317. const VP8SegmentInfo* const S2) {
  318. return (S1->quant_ == S2->quant_) && (S1->fstrength_ == S2->fstrength_);
  319. }
// Merges segments that ended up with equivalent parameters (same quantizer
// and filter strength), remaps macroblocks to the surviving segment ids and
// updates the segment count accordingly.
static void SimplifySegments(VP8Encoder* const enc) {
  int map[NUM_MB_SEGMENTS] = { 0, 1, 2, 3 };
  // 'num_segments_' is previously validated and <= NUM_MB_SEGMENTS, but an
  // explicit check is needed to avoid a spurious warning about 'i' exceeding
  // array bounds of 'dqm_' with some compilers (noticed with gcc-4.9).
  const int num_segments = (enc->segment_hdr_.num_segments_ < NUM_MB_SEGMENTS)
                               ? enc->segment_hdr_.num_segments_
                               : NUM_MB_SEGMENTS;
  int num_final_segments = 1;
  int s1, s2;
  for (s1 = 1; s1 < num_segments; ++s1) {  // find similar segments
    const VP8SegmentInfo* const S1 = &enc->dqm_[s1];
    int found = 0;
    // check if we already have similar segment
    for (s2 = 0; s2 < num_final_segments; ++s2) {
      const VP8SegmentInfo* const S2 = &enc->dqm_[s2];
      if (SegmentsAreEquivalent(S1, S2)) {
        found = 1;
        break;
      }
    }
    // On loop exit 's2' is either the matching kept segment, or (when no
    // match was found) num_final_segments — the slot 's1' is compacted into.
    map[s1] = s2;
    if (!found) {
      // keep this segment: compact it down into the next free slot
      if (num_final_segments != s1) {
        enc->dqm_[num_final_segments] = enc->dqm_[s1];
      }
      ++num_final_segments;
    }
  }
  if (num_final_segments < num_segments) {  // Remap
    int i = enc->mb_w_ * enc->mb_h_;
    while (i-- > 0) enc->mb_info_[i].segment_ = map[enc->mb_info_[i].segment_];
    enc->segment_hdr_.num_segments_ = num_final_segments;
    // Replicate the trailing segment infos (it's mostly cosmetics)
    for (i = num_final_segments; i < num_segments; ++i) {
      enc->dqm_[i] = enc->dqm_[num_final_segments - 1];
    }
  }
}
// Assigns a quantizer to every segment from the global 'quality' factor
// (modulated per-segment by its susceptibility 'alpha_'), derives the U/V
// delta-quantizers, then finalizes filter strengths and the quantization
// matrices.
void VP8SetSegmentParams(VP8Encoder* const enc, float quality) {
  int i;
  int dq_uv_ac, dq_uv_dc;
  const int num_segments = enc->segment_hdr_.num_segments_;
  // modulation amplitude driven by the SNS strength setting
  const double amp = SNS_TO_DQ * enc->config_->sns_strength / 100. / 128.;
  const double Q = quality / 100.;
  const double c_base = enc->config_->emulate_jpeg_size ?
      QualityToJPEGCompression(Q, enc->alpha_ / 255.) :
      QualityToCompression(Q);
  for (i = 0; i < num_segments; ++i) {
    // We modulate the base coefficient to accommodate for the quantization
    // susceptibility and allow denser segments to be quantized more.
    const double expn = 1. - amp * enc->dqm_[i].alpha_;
    const double c = pow(c_base, expn);
    const int q = (int)(127. * (1. - c));
    assert(expn > 0.);
    enc->dqm_[i].quant_ = clip(q, 0, 127);
  }
  // purely indicative in the bitstream (except for the 1-segment case)
  enc->base_quant_ = enc->dqm_[0].quant_;
  // fill-in values for the unused segments (required by the syntax)
  for (i = num_segments; i < NUM_MB_SEGMENTS; ++i) {
    enc->dqm_[i].quant_ = enc->base_quant_;
  }
  // uv_alpha_ is normally spread around ~60. The useful range is
  // typically ~30 (quite bad) to ~100 (ok to decimate UV more).
  // We map it to the safe maximal range of MAX/MIN_DQ_UV for dq_uv.
  dq_uv_ac = (enc->uv_alpha_ - MID_ALPHA) * (MAX_DQ_UV - MIN_DQ_UV)
                                          / (MAX_ALPHA - MIN_ALPHA);
  // we rescale by the user-defined strength of adaptation
  dq_uv_ac = dq_uv_ac * enc->config_->sns_strength / 100;
  // and make it safe.
  dq_uv_ac = clip(dq_uv_ac, MIN_DQ_UV, MAX_DQ_UV);
  // We also boost the dc-uv-quant a little, based on sns-strength, since
  // U/V channels are quite more reactive to high quants (flat DC-blocks
  // tend to appear, and are unpleasant).
  dq_uv_dc = -4 * enc->config_->sns_strength / 100;
  dq_uv_dc = clip(dq_uv_dc, -15, 15);  // 4bit-signed max allowed
  enc->dq_y1_dc_ = 0;  // TODO(skal): dq-lum
  enc->dq_y2_dc_ = 0;
  enc->dq_y2_ac_ = 0;
  enc->dq_uv_dc_ = dq_uv_dc;
  enc->dq_uv_ac_ = dq_uv_ac;
  SetupFilterStrength(enc);  // initialize segments' filtering, eventually
  if (num_segments > 1) SimplifySegments(enc);
  SetupMatrices(enc);  // finalize quantization matrices
}
  406. //------------------------------------------------------------------------------
  407. // Form the predictions in cache
  408. // Must be ordered using {DC_PRED, TM_PRED, V_PRED, H_PRED} as index
// offsets of the luma16 prediction blocks inside the prediction cache
const uint16_t VP8I16ModeOffsets[4] = { I16DC16, I16TM16, I16VE16, I16HE16 };
// chroma 8x8 prediction offsets, same {DC, TM, V, H} mode order
const uint16_t VP8UVModeOffsets[4] = { C8DC8, C8TM8, C8VE8, C8HE8 };
// Must be indexed using {B_DC_PRED -> B_HU_PRED} as index
const uint16_t VP8I4ModeOffsets[NUM_BMODES] = {
  I4DC4, I4TM4, I4VE4, I4HE4, I4RD4, I4VR4, I4LD4, I4VL4, I4HD4, I4HU4
};
// Computes the four whole-block Intra16 luma predictions into it->yuv_p_.
// Left/top neighboring samples are passed as NULL on picture borders.
void VP8MakeLuma16Preds(const VP8EncIterator* const it) {
  const uint8_t* const left = it->x_ ? it->y_left_ : NULL;
  const uint8_t* const top = it->y_ ? it->y_top_ : NULL;
  VP8EncPredLuma16(it->yuv_p_, left, top);
}
// Computes the four chroma 8x8 predictions into it->yuv_p_.
// Left/top neighboring samples are passed as NULL on picture borders.
void VP8MakeChroma8Preds(const VP8EncIterator* const it) {
  const uint8_t* const left = it->x_ ? it->u_left_ : NULL;
  const uint8_t* const top = it->y_ ? it->uv_top_ : NULL;
  VP8EncPredChroma8(it->yuv_p_, left, top);
}
// Computes the Intra4 predictions for the current 4x4 sub-block,
// using the top-left samples tracked in it->i4_top_.
void VP8MakeIntra4Preds(const VP8EncIterator* const it) {
  VP8EncPredLuma4(it->yuv_p_, it->i4_top_);
}
  428. //------------------------------------------------------------------------------
  429. // Quantize
  430. // Layout:
  431. // +----+----+
  432. // |YYYY|UUVV| 0
  433. // |YYYY|UUVV| 4
  434. // |YYYY|....| 8
  435. // |YYYY|....| 12
  436. // +----+----+
// Byte offsets of the 16 4x4 luma sub-blocks inside the work area,
// in raster order (see the layout diagram above).
const uint16_t VP8Scan[16] = {  // Luma
  0 + 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS,
  0 + 4 * BPS, 4 + 4 * BPS, 8 + 4 * BPS, 12 + 4 * BPS,
  0 + 8 * BPS, 4 + 8 * BPS, 8 + 8 * BPS, 12 + 8 * BPS,
  0 + 12 * BPS, 4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS,
};

// Byte offsets of the 4+4 chroma sub-blocks (U plane first, then V).
static const uint16_t VP8ScanUV[4 + 4] = {
  0 + 0 * BPS, 4 + 0 * BPS, 0 + 4 * BPS, 4 + 4 * BPS,  // U
  8 + 0 * BPS, 12 + 0 * BPS, 8 + 4 * BPS, 12 + 4 * BPS  // V
};
  447. //------------------------------------------------------------------------------
  448. // Distortion measurement
// Per-coefficient distortion weights for luma (indexed in raster order
// within the 4x4 block; lower frequencies weigh more).
static const uint16_t kWeightY[16] = {
  38, 32, 20, 9, 32, 28, 17, 7, 20, 17, 10, 4, 9, 7, 4, 2
};

// Per-coefficient weights applied to the trellis distortion term
// (uniform when USE_TDISTO is disabled).
static const uint16_t kWeightTrellis[16] = {
#if USE_TDISTO == 0
  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
#else
  30, 27, 19, 11,
  27, 24, 17, 10,
  19, 17, 12, 8,
  11, 10, 8, 6
#endif
};
  462. // Init/Copy the common fields in score.
  463. static void InitScore(VP8ModeScore* const rd) {
  464. rd->D = 0;
  465. rd->SD = 0;
  466. rd->R = 0;
  467. rd->H = 0;
  468. rd->nz = 0;
  469. rd->score = MAX_COST;
  470. }
  471. static void CopyScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
  472. dst->D = src->D;
  473. dst->SD = src->SD;
  474. dst->R = src->R;
  475. dst->H = src->H;
  476. dst->nz = src->nz; // note that nz is not accumulated, but just copied.
  477. dst->score = src->score;
  478. }
  479. static void AddScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
  480. dst->D += src->D;
  481. dst->SD += src->SD;
  482. dst->R += src->R;
  483. dst->H += src->H;
  484. dst->nz |= src->nz; // here, new nz bits are accumulated.
  485. dst->score += src->score;
  486. }
  487. //------------------------------------------------------------------------------
  488. // Performs trellis-optimized quantization.
  489. // Trellis node
// one candidate quantization level for a given coefficient position
typedef struct {
  int8_t prev;    // best previous node
  int8_t sign;    // sign of coeff_i
  int16_t level;  // level
} Node;

// Score state, tracked per surviving trellis path.
typedef struct {
  score_t score;          // partial RD score
  const uint16_t* costs;  // shortcut to cost tables
} ScoreState;

// If a coefficient was quantized to a value Q (using a neutral bias),
// we test all alternate possibilities between [Q-MIN_DELTA, Q+MAX_DELTA]
// We don't test negative values though.
#define MIN_DELTA 0  // how much lower level to try
#define MAX_DELTA 1  // how much higher
#define NUM_NODES (MIN_DELTA + 1 + MAX_DELTA)
// Accessors for the trellis arrays; the '+ MIN_DELTA' bias lets the level
// index 'l' range from -MIN_DELTA up to MAX_DELTA.
#define NODE(n, l) (nodes[(n)][(l) + MIN_DELTA])
#define SCORE_STATE(n, l) (score_states[n][(l) + MIN_DELTA])
  508. static WEBP_INLINE void SetRDScore(int lambda, VP8ModeScore* const rd) {
  509. rd->score = (rd->R + rd->H) * lambda + RD_DISTO_MULT * (rd->D + rd->SD);
  510. }
  511. static WEBP_INLINE score_t RDScoreTrellis(int lambda, score_t rate,
  512. score_t distortion) {
  513. return rate * lambda + RD_DISTO_MULT * distortion;
  514. }
// Coefficient type: selects which of enc->proba_'s probability and cost
// tables are used (cf. TrellisQuantizeBlock()).
enum { TYPE_I16_AC = 0, TYPE_I16_DC = 1, TYPE_CHROMA_A = 2, TYPE_I4_AC = 3 };
// Trellis-optimized quantization of one 4x4 coefficient block.
// 'in' holds the transform coefficients and is rewritten with the
// dequantized values of the retained levels; 'out' receives the quantized
// levels in zigzag order. 'ctx0' is the initial coding context, 'coeff_type'
// selects the probability/cost tables, 'lambda' is the rate multiplier.
// Returns non-zero if at least one non-zero level was kept.
static int TrellisQuantizeBlock(const VP8Encoder* const enc,
                                int16_t in[16], int16_t out[16],
                                int ctx0, int coeff_type,
                                const VP8Matrix* const mtx,
                                int lambda) {
  const ProbaArray* const probas = enc->proba_.coeffs_[coeff_type];
  CostArrayPtr const costs =
      (CostArrayPtr)enc->proba_.remapped_costs_[coeff_type];
  // For TYPE_I16_AC, position 0 is left untouched (see 'Fresh start' below).
  const int first = (coeff_type == TYPE_I16_AC) ? 1 : 0;
  Node nodes[16][NUM_NODES];
  ScoreState score_states[2][NUM_NODES];
  ScoreState* ss_cur = &SCORE_STATE(0, MIN_DELTA);
  ScoreState* ss_prev = &SCORE_STATE(1, MIN_DELTA);
  int best_path[3] = {-1, -1, -1};  // store best-last/best-level/best-previous
  score_t best_score;
  int n, m, p, last;
  {
    score_t cost;
    // squared-magnitude threshold below which trailing coefficients are
    // not considered interesting
    const int thresh = mtx->q_[1] * mtx->q_[1] / 4;
    const int last_proba = probas[VP8EncBands[first]][ctx0][0];
    // compute the position of the last interesting coefficient
    last = first - 1;
    for (n = 15; n >= first; --n) {
      const int j = kZigzag[n];
      const int err = in[j] * in[j];
      if (err > thresh) {
        last = n;
        break;
      }
    }
    // we don't need to go inspect up to n = 16 coeffs. We can just go up
    // to last + 1 (inclusive) without losing much.
    if (last < 15) ++last;
    // compute 'skip' score. This is the max score one can do.
    cost = VP8BitCost(0, last_proba);
    best_score = RDScoreTrellis(lambda, cost, 0);
    // initialize source node.
    for (m = -MIN_DELTA; m <= MAX_DELTA; ++m) {
      // the 'not skipped' bit is only paid when starting from context 0
      const score_t rate = (ctx0 == 0) ? VP8BitCost(1, last_proba) : 0;
      ss_cur[m].score = RDScoreTrellis(lambda, rate, 0);
      ss_cur[m].costs = costs[first][ctx0];
    }
  }
  // traverse trellis.
  for (n = first; n <= last; ++n) {
    const int j = kZigzag[n];
    const uint32_t Q = mtx->q_[j];
    const uint32_t iQ = mtx->iq_[j];
    const uint32_t B = BIAS(0x00);  // neutral bias
    // note: it's important to take sign of the _original_ coeff,
    // so we don't have to consider level < 0 afterward.
    const int sign = (in[j] < 0);
    const uint32_t coeff0 = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
    int level0 = QUANTDIV(coeff0, iQ, B);
    // upper bound on the levels worth trying (quantized with a high bias)
    int thresh_level = QUANTDIV(coeff0, iQ, BIAS(0x80));
    if (thresh_level > MAX_LEVEL) thresh_level = MAX_LEVEL;
    if (level0 > MAX_LEVEL) level0 = MAX_LEVEL;
    {  // Swap current and previous score states
      ScoreState* const tmp = ss_cur;
      ss_cur = ss_prev;
      ss_prev = tmp;
    }
    // test all alternate level values around level0.
    for (m = -MIN_DELTA; m <= MAX_DELTA; ++m) {
      Node* const cur = &NODE(n, m);
      const int level = level0 + m;
      const int ctx = (level > 2) ? 2 : level;  // coding context for coeff n+1
      const int band = VP8EncBands[n + 1];
      score_t base_score;
      score_t best_cur_score;
      int best_prev;
      score_t cost, score;
      ss_cur[m].costs = costs[n + 1][ctx];
      if (level < 0 || level > thresh_level) {
        ss_cur[m].score = MAX_COST;
        // Node is dead.
        continue;
      }
      {
        // Compute delta_error = how much coding this level will
        // subtract to max_error as distortion.
        // Here, distortion = sum of (|coeff_i| - level_i * Q_i)^2
        const int new_error = coeff0 - level * Q;
        const int delta_error =
            kWeightTrellis[j] * (new_error * new_error - coeff0 * coeff0);
        base_score = RDScoreTrellis(lambda, 0, delta_error);
      }
      // Inspect all possible non-dead predecessors. Retain only the best one.
      // The base_score is added to all scores so it is only added for the final
      // value after the loop.
      cost = VP8LevelCost(ss_prev[-MIN_DELTA].costs, level);
      best_cur_score =
          ss_prev[-MIN_DELTA].score + RDScoreTrellis(lambda, cost, 0);
      best_prev = -MIN_DELTA;
      for (p = -MIN_DELTA + 1; p <= MAX_DELTA; ++p) {
        // Dead nodes (with ss_prev[p].score >= MAX_COST) are automatically
        // eliminated since their score can't be better than the current best.
        cost = VP8LevelCost(ss_prev[p].costs, level);
        // Examine node assuming it's a non-terminal one.
        score = ss_prev[p].score + RDScoreTrellis(lambda, cost, 0);
        if (score < best_cur_score) {
          best_cur_score = score;
          best_prev = p;
        }
      }
      best_cur_score += base_score;
      // Store best finding in current node.
      cur->sign = sign;
      cur->level = level;
      cur->prev = best_prev;
      ss_cur[m].score = best_cur_score;
      // Now, record best terminal node (and thus best entry in the graph).
      if (level != 0 && best_cur_score < best_score) {
        // cost of signaling end-of-block right after this coefficient
        const score_t last_pos_cost =
            (n < 15) ? VP8BitCost(0, probas[band][ctx][0]) : 0;
        const score_t last_pos_score = RDScoreTrellis(lambda, last_pos_cost, 0);
        score = best_cur_score + last_pos_score;
        if (score < best_score) {
          best_score = score;
          best_path[0] = n;          // best eob position
          best_path[1] = m;          // best node index
          best_path[2] = best_prev;  // best predecessor
        }
      }
    }
  }
  // Fresh start
  // Beware! We must preserve in[0]/out[0] value for TYPE_I16_AC case.
  if (coeff_type == TYPE_I16_AC) {
    memset(in + 1, 0, 15 * sizeof(*in));
    memset(out + 1, 0, 15 * sizeof(*out));
  } else {
    memset(in, 0, 16 * sizeof(*in));
    memset(out, 0, 16 * sizeof(*out));
  }
  if (best_path[0] == -1) {
    return 0;  // skip!
  }
  {
    // Unwind the best path.
    // Note: best-prev on terminal node is not necessarily equal to the
    // best_prev for non-terminal. So we patch best_path[2] in.
    int nz = 0;
    int best_node = best_path[1];
    n = best_path[0];
    NODE(n, best_node).prev = best_path[2];  // force best-prev for terminal
    for (; n >= first; --n) {
      const Node* const node = &NODE(n, best_node);
      const int j = kZigzag[n];
      out[n] = node->sign ? -node->level : node->level;
      nz |= node->level;
      in[j] = out[n] * mtx->q_[j];  // store the dequantized value back
      best_node = node->prev;
    }
    return (nz != 0);
  }
}
  674. #undef NODE
  675. //------------------------------------------------------------------------------
  676. // Performs: difference, transform, quantize, back-transform, add
  677. // all at once. Output is the reconstructed block in *yuv_out, and the
  678. // quantized levels in *levels.
// Performs the whole intra16 coding pipeline for the luma macroblock:
// difference vs. prediction, forward transform, (optionally trellis-based)
// quantization, inverse transform and reconstruction into 'yuv_out'.
// Returns the non-zero flags: bits 0..15 for the sixteen 4x4 AC blocks,
// bit 24 for the DC (WHT) block.
static int ReconstructIntra16(VP8EncIterator* const it,
                              VP8ModeScore* const rd,
                              uint8_t* const yuv_out,
                              int mode) {
  const VP8Encoder* const enc = it->enc_;
  const uint8_t* const ref = it->yuv_p_ + VP8I16ModeOffsets[mode];
  const uint8_t* const src = it->yuv_in_ + Y_OFF_ENC;
  const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
  int nz = 0;
  int n;
  int16_t tmp[16][16], dc_tmp[16];

  // Forward-transform all 16 4x4 sub-blocks, two at a time.
  for (n = 0; n < 16; n += 2) {
    VP8FTransform2(src + VP8Scan[n], ref + VP8Scan[n], tmp[n]);
  }
  // The 16 DC coefficients go through a separate Walsh-Hadamard transform
  // and are quantized with the dedicated y2 matrix.
  VP8FTransformWHT(tmp[0], dc_tmp);
  nz |= VP8EncQuantizeBlockWHT(dc_tmp, rd->y_dc_levels, &dqm->y2_) << 24;

  if (DO_TRELLIS_I16 && it->do_trellis_) {
    // Rate-distortion optimized quantization: each block's coding context
    // depends on the non-zero status of its top/left neighbors.
    int x, y;
    VP8IteratorNzToBytes(it);
    for (y = 0, n = 0; y < 4; ++y) {
      for (x = 0; x < 4; ++x, ++n) {
        const int ctx = it->top_nz_[x] + it->left_nz_[y];
        const int non_zero = TrellisQuantizeBlock(
            enc, tmp[n], rd->y_ac_levels[n], ctx, TYPE_I16_AC, &dqm->y1_,
            dqm->lambda_trellis_i16_);
        it->top_nz_[x] = it->left_nz_[y] = non_zero;
        rd->y_ac_levels[n][0] = 0;  // DC is coded separately via the WHT above
        nz |= non_zero << n;
      }
    }
  } else {
    for (n = 0; n < 16; n += 2) {
      // Zero-out the first coeff, so that: a) nz is correct below, and
      // b) finding 'last' non-zero coeffs in SetResidualCoeffs() is simplified.
      tmp[n][0] = tmp[n + 1][0] = 0;
      nz |= VP8EncQuantize2Blocks(tmp[n], rd->y_ac_levels[n], &dqm->y1_) << n;
      assert(rd->y_ac_levels[n + 0][0] == 0);
      assert(rd->y_ac_levels[n + 1][0] == 0);
    }
  }

  // Transform back
  VP8TransformWHT(dc_tmp, tmp[0]);
  for (n = 0; n < 16; n += 2) {
    VP8ITransform(ref + VP8Scan[n], tmp[n], yuv_out + VP8Scan[n], 1);
  }
  return nz;
}
  726. static int ReconstructIntra4(VP8EncIterator* const it,
  727. int16_t levels[16],
  728. const uint8_t* const src,
  729. uint8_t* const yuv_out,
  730. int mode) {
  731. const VP8Encoder* const enc = it->enc_;
  732. const uint8_t* const ref = it->yuv_p_ + VP8I4ModeOffsets[mode];
  733. const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
  734. int nz = 0;
  735. int16_t tmp[16];
  736. VP8FTransform(src, ref, tmp);
  737. if (DO_TRELLIS_I4 && it->do_trellis_) {
  738. const int x = it->i4_ & 3, y = it->i4_ >> 2;
  739. const int ctx = it->top_nz_[x] + it->left_nz_[y];
  740. nz = TrellisQuantizeBlock(enc, tmp, levels, ctx, TYPE_I4_AC, &dqm->y1_,
  741. dqm->lambda_trellis_i4_);
  742. } else {
  743. nz = VP8EncQuantizeBlock(tmp, levels, &dqm->y1_);
  744. }
  745. VP8ITransform(ref, tmp, yuv_out, 0);
  746. return nz;
  747. }
  748. //------------------------------------------------------------------------------
  749. // DC-error diffusion
  750. // Diffusion weights. We under-correct a bit (15/16th of the error is actually
  751. // diffused) to avoid 'rainbow' chessboard pattern of blocks at q~=0.
  752. #define C1 7 // fraction of error sent to the 4x4 block below
  753. #define C2 8 // fraction of error sent to the 4x4 block on the right
  754. #define DSHIFT 4
  755. #define DSCALE 1 // storage descaling, needed to make the error fit int8_t
  756. // Quantize as usual, but also compute and return the quantization error.
  757. // Error is already divided by DSHIFT.
  758. static int QuantizeSingle(int16_t* const v, const VP8Matrix* const mtx) {
  759. int V = *v;
  760. const int sign = (V < 0);
  761. if (sign) V = -V;
  762. if (V > (int)mtx->zthresh_[0]) {
  763. const int qV = QUANTDIV(V, mtx->iq_[0], mtx->bias_[0]) * mtx->q_[0];
  764. const int err = (V - qV);
  765. *v = sign ? -qV : qV;
  766. return (sign ? -err : err) >> DSCALE;
  767. }
  768. *v = 0;
  769. return (sign ? -V : V) >> DSCALE;
  770. }
// Propagates the stored quantization error of the neighboring blocks into
// the DC coefficients of the four 4x4 sub-blocks of each chroma channel
// (error diffusion), quantizes those DCs, and records the resulting errors
// in rd->derr[] so StoreDiffusionErrors() can pass them to the next block.
static void CorrectDCValues(const VP8EncIterator* const it,
                            const VP8Matrix* const mtx,
                            int16_t tmp[][16], VP8ModeScore* const rd) {
  //         | top[0] | top[1]
  // --------+--------+---------
  // left[0] | tmp[0]   tmp[1]  <->   err0 err1
  // left[1] | tmp[2]   tmp[3]        err2 err3
  //
  // Final errors {err1,err2,err3} are preserved and later restored
  // as top[]/left[] on the next block.
  int ch;
  for (ch = 0; ch <= 1; ++ch) {  // one pass per chroma channel
    const int8_t* const top = it->top_derr_[it->x_][ch];
    const int8_t* const left = it->left_derr_[ch];
    int16_t (* const c)[16] = &tmp[ch * 4];  // this channel's 4 sub-blocks
    int err0, err1, err2, err3;
    // Each DC receives a C1/C2-weighted fraction of its neighbors' errors
    // (top neighbor weighted by C1, left neighbor by C2), then is quantized,
    // producing a fresh error that feeds the next DC in raster order.
    c[0][0] += (C1 * top[0] + C2 * left[0]) >> (DSHIFT - DSCALE);
    err0 = QuantizeSingle(&c[0][0], mtx);
    c[1][0] += (C1 * top[1] + C2 * err0) >> (DSHIFT - DSCALE);
    err1 = QuantizeSingle(&c[1][0], mtx);
    c[2][0] += (C1 * err0 + C2 * left[1]) >> (DSHIFT - DSCALE);
    err2 = QuantizeSingle(&c[2][0], mtx);
    c[3][0] += (C1 * err1 + C2 * err2) >> (DSHIFT - DSCALE);
    err3 = QuantizeSingle(&c[3][0], mtx);
    // error 'err' is bounded by mtx->q_[0] which is 132 at max. Hence
    // err >> DSCALE will fit in an int8_t type if DSCALE>=1.
    assert(abs(err1) <= 127 && abs(err2) <= 127 && abs(err3) <= 127);
    rd->derr[ch][0] = (int8_t)err1;
    rd->derr[ch][1] = (int8_t)err2;
    rd->derr[ch][2] = (int8_t)err3;
  }
}
  803. static void StoreDiffusionErrors(VP8EncIterator* const it,
  804. const VP8ModeScore* const rd) {
  805. int ch;
  806. for (ch = 0; ch <= 1; ++ch) {
  807. int8_t* const top = it->top_derr_[it->x_][ch];
  808. int8_t* const left = it->left_derr_[ch];
  809. left[0] = rd->derr[ch][0]; // restore err1
  810. left[1] = 3 * rd->derr[ch][2] >> 2; // ... 3/4th of err3
  811. top[0] = rd->derr[ch][1]; // ... err2
  812. top[1] = rd->derr[ch][2] - left[1]; // ... 1/4th of err3.
  813. }
  814. }
  815. #undef C1
  816. #undef C2
  817. #undef DSHIFT
  818. #undef DSCALE
  819. //------------------------------------------------------------------------------
// Performs difference, transform, quantize, back-transform and add for the
// two chroma planes at once. Output is the reconstructed samples in
// 'yuv_out' and the quantized levels in rd->uv_levels. The non-zero flags
// are returned shifted left by 16 (chroma bits sit above the luma ones).
static int ReconstructUV(VP8EncIterator* const it, VP8ModeScore* const rd,
                         uint8_t* const yuv_out, int mode) {
  const VP8Encoder* const enc = it->enc_;
  const uint8_t* const ref = it->yuv_p_ + VP8UVModeOffsets[mode];
  const uint8_t* const src = it->yuv_in_ + U_OFF_ENC;
  const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
  int nz = 0;
  int n;
  int16_t tmp[8][16];

  // Forward-transform the 8 chroma 4x4 sub-blocks, two at a time.
  for (n = 0; n < 8; n += 2) {
    VP8FTransform2(src + VP8ScanUV[n], ref + VP8ScanUV[n], tmp[n]);
  }
  // Apply DC error diffusion when enabled.
  if (it->top_derr_ != NULL) CorrectDCValues(it, &dqm->uv_, tmp, rd);

  if (DO_TRELLIS_UV && it->do_trellis_) {
    // Trellis quantization. 'ch' takes the values {0, 2} to address the
    // two chroma halves of the top_nz_/left_nz_ context arrays.
    int ch, x, y;
    for (ch = 0, n = 0; ch <= 2; ch += 2) {
      for (y = 0; y < 2; ++y) {
        for (x = 0; x < 2; ++x, ++n) {
          const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y];
          const int non_zero = TrellisQuantizeBlock(
              enc, tmp[n], rd->uv_levels[n], ctx, TYPE_CHROMA_A, &dqm->uv_,
              dqm->lambda_trellis_uv_);
          it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] = non_zero;
          nz |= non_zero << n;
        }
      }
    }
  } else {
    for (n = 0; n < 8; n += 2) {
      nz |= VP8EncQuantize2Blocks(tmp[n], rd->uv_levels[n], &dqm->uv_) << n;
    }
  }

  // Reconstruct: inverse transform and add back to the prediction.
  for (n = 0; n < 8; n += 2) {
    VP8ITransform(ref + VP8ScanUV[n], tmp[n], yuv_out + VP8ScanUV[n], 1);
  }
  return (nz << 16);
}
  857. //------------------------------------------------------------------------------
// RD-opt decision. Reconstruct each mode, evaluate distortion and bit-cost.
// Pick the mode with the lowest RD-cost = Rate + lambda * Distortion.
  860. static void StoreMaxDelta(VP8SegmentInfo* const dqm, const int16_t DCs[16]) {
  861. // We look at the first three AC coefficients to determine what is the average
  862. // delta between each sub-4x4 block.
  863. const int v0 = abs(DCs[1]);
  864. const int v1 = abs(DCs[2]);
  865. const int v2 = abs(DCs[4]);
  866. int max_v = (v1 > v0) ? v1 : v0;
  867. max_v = (v2 > max_v) ? v2 : max_v;
  868. if (max_v > dqm->max_edge_) dqm->max_edge_ = max_v;
  869. }
  870. static void SwapModeScore(VP8ModeScore** a, VP8ModeScore** b) {
  871. VP8ModeScore* const tmp = *a;
  872. *a = *b;
  873. *b = tmp;
  874. }
  875. static void SwapPtr(uint8_t** a, uint8_t** b) {
  876. uint8_t* const tmp = *a;
  877. *a = *b;
  878. *b = tmp;
  879. }
  880. static void SwapOut(VP8EncIterator* const it) {
  881. SwapPtr(&it->yuv_out_, &it->yuv_out2_);
  882. }
  883. static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* rd) {
  884. const int kNumBlocks = 16;
  885. VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_];
  886. const int lambda = dqm->lambda_i16_;
  887. const int tlambda = dqm->tlambda_;
  888. const uint8_t* const src = it->yuv_in_ + Y_OFF_ENC;
  889. VP8ModeScore rd_tmp;
  890. VP8ModeScore* rd_cur = &rd_tmp;
  891. VP8ModeScore* rd_best = rd;
  892. int mode;
  893. int is_flat = IsFlatSource16(it->yuv_in_ + Y_OFF_ENC);
  894. rd->mode_i16 = -1;
  895. for (mode = 0; mode < NUM_PRED_MODES; ++mode) {
  896. uint8_t* const tmp_dst = it->yuv_out2_ + Y_OFF_ENC; // scratch buffer
  897. rd_cur->mode_i16 = mode;
  898. // Reconstruct
  899. rd_cur->nz = ReconstructIntra16(it, rd_cur, tmp_dst, mode);
  900. // Measure RD-score
  901. rd_cur->D = VP8SSE16x16(src, tmp_dst);
  902. rd_cur->SD =
  903. tlambda ? MULT_8B(tlambda, VP8TDisto16x16(src, tmp_dst, kWeightY)) : 0;
  904. rd_cur->H = VP8FixedCostsI16[mode];
  905. rd_cur->R = VP8GetCostLuma16(it, rd_cur);
  906. if (is_flat) {
  907. // refine the first impression (which was in pixel space)
  908. is_flat = IsFlat(rd_cur->y_ac_levels[0], kNumBlocks, FLATNESS_LIMIT_I16);
  909. if (is_flat) {
  910. // Block is very flat. We put emphasis on the distortion being very low!
  911. rd_cur->D *= 2;
  912. rd_cur->SD *= 2;
  913. }
  914. }
  915. // Since we always examine Intra16 first, we can overwrite *rd directly.
  916. SetRDScore(lambda, rd_cur);
  917. if (mode == 0 || rd_cur->score < rd_best->score) {
  918. SwapModeScore(&rd_cur, &rd_best);
  919. SwapOut(it);
  920. }
  921. }
  922. if (rd_best != rd) {
  923. memcpy(rd, rd_best, sizeof(*rd));
  924. }
  925. SetRDScore(dqm->lambda_mode_, rd); // finalize score for mode decision.
  926. VP8SetIntra16Mode(it, rd->mode_i16);
  927. // we have a blocky macroblock (only DCs are non-zero) with fairly high
  928. // distortion, record max delta so we can later adjust the minimal filtering
  929. // strength needed to smooth these blocks out.
  930. if ((rd->nz & 0x100ffff) == 0x1000000 && rd->D > dqm->min_disto_) {
  931. StoreMaxDelta(dqm, rd->y_dc_levels);
  932. }
  933. }
  934. //------------------------------------------------------------------------------
  935. // return the cost array corresponding to the surrounding prediction modes.
  936. static const uint16_t* GetCostModeI4(VP8EncIterator* const it,
  937. const uint8_t modes[16]) {
  938. const int preds_w = it->enc_->preds_w_;
  939. const int x = (it->i4_ & 3), y = it->i4_ >> 2;
  940. const int left = (x == 0) ? it->preds_[y * preds_w - 1] : modes[it->i4_ - 1];
  941. const int top = (y == 0) ? it->preds_[-preds_w + x] : modes[it->i4_ - 4];
  942. return VP8FixedCostsI4[top][left];
  943. }
// Tries intra4x4 coding for the whole macroblock, sub-block by sub-block,
// accumulating the best per-block RD scores into rd_best. Bails out early
// (returning 0, i.e. keep intra16) as soon as the accumulated score exceeds
// the intra16 score already stored in '*rd', or when the mode-header budget
// is exhausted. Returns 1 when intra4x4 wins, with '*rd', the iterator modes
// and yuv_out_ updated accordingly.
static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) {
  const VP8Encoder* const enc = it->enc_;
  const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
  const int lambda = dqm->lambda_i4_;
  const int tlambda = dqm->tlambda_;
  const uint8_t* const src0 = it->yuv_in_ + Y_OFF_ENC;
  uint8_t* const best_blocks = it->yuv_out2_ + Y_OFF_ENC;
  int total_header_bits = 0;
  VP8ModeScore rd_best;

  // A zero budget disables intra4 evaluation altogether.
  if (enc->max_i4_header_bits_ == 0) {
    return 0;
  }

  InitScore(&rd_best);
  rd_best.H = 211;  // '211' is the value of VP8BitCost(0, 145)
  SetRDScore(dqm->lambda_mode_, &rd_best);
  VP8IteratorStartI4(it);
  do {
    const int kNumBlocks = 1;
    VP8ModeScore rd_i4;
    int mode;
    int best_mode = -1;
    const uint8_t* const src = src0 + VP8Scan[it->i4_];
    const uint16_t* const mode_costs = GetCostModeI4(it, rd->modes_i4);
    uint8_t* best_block = best_blocks + VP8Scan[it->i4_];
    uint8_t* tmp_dst = it->yuv_p_ + I4TMP;    // scratch buffer.

    InitScore(&rd_i4);
    VP8MakeIntra4Preds(it);
    for (mode = 0; mode < NUM_BMODES; ++mode) {
      VP8ModeScore rd_tmp;
      int16_t tmp_levels[16];

      // Reconstruct
      rd_tmp.nz =
          ReconstructIntra4(it, tmp_levels, src, tmp_dst, mode) << it->i4_;

      // Compute RD-score
      rd_tmp.D = VP8SSE4x4(src, tmp_dst);
      rd_tmp.SD =
          tlambda ? MULT_8B(tlambda, VP8TDisto4x4(src, tmp_dst, kWeightY))
                  : 0;
      rd_tmp.H = mode_costs[mode];

      // Add flatness penalty, to avoid flat area to be mispredicted
      // by a complex mode.
      if (mode > 0 && IsFlat(tmp_levels, kNumBlocks, FLATNESS_LIMIT_I4)) {
        rd_tmp.R = FLATNESS_PENALTY * kNumBlocks;
      } else {
        rd_tmp.R = 0;
      }

      // early-out check: skip the (costlier) level-cost evaluation when this
      // mode can no longer beat the current best.
      SetRDScore(lambda, &rd_tmp);
      if (best_mode >= 0 && rd_tmp.score >= rd_i4.score) continue;

      // finish computing score
      rd_tmp.R += VP8GetCostLuma4(it, tmp_levels);
      SetRDScore(lambda, &rd_tmp);

      if (best_mode < 0 || rd_tmp.score < rd_i4.score) {
        CopyScore(&rd_i4, &rd_tmp);
        best_mode = mode;
        // Keep the winning reconstruction by swapping scratch <-> best.
        SwapPtr(&tmp_dst, &best_block);
        memcpy(rd_best.y_ac_levels[it->i4_], tmp_levels,
               sizeof(rd_best.y_ac_levels[it->i4_]));
      }
    }
    SetRDScore(dqm->lambda_mode_, &rd_i4);
    AddScore(&rd_best, &rd_i4);
    // Early-out: intra4 already worse than the intra16 score in '*rd'.
    if (rd_best.score >= rd->score) {
      return 0;
    }
    total_header_bits += (int)rd_i4.H;   // <- equal to mode_costs[best_mode];
    if (total_header_bits > enc->max_i4_header_bits_) {
      return 0;
    }
    // Copy selected samples if not in the right place already.
    if (best_block != best_blocks + VP8Scan[it->i4_]) {
      VP8Copy4x4(best_block, best_blocks + VP8Scan[it->i4_]);
    }
    rd->modes_i4[it->i4_] = best_mode;
    it->top_nz_[it->i4_ & 3] = it->left_nz_[it->i4_ >> 2] = (rd_i4.nz ? 1 : 0);
  } while (VP8IteratorRotateI4(it, best_blocks));

  // finalize state
  CopyScore(rd, &rd_best);
  VP8SetIntra4Mode(it, rd->modes_i4);
  SwapOut(it);
  memcpy(rd->y_ac_levels, rd_best.y_ac_levels, sizeof(rd->y_ac_levels));
  return 1;   // select intra4x4 over intra16x16
}
  1027. //------------------------------------------------------------------------------
// Evaluates all chroma prediction modes and keeps the one with the lowest
// RD score. The winning reconstruction ends up in it->yuv_out_ (copied back
// if the final swap left it in the scratch buffer), its levels/scores are
// accumulated into '*rd', and its diffusion errors are stored for the next
// macroblock when error diffusion is enabled.
static void PickBestUV(VP8EncIterator* const it, VP8ModeScore* const rd) {
  const int kNumBlocks = 8;
  const VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_];
  const int lambda = dqm->lambda_uv_;
  const uint8_t* const src = it->yuv_in_ + U_OFF_ENC;
  uint8_t* tmp_dst = it->yuv_out2_ + U_OFF_ENC;  // scratch buffer
  uint8_t* dst0 = it->yuv_out_ + U_OFF_ENC;
  uint8_t* dst = dst0;  // tracks where the best reconstruction currently is
  VP8ModeScore rd_best;
  int mode;

  rd->mode_uv = -1;
  InitScore(&rd_best);
  for (mode = 0; mode < NUM_PRED_MODES; ++mode) {
    VP8ModeScore rd_uv;

    // Reconstruct
    rd_uv.nz = ReconstructUV(it, &rd_uv, tmp_dst, mode);

    // Compute RD-score
    rd_uv.D = VP8SSE16x8(src, tmp_dst);
    rd_uv.SD = 0;    // not calling TDisto here: it tends to flatten areas.
    rd_uv.H = VP8FixedCostsUV[mode];
    rd_uv.R = VP8GetCostUV(it, &rd_uv);
    // Penalize complex modes on flat blocks to avoid misprediction.
    if (mode > 0 && IsFlat(rd_uv.uv_levels[0], kNumBlocks, FLATNESS_LIMIT_UV)) {
      rd_uv.R += FLATNESS_PENALTY * kNumBlocks;
    }

    SetRDScore(lambda, &rd_uv);
    if (mode == 0 || rd_uv.score < rd_best.score) {
      CopyScore(&rd_best, &rd_uv);
      rd->mode_uv = mode;
      memcpy(rd->uv_levels, rd_uv.uv_levels, sizeof(rd->uv_levels));
      if (it->top_derr_ != NULL) {
        memcpy(rd->derr, rd_uv.derr, sizeof(rd_uv.derr));
      }
      // The new best now lives where we just reconstructed; swap roles.
      SwapPtr(&dst, &tmp_dst);
    }
  }
  VP8SetIntraUVMode(it, rd->mode_uv);
  AddScore(rd, &rd_best);
  if (dst != dst0) {   // copy 16x8 block if needed
    VP8Copy16x8(dst, dst0);
  }
  if (it->top_derr_ != NULL) {  // store diffusion errors for next block
    StoreDiffusionErrors(it, rd);
  }
}
  1072. //------------------------------------------------------------------------------
  1073. // Final reconstruction and quantization.
  1074. static void SimpleQuantize(VP8EncIterator* const it, VP8ModeScore* const rd) {
  1075. const VP8Encoder* const enc = it->enc_;
  1076. const int is_i16 = (it->mb_->type_ == 1);
  1077. int nz = 0;
  1078. if (is_i16) {
  1079. nz = ReconstructIntra16(it, rd, it->yuv_out_ + Y_OFF_ENC, it->preds_[0]);
  1080. } else {
  1081. VP8IteratorStartI4(it);
  1082. do {
  1083. const int mode =
  1084. it->preds_[(it->i4_ & 3) + (it->i4_ >> 2) * enc->preds_w_];
  1085. const uint8_t* const src = it->yuv_in_ + Y_OFF_ENC + VP8Scan[it->i4_];
  1086. uint8_t* const dst = it->yuv_out_ + Y_OFF_ENC + VP8Scan[it->i4_];
  1087. VP8MakeIntra4Preds(it);
  1088. nz |= ReconstructIntra4(it, rd->y_ac_levels[it->i4_],
  1089. src, dst, mode) << it->i4_;
  1090. } while (VP8IteratorRotateI4(it, it->yuv_out_ + Y_OFF_ENC));
  1091. }
  1092. nz |= ReconstructUV(it, rd, it->yuv_out_ + U_OFF_ENC, it->mb_->uv_mode_);
  1093. rd->nz = nz;
  1094. }
  1095. // Refine intra16/intra4 sub-modes based on distortion only (not rate).
// Refine intra16/intra4 sub-modes based on distortion only (not rate).
// When 'try_both_modes' is set, both intra16 and intra4 are scored and the
// cheaper one wins; otherwise the macroblock's pre-decided type is kept.
// When 'refine_uv_mode' is set, the chroma mode is re-picked the same way.
// The selected modes are reconstructed into yuv_out_, and rd->nz/rd->score
// are updated.
static void RefineUsingDistortion(VP8EncIterator* const it,
                                  int try_both_modes, int refine_uv_mode,
                                  VP8ModeScore* const rd) {
  score_t best_score = MAX_COST;
  int nz = 0;
  int mode;
  int is_i16 = try_both_modes || (it->mb_->type_ == 1);
  const VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_];
  // Some empiric constants, of approximate order of magnitude.
  const int lambda_d_i16 = 106;
  const int lambda_d_i4 = 11;
  const int lambda_d_uv = 120;
  score_t score_i4 = dqm->i4_penalty_;  // flat penalty: i4 needs more bits
  score_t i4_bit_sum = 0;
  const score_t bit_limit = try_both_modes ? it->enc_->mb_header_limit_
                                           : MAX_COST;  // no early-out allowed

  if (is_i16) {  // First, evaluate Intra16 distortion
    int best_mode = -1;
    const uint8_t* const src = it->yuv_in_ + Y_OFF_ENC;
    for (mode = 0; mode < NUM_PRED_MODES; ++mode) {
      const uint8_t* const ref = it->yuv_p_ + VP8I16ModeOffsets[mode];
      const score_t score = (score_t)VP8SSE16x16(src, ref) * RD_DISTO_MULT
                          + VP8FixedCostsI16[mode] * lambda_d_i16;
      // Skip over-budget modes (mode 0 is always allowed as a fallback).
      if (mode > 0 && VP8FixedCostsI16[mode] > bit_limit) {
        continue;
      }
      if (score < best_score) {
        best_mode = mode;
        best_score = score;
      }
    }
    if (it->x_ == 0 || it->y_ == 0) {
      // avoid starting a checkerboard resonance from the border. See bug #432.
      if (IsFlatSource16(src)) {
        best_mode = (it->x_ == 0) ? 0 : 2;
        try_both_modes = 0;  // stick to i16
      }
    }
    VP8SetIntra16Mode(it, best_mode);
    // we'll reconstruct later, if i16 mode actually gets selected
  }

  // Next, evaluate Intra4
  if (try_both_modes || !is_i16) {
    // We don't evaluate the rate here, but just account for it through a
    // constant penalty (i4 mode usually needs more bits compared to i16).
    is_i16 = 0;
    VP8IteratorStartI4(it);
    do {
      int best_i4_mode = -1;
      score_t best_i4_score = MAX_COST;
      const uint8_t* const src = it->yuv_in_ + Y_OFF_ENC + VP8Scan[it->i4_];
      const uint16_t* const mode_costs = GetCostModeI4(it, rd->modes_i4);

      VP8MakeIntra4Preds(it);
      for (mode = 0; mode < NUM_BMODES; ++mode) {
        const uint8_t* const ref = it->yuv_p_ + VP8I4ModeOffsets[mode];
        const score_t score = VP8SSE4x4(src, ref) * RD_DISTO_MULT
                            + mode_costs[mode] * lambda_d_i4;
        if (score < best_i4_score) {
          best_i4_mode = mode;
          best_i4_score = score;
        }
      }
      i4_bit_sum += mode_costs[best_i4_mode];
      rd->modes_i4[it->i4_] = best_i4_mode;
      score_i4 += best_i4_score;
      if (score_i4 >= best_score || i4_bit_sum > bit_limit) {
        // Intra4 won't be better than Intra16. Bail out and pick Intra16.
        is_i16 = 1;
        break;
      } else {  // reconstruct partial block inside yuv_out2_ buffer
        uint8_t* const tmp_dst = it->yuv_out2_ + Y_OFF_ENC + VP8Scan[it->i4_];
        nz |= ReconstructIntra4(it, rd->y_ac_levels[it->i4_],
                                src, tmp_dst, best_i4_mode) << it->i4_;
      }
    } while (VP8IteratorRotateI4(it, it->yuv_out2_ + Y_OFF_ENC));
  }

  // Final reconstruction, depending on which mode is selected.
  if (!is_i16) {
    VP8SetIntra4Mode(it, rd->modes_i4);
    SwapOut(it);  // the partial i4 reconstruction lives in yuv_out2_
    best_score = score_i4;
  } else {
    nz = ReconstructIntra16(it, rd, it->yuv_out_ + Y_OFF_ENC, it->preds_[0]);
  }

  // ... and UV!
  if (refine_uv_mode) {
    int best_mode = -1;
    score_t best_uv_score = MAX_COST;
    const uint8_t* const src = it->yuv_in_ + U_OFF_ENC;
    for (mode = 0; mode < NUM_PRED_MODES; ++mode) {
      const uint8_t* const ref = it->yuv_p_ + VP8UVModeOffsets[mode];
      const score_t score = VP8SSE16x8(src, ref) * RD_DISTO_MULT
                          + VP8FixedCostsUV[mode] * lambda_d_uv;
      if (score < best_uv_score) {
        best_mode = mode;
        best_uv_score = score;
      }
    }
    VP8SetIntraUVMode(it, best_mode);
  }
  nz |= ReconstructUV(it, rd, it->yuv_out_ + U_OFF_ENC, it->mb_->uv_mode_);

  rd->nz = nz;
  rd->score = best_score;
}
  1200. //------------------------------------------------------------------------------
  1201. // Entry point
  1202. int VP8Decimate(VP8EncIterator* const it, VP8ModeScore* const rd,
  1203. VP8RDLevel rd_opt) {
  1204. int is_skipped;
  1205. const int method = it->enc_->method_;
  1206. InitScore(rd);
  1207. // We can perform predictions for Luma16x16 and Chroma8x8 already.
  1208. // Luma4x4 predictions needs to be done as-we-go.
  1209. VP8MakeLuma16Preds(it);
  1210. VP8MakeChroma8Preds(it);
  1211. if (rd_opt > RD_OPT_NONE) {
  1212. it->do_trellis_ = (rd_opt >= RD_OPT_TRELLIS_ALL);
  1213. PickBestIntra16(it, rd);
  1214. if (method >= 2) {
  1215. PickBestIntra4(it, rd);
  1216. }
  1217. PickBestUV(it, rd);
  1218. if (rd_opt == RD_OPT_TRELLIS) { // finish off with trellis-optim now
  1219. it->do_trellis_ = 1;
  1220. SimpleQuantize(it, rd);
  1221. }
  1222. } else {
  1223. // At this point we have heuristically decided intra16 / intra4.
  1224. // For method >= 2, pick the best intra4/intra16 based on SSE (~tad slower).
  1225. // For method <= 1, we don't re-examine the decision but just go ahead with
  1226. // quantization/reconstruction.
  1227. RefineUsingDistortion(it, (method >= 2), (method >= 1), rd);
  1228. }
  1229. is_skipped = (rd->nz == 0);
  1230. VP8SetSkip(it, is_skipped);
  1231. return is_skipped;
  1232. }