vf_colorspace.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110
  1. /*
  2. * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
/**
 * @file
 * Convert between colorspaces.
 */
  24. #include "libavutil/avassert.h"
  25. #include "libavutil/opt.h"
  26. #include "libavutil/pixdesc.h"
  27. #include "libavutil/pixfmt.h"
  28. #include "avfilter.h"
  29. #include "colorspacedsp.h"
  30. #include "formats.h"
  31. #include "internal.h"
  32. #include "video.h"
/* Dithering applied in the final rgb2yuv step (see convert()). */
enum DitherMode {
    DITHER_NONE,
    DITHER_FSB,  ///< Floyd-Steinberg (uses the rgb2yuv_fsb DSP function)
    DITHER_NB,   ///< sentinel: number of dither modes
};

/* Combined presets (stored in user_all) mapping to a matching
 * (trc, primaries, matrix) triplet via the default_* tables below. */
enum Colorspace {
    CS_UNSPECIFIED,
    CS_BT470M,
    CS_BT470BG,
    CS_BT601_6_525,
    CS_BT601_6_625,
    CS_BT709,
    CS_SMPTE170M,
    CS_SMPTE240M,
    CS_BT2020,
    CS_NB, ///< sentinel: number of presets
};

/* Reference white points referenced by the color_primaries[] table. */
enum Whitepoint {
    WP_D65,
    WP_C,
    WP_NB, ///< sentinel
};

/* Chromatic-adaptation method used when in/out white points differ.
 * The non-identity entries index ma_tbl[] in fill_whitepoint_conv_table(). */
enum WhitepointAdaptation {
    WP_ADAPT_BRADFORD,
    WP_ADAPT_VON_KRIES,
    NB_WP_ADAPT_NON_IDENTITY,
    WP_ADAPT_IDENTITY = NB_WP_ADAPT_NON_IDENTITY, ///< no adaptation matrix
    NB_WP_ADAPT,
};
/* Default transfer characteristic for each Colorspace preset. */
static const enum AVColorTransferCharacteristic default_trc[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_TRC_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_TRC_GAMMA22,
    [CS_BT470BG]     = AVCOL_TRC_GAMMA28,
    [CS_BT601_6_525] = AVCOL_TRC_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_TRC_SMPTE170M,
    [CS_BT709]       = AVCOL_TRC_BT709,
    [CS_SMPTE170M]   = AVCOL_TRC_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_TRC_SMPTE240M,
    [CS_BT2020]      = AVCOL_TRC_BT2020_10,
    [CS_NB]          = AVCOL_TRC_UNSPECIFIED,
};

/* Default primaries for each Colorspace preset. */
static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_PRI_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_PRI_BT470M,
    [CS_BT470BG]     = AVCOL_PRI_BT470BG,
    [CS_BT601_6_525] = AVCOL_PRI_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_PRI_BT470BG,
    [CS_BT709]       = AVCOL_PRI_BT709,
    [CS_SMPTE170M]   = AVCOL_PRI_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_PRI_SMPTE240M,
    [CS_BT2020]      = AVCOL_PRI_BT2020,
    [CS_NB]          = AVCOL_PRI_UNSPECIFIED,
};

/* Default YCbCr matrix for each Colorspace preset. */
static const enum AVColorSpace default_csp[CS_NB + 1] = {
    [CS_UNSPECIFIED] = AVCOL_SPC_UNSPECIFIED,
    [CS_BT470M]      = AVCOL_SPC_SMPTE170M,
    [CS_BT470BG]     = AVCOL_SPC_BT470BG,
    [CS_BT601_6_525] = AVCOL_SPC_SMPTE170M,
    [CS_BT601_6_625] = AVCOL_SPC_BT470BG,
    [CS_BT709]       = AVCOL_SPC_BT709,
    [CS_SMPTE170M]   = AVCOL_SPC_SMPTE170M,
    [CS_SMPTE240M]   = AVCOL_SPC_SMPTE240M,
    [CS_BT2020]      = AVCOL_SPC_BT2020_NCL,
    [CS_NB]          = AVCOL_SPC_UNSPECIFIED,
};
/* CIE 1931 xy chromaticities of the R, G, B primaries plus the
 * associated reference white point. */
struct ColorPrimaries {
    enum Whitepoint wp;
    double xr, yr, xg, yg, xb, yb;
};

/* Piecewise gamma-curve parameters: linear segment with slope delta below
 * the (linear-domain) breakpoint beta, power segment with exponent gamma
 * and scale alpha above it. See fill_gamma_table(). */
struct TransferCharacteristics {
    double alpha, beta, gamma, delta;
};

/* Contribution of R, G, B to luma (Y) for one YCbCr matrix. */
struct LumaCoefficients {
    double cr, cg, cb;
};

/* CIE xy chromaticity of a reference white point. */
struct WhitepointCoefficients {
    double xw, yw;
};

typedef struct ColorSpaceContext {
    const AVClass *class;

    ColorSpaceDSPContext dsp;

    /* user options; user_all presumably selects csp/trc/prm together via
     * the default_* tables — TODO confirm against the AVOption table */
    enum Colorspace user_all;
    enum AVColorSpace in_csp, out_csp, user_csp;
    enum AVColorRange in_rng, out_rng, user_rng;
    enum AVColorTransferCharacteristic in_trc, out_trc, user_trc;
    enum AVColorPrimaries in_prm, out_prm, user_prm;
    enum AVPixelFormat in_format, user_format;
    int fast_mode;
    enum DitherMode dither;
    enum WhitepointAdaptation wp_adapt;

    /* intermediate scaled-RGB planes (15-bit + sign; see convert()) */
    int16_t *rgb[3];
    ptrdiff_t rgb_stride;   // in int16_t units (used with int16_t pointers)
    unsigned rgb_sz;
    int *dither_scratch[3][2], *dither_scratch_base[3][2];

    /* cached per-stage state; pointers are cleared in create_filtergraph()
     * whenever the matching frame property changes */
    const struct ColorPrimaries *in_primaries, *out_primaries;
    int lrgb2lrgb_passthrough;
    DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8]; // 2.14 fixed point

    const struct TransferCharacteristics *in_txchr, *out_txchr;
    int rgb2rgb_passthrough;
    int16_t *lin_lut, *delin_lut; // one allocation; delin_lut = lin_lut + 32768

    const struct LumaCoefficients *in_lumacoef, *out_lumacoef;
    int yuv2yuv_passthrough, yuv2yuv_fastmode;
    DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
    yuv2rgb_fn yuv2rgb;
    rgb2yuv_fn rgb2yuv;
    rgb2yuv_fsb_fn rgb2yuv_fsb;
    yuv2yuv_fn yuv2yuv;
    double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
    int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;
} ColorSpaceContext;
  146. // FIXME deal with odd width/heights (or just forbid it)
  147. // FIXME faster linearize/delinearize implementation (integer pow)
  148. // FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
  149. // FIXME test that the values in (de)lin_lut don't exceed their container storage
  150. // type size (only useful if we keep the LUT and don't move to fast integer pow)
  151. // FIXME dithering if bitdepth goes down?
  152. // FIXME bitexact for fate integration?
  153. /*
  154. * All constants explained in e.g. https://linuxtv.org/downloads/v4l-dvb-apis/ch02s06.html
  155. * The older ones (bt470bg/m) are also explained in their respective ITU docs
  156. * (e.g. https://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.470-5-199802-S!!PDF-E.pdf)
  157. * whereas the newer ones can typically be copied directly from wikipedia :)
  158. */
/* Y = cr*R + cg*G + cb*B per supported AVColorSpace. Unlisted entries are
 * all-zero and rejected by get_luma_coefficients(). */
static const struct LumaCoefficients luma_coefficients[AVCOL_SPC_NB] = {
    [AVCOL_SPC_FCC]        = { 0.30,   0.59,   0.11   },
    [AVCOL_SPC_BT470BG]    = { 0.299,  0.587,  0.114  },
    [AVCOL_SPC_SMPTE170M]  = { 0.299,  0.587,  0.114  },
    [AVCOL_SPC_BT709]      = { 0.2126, 0.7152, 0.0722 },
    [AVCOL_SPC_SMPTE240M]  = { 0.212,  0.701,  0.087  },
    [AVCOL_SPC_BT2020_NCL] = { 0.2627, 0.6780, 0.0593 },
    [AVCOL_SPC_BT2020_CL]  = { 0.2627, 0.6780, 0.0593 },
};
  168. static const struct LumaCoefficients *get_luma_coefficients(enum AVColorSpace csp)
  169. {
  170. const struct LumaCoefficients *coeffs;
  171. if (csp >= AVCOL_SPC_NB)
  172. return NULL;
  173. coeffs = &luma_coefficients[csp];
  174. if (!coeffs->cr)
  175. return NULL;
  176. return coeffs;
  177. }
  178. static void fill_rgb2yuv_table(const struct LumaCoefficients *coeffs,
  179. double rgb2yuv[3][3])
  180. {
  181. double bscale, rscale;
  182. rgb2yuv[0][0] = coeffs->cr;
  183. rgb2yuv[0][1] = coeffs->cg;
  184. rgb2yuv[0][2] = coeffs->cb;
  185. bscale = 0.5 / (coeffs->cb - 1.0);
  186. rscale = 0.5 / (coeffs->cr - 1.0);
  187. rgb2yuv[1][0] = bscale * coeffs->cr;
  188. rgb2yuv[1][1] = bscale * coeffs->cg;
  189. rgb2yuv[1][2] = 0.5;
  190. rgb2yuv[2][0] = 0.5;
  191. rgb2yuv[2][1] = rscale * coeffs->cg;
  192. rgb2yuv[2][2] = rscale * coeffs->cb;
  193. }
// FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
// find any actual tables that document their real values...
// See http://www.13thmonkey.org/~boris/gammacorrection/ first graph why it matters

/* alpha/beta/gamma/delta per AVColorTransferCharacteristic. Unlisted
 * entries are all-zero (alpha == 0) and rejected by
 * get_transfer_characteristics(). */
static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB] = {
    [AVCOL_TRC_BT709]     = { 1.099,  0.018,  0.45,      4.5 },
    [AVCOL_TRC_GAMMA22]   = { 1.0,    0.0,    1.0 / 2.2, 0.0 },
    [AVCOL_TRC_GAMMA28]   = { 1.0,    0.0,    1.0 / 2.8, 0.0 },
    [AVCOL_TRC_SMPTE170M] = { 1.099,  0.018,  0.45,      4.5 },
    [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45,      4.0 },
    [AVCOL_TRC_BT2020_10] = { 1.099,  0.018,  0.45,      4.5 },
    [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45,      4.5 },
};
  206. static const struct TransferCharacteristics *
  207. get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
  208. {
  209. const struct TransferCharacteristics *coeffs;
  210. if (trc >= AVCOL_TRC_NB)
  211. return NULL;
  212. coeffs = &transfer_characteristics[trc];
  213. if (!coeffs->alpha)
  214. return NULL;
  215. return coeffs;
  216. }
/* CIE xy coordinates of each supported reference white point. */
static const struct WhitepointCoefficients whitepoint_coefficients[WP_NB] = {
    [WP_D65] = { 0.3127, 0.3290 },
    [WP_C]   = { 0.3100, 0.3160 },
};

/* R/G/B chromaticities and white point per AVColorPrimaries. Unlisted
 * entries are all-zero and rejected by get_color_primaries(). */
static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB] = {
    [AVCOL_PRI_BT709]     = { WP_D65, 0.640, 0.330, 0.300, 0.600, 0.150, 0.060 },
    [AVCOL_PRI_BT470M]    = { WP_C,   0.670, 0.330, 0.210, 0.710, 0.140, 0.080 },
    [AVCOL_PRI_BT470BG]   = { WP_D65, 0.640, 0.330, 0.290, 0.600, 0.150, 0.060, },
    [AVCOL_PRI_SMPTE170M] = { WP_D65, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
    [AVCOL_PRI_SMPTE240M] = { WP_D65, 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 },
    [AVCOL_PRI_BT2020]    = { WP_D65, 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 },
};
  229. static const struct ColorPrimaries *get_color_primaries(enum AVColorPrimaries prm)
  230. {
  231. const struct ColorPrimaries *coeffs;
  232. if (prm >= AVCOL_PRI_NB)
  233. return NULL;
  234. coeffs = &color_primaries[prm];
  235. if (!coeffs->xr)
  236. return NULL;
  237. return coeffs;
  238. }
  239. static void invert_matrix3x3(const double in[3][3], double out[3][3])
  240. {
  241. double m00 = in[0][0], m01 = in[0][1], m02 = in[0][2],
  242. m10 = in[1][0], m11 = in[1][1], m12 = in[1][2],
  243. m20 = in[2][0], m21 = in[2][1], m22 = in[2][2];
  244. int i, j;
  245. double det;
  246. out[0][0] = (m11 * m22 - m21 * m12);
  247. out[0][1] = -(m01 * m22 - m21 * m02);
  248. out[0][2] = (m01 * m12 - m11 * m02);
  249. out[1][0] = -(m10 * m22 - m20 * m12);
  250. out[1][1] = (m00 * m22 - m20 * m02);
  251. out[1][2] = -(m00 * m12 - m10 * m02);
  252. out[2][0] = (m10 * m21 - m20 * m11);
  253. out[2][1] = -(m00 * m21 - m20 * m01);
  254. out[2][2] = (m00 * m11 - m10 * m01);
  255. det = m00 * out[0][0] + m10 * out[0][1] + m20 * out[0][2];
  256. det = 1.0 / det;
  257. for (i = 0; i < 3; i++) {
  258. for (j = 0; j < 3; j++)
  259. out[i][j] *= det;
  260. }
  261. }
  262. static int fill_gamma_table(ColorSpaceContext *s)
  263. {
  264. int n;
  265. double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
  266. double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
  267. double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
  268. double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
  269. double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
  270. s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
  271. if (!s->lin_lut)
  272. return AVERROR(ENOMEM);
  273. s->delin_lut = &s->lin_lut[32768];
  274. for (n = 0; n < 32768; n++) {
  275. double v = (n - 2048.0) / 28672.0, d, l;
  276. // delinearize
  277. if (v <= -out_beta) {
  278. d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
  279. } else if (v < out_beta) {
  280. d = out_delta * v;
  281. } else {
  282. d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
  283. }
  284. s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));
  285. // linearize
  286. if (v <= -in_beta) {
  287. l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
  288. } else if (v < in_beta) {
  289. l = v * in_idelta;
  290. } else {
  291. l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
  292. }
  293. s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
  294. }
  295. return 0;
  296. }
  297. /*
  298. * see e.g. http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
  299. */
  300. static void fill_rgb2xyz_table(const struct ColorPrimaries *coeffs,
  301. double rgb2xyz[3][3])
  302. {
  303. const struct WhitepointCoefficients *wp = &whitepoint_coefficients[coeffs->wp];
  304. double i[3][3], sr, sg, sb, zw;
  305. rgb2xyz[0][0] = coeffs->xr / coeffs->yr;
  306. rgb2xyz[0][1] = coeffs->xg / coeffs->yg;
  307. rgb2xyz[0][2] = coeffs->xb / coeffs->yb;
  308. rgb2xyz[1][0] = rgb2xyz[1][1] = rgb2xyz[1][2] = 1.0;
  309. rgb2xyz[2][0] = (1.0 - coeffs->xr - coeffs->yr) / coeffs->yr;
  310. rgb2xyz[2][1] = (1.0 - coeffs->xg - coeffs->yg) / coeffs->yg;
  311. rgb2xyz[2][2] = (1.0 - coeffs->xb - coeffs->yb) / coeffs->yb;
  312. invert_matrix3x3(rgb2xyz, i);
  313. zw = 1.0 - wp->xw - wp->yw;
  314. sr = i[0][0] * wp->xw + i[0][1] * wp->yw + i[0][2] * zw;
  315. sg = i[1][0] * wp->xw + i[1][1] * wp->yw + i[1][2] * zw;
  316. sb = i[2][0] * wp->xw + i[2][1] * wp->yw + i[2][2] * zw;
  317. rgb2xyz[0][0] *= sr;
  318. rgb2xyz[0][1] *= sg;
  319. rgb2xyz[0][2] *= sb;
  320. rgb2xyz[1][0] *= sr;
  321. rgb2xyz[1][1] *= sg;
  322. rgb2xyz[1][2] *= sb;
  323. rgb2xyz[2][0] *= sr;
  324. rgb2xyz[2][1] *= sg;
  325. rgb2xyz[2][2] *= sb;
  326. }
  327. static void mul3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
  328. {
  329. int m, n;
  330. for (m = 0; m < 3; m++)
  331. for (n = 0; n < 3; n++)
  332. dst[m][n] = src2[m][0] * src1[0][n] +
  333. src2[m][1] * src1[1][n] +
  334. src2[m][2] * src1[2][n];
  335. }
/*
 * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
 * The adaptation matrix is selected by wp_adapt (Bradford or von Kries).
 */
/*
 * Compute the XYZ-domain chromatic-adaptation matrix that maps colors
 * relative to the src white point to colors relative to the dst white
 * point, using the cone-response matrix selected by wp_adapt:
 * out = ma * diag(dst_cone / src_cone) * ma^-1.
 */
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
                                       enum Whitepoint src, enum Whitepoint dst)
{
    /* XYZ -> cone-response transforms, indexed by adaptation method */
    static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
        [WP_ADAPT_BRADFORD] = {
            {  0.8951, 0.2664, -0.1614 },
            { -0.7502, 1.7135,  0.0367 },
            {  0.0389, -0.0685, 1.0296 },
        }, [WP_ADAPT_VON_KRIES] = {
            {  0.40024, 0.70760, -0.08081 },
            { -0.22630, 1.16532,  0.04570 },
            {  0.00000, 0.00000,  0.91822 },
        },
    };
    const double (*ma)[3] = ma_tbl[wp_adapt];
    const struct WhitepointCoefficients *wp_src = &whitepoint_coefficients[src];
    double zw_src = 1.0 - wp_src->xw - wp_src->yw;
    const struct WhitepointCoefficients *wp_dst = &whitepoint_coefficients[dst];
    double zw_dst = 1.0 - wp_dst->xw - wp_dst->yw;
    double mai[3][3], fac[3][3], tmp[3][3];
    double rs, gs, bs, rd, gd, bd;

    invert_matrix3x3(ma, mai);
    /* cone responses of the source and destination white points */
    rs = ma[0][0] * wp_src->xw + ma[0][1] * wp_src->yw + ma[0][2] * zw_src;
    gs = ma[1][0] * wp_src->xw + ma[1][1] * wp_src->yw + ma[1][2] * zw_src;
    bs = ma[2][0] * wp_src->xw + ma[2][1] * wp_src->yw + ma[2][2] * zw_src;
    rd = ma[0][0] * wp_dst->xw + ma[0][1] * wp_dst->yw + ma[0][2] * zw_dst;
    gd = ma[1][0] * wp_dst->xw + ma[1][1] * wp_dst->yw + ma[1][2] * zw_dst;
    bd = ma[2][0] * wp_dst->xw + ma[2][1] * wp_dst->yw + ma[2][2] * zw_dst;
    /* diagonal per-cone gain matrix */
    fac[0][0] = rd / rs;
    fac[1][1] = gd / gs;
    fac[2][2] = bd / bs;
    fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
    mul3x3(tmp, ma, fac);
    mul3x3(out, tmp, mai);
}
  375. static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
  376. int w, int h, const int16_t *lut)
  377. {
  378. int y, x, n;
  379. for (n = 0; n < 3; n++) {
  380. int16_t *data = buf[n];
  381. for (y = 0; y < h; y++) {
  382. for (x = 0; x < w; x++)
  383. data[x] = lut[av_clip_uintp2(2048 + data[x], 15)];
  384. data += stride;
  385. }
  386. }
  387. }
/* Per-frame state shared by the slice-threaded convert() jobs. */
struct ThreadData {
    AVFrame *in, *out;
    ptrdiff_t in_linesize[3], out_linesize[3]; // bytes per row, per plane
    int in_ss_h, out_ss_h;                     // log2 vertical chroma subsampling
};
/*
 * Slice worker (one of n_jobs): converts luma rows [h1, h2) of td->in into
 * td->out, either directly in the YUV domain (fast mode) or via the
 * YUV -> RGB -> (linearize, 3x3, delinearize) -> YUV pipeline. Slices are
 * aligned to even luma rows so subsampled chroma is never split between jobs.
 */
static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
{
    struct ThreadData *td = data;
    ColorSpaceContext *s = ctx->priv;
    uint8_t *in_data[3], *out_data[3];
    int16_t *rgb[3];
    // divide the (rounded-up) half-height so slice bounds land on even rows
    int h_in = (td->in->height + 1) >> 1;
    int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
    int w = td->in->width, h = h2 - h1;

    in_data[0]  = td->in->data[0]  + td->in_linesize[0]  * h1;
    in_data[1]  = td->in->data[1]  + td->in_linesize[1]  * (h1 >> td->in_ss_h);
    in_data[2]  = td->in->data[2]  + td->in_linesize[2]  * (h1 >> td->in_ss_h);
    out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
    out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
    out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
    rgb[0] = s->rgb[0] + s->rgb_stride * h1;
    rgb[1] = s->rgb[1] + s->rgb_stride * h1;
    rgb[2] = s->rgb[2] + s->rgb_stride * h1;

    // FIXME for simd, also make sure we do pictures with negative stride
    // top-down so we don't overwrite lines with padding of data before it
    // in the same buffer (same as swscale)
    if (s->yuv2yuv_fastmode) {
        // FIXME possibly use a fast mode in case only the y range changes?
        // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
        // are non-zero
        s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
                   s->yuv2yuv_coeffs, s->yuv_offset);
    } else {
        // FIXME maybe (for caching efficiency) do pipeline per-line instead of
        // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
        // 2 lines, for yuv420.)
        /*
         * General design:
         * - yuv2rgb converts from whatever range the input was ([16-235/240] or
         *   [0,255] or the 10/12bpp equivalents thereof) to an integer version
         *   of RGB in pseudo-restricted 15+sign bits. That means that the float
         *   range [0.0,1.0] is in [0,28672], and the remainder of the int16_t
         *   range is used for overflow/underflow outside the representable
         *   range of this RGB type. rgb2yuv is the exact opposite.
         * - gamma correction is done using a LUT since that appears to work
         *   fairly fast.
         * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
         *   (or rgb2yuv conversion) uses nearest-neighbour sampling to read
         *   chroma pixels at luma resolution. If you want some more fancy
         *   filter, you can use swscale to convert to yuv444p.
         * - all coefficients are 14bit (so in the [-2.0,2.0] range).
         */
        s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
                   s->yuv2rgb_coeffs, s->yuv_offset[0]);
        if (!s->rgb2rgb_passthrough) {
            apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
            if (!s->lrgb2lrgb_passthrough)
                s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
            apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
        }
        if (s->dither == DITHER_FSB) {
            s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
                           s->rgb2yuv_coeffs, s->yuv_offset[1], s->dither_scratch);
        } else {
            s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
                       s->rgb2yuv_coeffs, s->yuv_offset[1]);
        }
    }

    return 0;
}
  458. static int get_range_off(int *off, int *y_rng, int *uv_rng,
  459. enum AVColorRange rng, int depth)
  460. {
  461. switch (rng) {
  462. case AVCOL_RANGE_MPEG:
  463. *off = 16 << (depth - 8);
  464. *y_rng = 219 << (depth - 8);
  465. *uv_rng = 224 << (depth - 8);
  466. break;
  467. case AVCOL_RANGE_JPEG:
  468. *off = 0;
  469. *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
  470. break;
  471. default:
  472. return AVERROR(EINVAL);
  473. }
  474. return 0;
  475. }
  476. static int create_filtergraph(AVFilterContext *ctx,
  477. const AVFrame *in, const AVFrame *out)
  478. {
  479. ColorSpaceContext *s = ctx->priv;
  480. const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
  481. const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
  482. int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
  483. #define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
  484. #define supported_subsampling(lcw, lch) \
  485. (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
  486. #define supported_format(d) \
  487. ((d) != NULL && (d)->nb_components == 3 && \
  488. !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
  489. supported_depth((d)->comp[0].depth) && \
  490. supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))
  491. if (!supported_format(in_desc)) {
  492. av_log(ctx, AV_LOG_ERROR,
  493. "Unsupported input format %d (%s) or bitdepth (%d)\n",
  494. in->format, av_get_pix_fmt_name(in->format),
  495. in_desc ? in_desc->comp[0].depth : -1);
  496. return AVERROR(EINVAL);
  497. }
  498. if (!supported_format(out_desc)) {
  499. av_log(ctx, AV_LOG_ERROR,
  500. "Unsupported output format %d (%s) or bitdepth (%d)\n",
  501. out->format, av_get_pix_fmt_name(out->format),
  502. out_desc ? out_desc->comp[0].depth : -1);
  503. return AVERROR(EINVAL);
  504. }
  505. if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
  506. if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
  507. if (in->color_trc != s->in_trc) s->in_txchr = NULL;
  508. if (out->color_trc != s->out_trc) s->out_txchr = NULL;
  509. if (in->colorspace != s->in_csp ||
  510. in->color_range != s->in_rng) s->in_lumacoef = NULL;
  511. if (out->colorspace != s->out_csp ||
  512. out->color_range != s->out_rng) s->out_lumacoef = NULL;
  513. if (!s->out_primaries || !s->in_primaries) {
  514. s->in_prm = in->color_primaries;
  515. s->in_primaries = get_color_primaries(s->in_prm);
  516. if (!s->in_primaries) {
  517. av_log(ctx, AV_LOG_ERROR,
  518. "Unsupported input primaries %d (%s)\n",
  519. s->in_prm, av_color_primaries_name(s->in_prm));
  520. return AVERROR(EINVAL);
  521. }
  522. s->out_prm = out->color_primaries;
  523. s->out_primaries = get_color_primaries(s->out_prm);
  524. if (!s->out_primaries) {
  525. if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
  526. if (s->user_all == CS_UNSPECIFIED) {
  527. av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
  528. } else {
  529. av_log(ctx, AV_LOG_ERROR,
  530. "Unsupported output color property %d\n", s->user_all);
  531. }
  532. } else {
  533. av_log(ctx, AV_LOG_ERROR,
  534. "Unsupported output primaries %d (%s)\n",
  535. s->out_prm, av_color_primaries_name(s->out_prm));
  536. }
  537. return AVERROR(EINVAL);
  538. }
  539. s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
  540. sizeof(*s->in_primaries));
  541. if (!s->lrgb2lrgb_passthrough) {
  542. double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
  543. fill_rgb2xyz_table(s->out_primaries, rgb2xyz);
  544. invert_matrix3x3(rgb2xyz, xyz2rgb);
  545. fill_rgb2xyz_table(s->in_primaries, rgb2xyz);
  546. if (s->out_primaries->wp != s->in_primaries->wp &&
  547. s->wp_adapt != WP_ADAPT_IDENTITY) {
  548. double wpconv[3][3], tmp[3][3];
  549. fill_whitepoint_conv_table(wpconv, s->wp_adapt, s->in_primaries->wp,
  550. s->out_primaries->wp);
  551. mul3x3(tmp, rgb2xyz, wpconv);
  552. mul3x3(rgb2rgb, tmp, xyz2rgb);
  553. } else {
  554. mul3x3(rgb2rgb, rgb2xyz, xyz2rgb);
  555. }
  556. for (m = 0; m < 3; m++)
  557. for (n = 0; n < 3; n++) {
  558. s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
  559. for (o = 1; o < 8; o++)
  560. s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
  561. }
  562. emms = 1;
  563. }
  564. }
  565. if (!s->in_txchr) {
  566. av_freep(&s->lin_lut);
  567. s->in_trc = in->color_trc;
  568. s->in_txchr = get_transfer_characteristics(s->in_trc);
  569. if (!s->in_txchr) {
  570. av_log(ctx, AV_LOG_ERROR,
  571. "Unsupported input transfer characteristics %d (%s)\n",
  572. s->in_trc, av_color_transfer_name(s->in_trc));
  573. return AVERROR(EINVAL);
  574. }
  575. }
  576. if (!s->out_txchr) {
  577. av_freep(&s->lin_lut);
  578. s->out_trc = out->color_trc;
  579. s->out_txchr = get_transfer_characteristics(s->out_trc);
  580. if (!s->out_txchr) {
  581. if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
  582. if (s->user_all == CS_UNSPECIFIED) {
  583. av_log(ctx, AV_LOG_ERROR,
  584. "Please specify output transfer characteristics\n");
  585. } else {
  586. av_log(ctx, AV_LOG_ERROR,
  587. "Unsupported output color property %d\n", s->user_all);
  588. }
  589. } else {
  590. av_log(ctx, AV_LOG_ERROR,
  591. "Unsupported output transfer characteristics %d (%s)\n",
  592. s->out_trc, av_color_transfer_name(s->out_trc));
  593. }
  594. return AVERROR(EINVAL);
  595. }
  596. }
  597. s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
  598. !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
  599. if (!s->rgb2rgb_passthrough && !s->lin_lut) {
  600. res = fill_gamma_table(s);
  601. if (res < 0)
  602. return res;
  603. emms = 1;
  604. }
  605. if (!s->in_lumacoef) {
  606. s->in_csp = in->colorspace;
  607. s->in_rng = in->color_range;
  608. s->in_lumacoef = get_luma_coefficients(s->in_csp);
  609. if (!s->in_lumacoef) {
  610. av_log(ctx, AV_LOG_ERROR,
  611. "Unsupported input colorspace %d (%s)\n",
  612. s->in_csp, av_color_space_name(s->in_csp));
  613. return AVERROR(EINVAL);
  614. }
  615. redo_yuv2rgb = 1;
  616. }
  617. if (!s->out_lumacoef) {
  618. s->out_csp = out->colorspace;
  619. s->out_rng = out->color_range;
  620. s->out_lumacoef = get_luma_coefficients(s->out_csp);
  621. if (!s->out_lumacoef) {
  622. if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
  623. if (s->user_all == CS_UNSPECIFIED) {
  624. av_log(ctx, AV_LOG_ERROR,
  625. "Please specify output transfer characteristics\n");
  626. } else {
  627. av_log(ctx, AV_LOG_ERROR,
  628. "Unsupported output color property %d\n", s->user_all);
  629. }
  630. } else {
  631. av_log(ctx, AV_LOG_ERROR,
  632. "Unsupported output transfer characteristics %d (%s)\n",
  633. s->out_csp, av_color_space_name(s->out_csp));
  634. }
  635. return AVERROR(EINVAL);
  636. }
  637. redo_rgb2yuv = 1;
  638. }
  639. fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
  640. in_desc->log2_chroma_w == out_desc->log2_chroma_w;
  641. s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
  642. s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
  643. !memcmp(s->in_lumacoef, s->out_lumacoef,
  644. sizeof(*s->in_lumacoef)) &&
  645. in_desc->comp[0].depth == out_desc->comp[0].depth;
  646. if (!s->yuv2yuv_passthrough) {
  647. if (redo_yuv2rgb) {
  648. double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
  649. int off, bits, in_rng;
  650. res = get_range_off(&off, &s->in_y_rng, &s->in_uv_rng,
  651. s->in_rng, in_desc->comp[0].depth);
  652. if (res < 0) {
  653. av_log(ctx, AV_LOG_ERROR,
  654. "Unsupported input color range %d (%s)\n",
  655. s->in_rng, av_color_range_name(s->in_rng));
  656. return res;
  657. }
  658. for (n = 0; n < 8; n++)
  659. s->yuv_offset[0][n] = off;
  660. fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
  661. invert_matrix3x3(rgb2yuv, yuv2rgb);
  662. bits = 1 << (in_desc->comp[0].depth - 1);
  663. for (n = 0; n < 3; n++) {
  664. for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
  665. s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
  666. for (o = 1; o < 8; o++)
  667. s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
  668. }
  669. }
  670. av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
  671. av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
  672. av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
  673. av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
  674. s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
  675. [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
  676. emms = 1;
  677. }
  678. if (redo_rgb2yuv) {
  679. double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
  680. int off, out_rng, bits;
  681. res = get_range_off(&off, &s->out_y_rng, &s->out_uv_rng,
  682. s->out_rng, out_desc->comp[0].depth);
  683. if (res < 0) {
  684. av_log(ctx, AV_LOG_ERROR,
  685. "Unsupported output color range %d (%s)\n",
  686. s->out_rng, av_color_range_name(s->out_rng));
  687. return res;
  688. }
  689. for (n = 0; n < 8; n++)
  690. s->yuv_offset[1][n] = off;
  691. fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
  692. bits = 1 << (29 - out_desc->comp[0].depth);
  693. for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
  694. for (m = 0; m < 3; m++) {
  695. s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
  696. for (o = 1; o < 8; o++)
  697. s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
  698. }
  699. }
  700. av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
  701. s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
  702. [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
  703. s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
  704. [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
  705. emms = 1;
  706. }
  707. if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
  708. int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
  709. double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
  710. double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
  711. double yuv2yuv[3][3];
  712. int in_rng, out_rng;
  713. mul3x3(yuv2yuv, yuv2rgb, rgb2yuv);
  714. for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
  715. for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
  716. s->yuv2yuv_coeffs[m][n][0] =
  717. lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
  718. (in_rng * (1 << odepth)));
  719. for (o = 1; o < 8; o++)
  720. s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
  721. }
  722. }
  723. av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
  724. av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
  725. s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
  726. [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
  727. }
  728. }
  729. if (emms)
  730. emms_c();
  731. return 0;
  732. }
  733. static int init(AVFilterContext *ctx)
  734. {
  735. ColorSpaceContext *s = ctx->priv;
  736. ff_colorspacedsp_init(&s->dsp);
  737. return 0;
  738. }
  739. static void uninit(AVFilterContext *ctx)
  740. {
  741. ColorSpaceContext *s = ctx->priv;
  742. av_freep(&s->rgb[0]);
  743. av_freep(&s->rgb[1]);
  744. av_freep(&s->rgb[2]);
  745. s->rgb_sz = 0;
  746. av_freep(&s->dither_scratch_base[0][0]);
  747. av_freep(&s->dither_scratch_base[0][1]);
  748. av_freep(&s->dither_scratch_base[1][0]);
  749. av_freep(&s->dither_scratch_base[1][1]);
  750. av_freep(&s->dither_scratch_base[2][0]);
  751. av_freep(&s->dither_scratch_base[2][1]);
  752. av_freep(&s->lin_lut);
  753. }
  754. static int filter_frame(AVFilterLink *link, AVFrame *in)
  755. {
  756. AVFilterContext *ctx = link->dst;
  757. AVFilterLink *outlink = ctx->outputs[0];
  758. ColorSpaceContext *s = ctx->priv;
  759. // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
  760. // input one if it is writable *OR* the actual literal values of in_*
  761. // and out_* are identical (not just their respective properties)
  762. AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
  763. int res;
  764. ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
  765. unsigned rgb_sz = rgb_stride * in->height;
  766. struct ThreadData td;
  767. if (!out) {
  768. av_frame_free(&in);
  769. return AVERROR(ENOMEM);
  770. }
  771. av_frame_copy_props(out, in);
  772. out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
  773. default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
  774. if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
  775. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);
  776. out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
  777. if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
  778. out->color_trc = AVCOL_TRC_BT2020_12;
  779. } else {
  780. out->color_trc = s->user_trc;
  781. }
  782. out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
  783. default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
  784. out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
  785. in->color_range : s->user_rng;
  786. if (rgb_sz != s->rgb_sz) {
  787. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format);
  788. int uvw = in->width >> desc->log2_chroma_w;
  789. av_freep(&s->rgb[0]);
  790. av_freep(&s->rgb[1]);
  791. av_freep(&s->rgb[2]);
  792. s->rgb_sz = 0;
  793. av_freep(&s->dither_scratch_base[0][0]);
  794. av_freep(&s->dither_scratch_base[0][1]);
  795. av_freep(&s->dither_scratch_base[1][0]);
  796. av_freep(&s->dither_scratch_base[1][1]);
  797. av_freep(&s->dither_scratch_base[2][0]);
  798. av_freep(&s->dither_scratch_base[2][1]);
  799. s->rgb[0] = av_malloc(rgb_sz);
  800. s->rgb[1] = av_malloc(rgb_sz);
  801. s->rgb[2] = av_malloc(rgb_sz);
  802. s->dither_scratch_base[0][0] =
  803. av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
  804. s->dither_scratch_base[0][1] =
  805. av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
  806. s->dither_scratch_base[1][0] =
  807. av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
  808. s->dither_scratch_base[1][1] =
  809. av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
  810. s->dither_scratch_base[2][0] =
  811. av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
  812. s->dither_scratch_base[2][1] =
  813. av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
  814. s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
  815. s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
  816. s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
  817. s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
  818. s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
  819. s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
  820. if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
  821. !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
  822. !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
  823. !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
  824. uninit(ctx);
  825. return AVERROR(ENOMEM);
  826. }
  827. s->rgb_sz = rgb_sz;
  828. }
  829. res = create_filtergraph(ctx, in, out);
  830. if (res < 0)
  831. return res;
  832. s->rgb_stride = rgb_stride / sizeof(int16_t);
  833. td.in = in;
  834. td.out = out;
  835. td.in_linesize[0] = in->linesize[0];
  836. td.in_linesize[1] = in->linesize[1];
  837. td.in_linesize[2] = in->linesize[2];
  838. td.out_linesize[0] = out->linesize[0];
  839. td.out_linesize[1] = out->linesize[1];
  840. td.out_linesize[2] = out->linesize[2];
  841. td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h;
  842. td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h;
  843. if (s->yuv2yuv_passthrough) {
  844. res = av_frame_copy(out, in);
  845. if (res < 0)
  846. return res;
  847. } else {
  848. ctx->internal->execute(ctx, convert, &td, NULL,
  849. FFMIN((in->height + 1) >> 1, ctx->graph->nb_threads));
  850. }
  851. av_frame_free(&in);
  852. return ff_filter_frame(outlink, out);
  853. }
  854. static int query_formats(AVFilterContext *ctx)
  855. {
  856. static const enum AVPixelFormat pix_fmts[] = {
  857. AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
  858. AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
  859. AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
  860. AV_PIX_FMT_NONE
  861. };
  862. int res;
  863. ColorSpaceContext *s = ctx->priv;
  864. AVFilterFormats *formats = ff_make_format_list(pix_fmts);
  865. if (!formats)
  866. return AVERROR(ENOMEM);
  867. if (s->user_format == AV_PIX_FMT_NONE)
  868. return ff_set_common_formats(ctx, formats);
  869. res = ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
  870. if (res < 0)
  871. return res;
  872. formats = NULL;
  873. res = ff_add_format(&formats, s->user_format);
  874. if (res < 0)
  875. return res;
  876. return ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
  877. }
  878. static int config_props(AVFilterLink *outlink)
  879. {
  880. AVFilterLink *inlink = outlink->src->inputs[0];
  881. outlink->w = inlink->w;
  882. outlink->h = inlink->h;
  883. outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
  884. outlink->time_base = inlink->time_base;
  885. return 0;
  886. }
#define OFFSET(x) offsetof(ColorSpaceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
/* Declare a named constant value belonging to the option unit z. */
#define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }

/* User options: either set everything via "all" (a CS_* preset), or pick
 * space/range/primaries/trc (and optionally format) individually. */
static const AVOption colorspace_options[] = {
    { "all",        "Set all color properties together",
      OFFSET(user_all),   AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
      CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
    ENUM("bt470m",      CS_BT470M,      "all"),
    ENUM("bt470bg",     CS_BT470BG,     "all"),
    ENUM("bt601-6-525", CS_BT601_6_525, "all"),
    ENUM("bt601-6-625", CS_BT601_6_625, "all"),
    ENUM("bt709",       CS_BT709,       "all"),
    ENUM("smpte170m",   CS_SMPTE170M,   "all"),
    ENUM("smpte240m",   CS_SMPTE240M,   "all"),
    ENUM("bt2020",      CS_BT2020,      "all"),

    { "space",      "Output colorspace",
      OFFSET(user_csp),   AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
      AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, "csp"},
    ENUM("bt709",       AVCOL_SPC_BT709,      "csp"),
    ENUM("fcc",         AVCOL_SPC_FCC,        "csp"),
    ENUM("bt470bg",     AVCOL_SPC_BT470BG,    "csp"),
    ENUM("smpte170m",   AVCOL_SPC_SMPTE170M,  "csp"),
    ENUM("smpte240m",   AVCOL_SPC_SMPTE240M,  "csp"),
    ENUM("bt2020ncl",   AVCOL_SPC_BT2020_NCL, "csp"),

    { "range",      "Output color range",
      OFFSET(user_rng),   AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
      AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, "rng" },
    ENUM("mpeg",        AVCOL_RANGE_MPEG, "rng"),
    ENUM("jpeg",        AVCOL_RANGE_JPEG, "rng"),

    { "primaries",  "Output color primaries",
      OFFSET(user_prm),   AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
      AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
    ENUM("bt709",       AVCOL_PRI_BT709,     "prm"),
    ENUM("bt470m",      AVCOL_PRI_BT470M,    "prm"),
    ENUM("bt470bg",     AVCOL_PRI_BT470BG,   "prm"),
    ENUM("smpte170m",   AVCOL_PRI_SMPTE170M, "prm"),
    ENUM("smpte240m",   AVCOL_PRI_SMPTE240M, "prm"),
    ENUM("bt2020",      AVCOL_PRI_BT2020,    "prm"),

    { "trc",        "Output transfer characteristics",
      OFFSET(user_trc),   AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
      AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
    ENUM("bt709",       AVCOL_TRC_BT709,     "trc"),
    ENUM("gamma22",     AVCOL_TRC_GAMMA22,   "trc"),
    ENUM("gamma28",     AVCOL_TRC_GAMMA28,   "trc"),
    ENUM("smpte170m",   AVCOL_TRC_SMPTE170M, "trc"),
    ENUM("smpte240m",   AVCOL_TRC_SMPTE240M, "trc"),
    ENUM("bt2020-10",   AVCOL_TRC_BT2020_10, "trc"),
    ENUM("bt2020-12",   AVCOL_TRC_BT2020_12, "trc"),

    { "format",     "Output pixel format",
      OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
      AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, "fmt" },
    ENUM("yuv420p",     AV_PIX_FMT_YUV420P,   "fmt"),
    ENUM("yuv420p10",   AV_PIX_FMT_YUV420P10, "fmt"),
    ENUM("yuv420p12",   AV_PIX_FMT_YUV420P12, "fmt"),
    ENUM("yuv422p",     AV_PIX_FMT_YUV422P,   "fmt"),
    ENUM("yuv422p10",   AV_PIX_FMT_YUV422P10, "fmt"),
    ENUM("yuv422p12",   AV_PIX_FMT_YUV422P12, "fmt"),
    ENUM("yuv444p",     AV_PIX_FMT_YUV444P,   "fmt"),
    ENUM("yuv444p10",   AV_PIX_FMT_YUV444P10, "fmt"),
    ENUM("yuv444p12",   AV_PIX_FMT_YUV444P12, "fmt"),

    /* skip the gamut/gamma mapping and convert YUV coefficients directly */
    { "fast",       "Ignore primary chromaticity and gamma correction",
      OFFSET(fast_mode), AV_OPT_TYPE_BOOL,  { .i64 = 0    },
      0, 1, FLAGS },

    { "dither",     "Dithering mode",
      OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
      DITHER_NONE, DITHER_NB - 1, FLAGS, "dither" },
    ENUM("none", DITHER_NONE, "dither"),
    ENUM("fsb",  DITHER_FSB,  "dither"),

    { "wpadapt", "Whitepoint adaptation method",
      OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
      WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, "wpadapt" },
    ENUM("bradford", WP_ADAPT_BRADFORD,  "wpadapt"),
    ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
    ENUM("identity", WP_ADAPT_IDENTITY,  "wpadapt"),

    { NULL }
};

AVFILTER_DEFINE_CLASS(colorspace);
/* Single video input pad; all work happens in filter_frame(). */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single video output pad; config_props() mirrors the input geometry. */
static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
    },
    { NULL }
};
/* Filter definition: slice-threaded colorspace conversion filter. */
AVFilter ff_vf_colorspace = {
    .name            = "colorspace",
    .description     = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
    .init            = init,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .priv_size       = sizeof(ColorSpaceContext),
    .priv_class      = &colorspace_class,
    .inputs          = inputs,
    .outputs         = outputs,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};