vf_colormatrix.c 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520
  1. /*
  2. * ColorMatrix v2.2 for Avisynth 2.5.x
  3. *
  4. * Copyright (C) 2006-2007 Kevin Stone
  5. *
  6. * ColorMatrix 1.x is Copyright (C) Wilbert Dijkhof
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the
  10. * Free Software Foundation; either version 2 of the License, or (at your
  11. * option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  15. * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
  16. * License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software Foundation,
  20. * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * ColorMatrix 2.0 is based on the original ColorMatrix filter by Wilbert
  25. * Dijkhof. It adds the ability to convert between any of: Rec.709, FCC,
  26. * Rec.601, and SMPTE 240M. It also makes pre and post clipping optional,
  27. * adds an option to use scaled or non-scaled coefficients, and more...
  28. */
  29. #include <float.h>
  30. #include "avfilter.h"
  31. #include "formats.h"
  32. #include "internal.h"
  33. #include "video.h"
  34. #include "libavutil/opt.h"
  35. #include "libavutil/pixdesc.h"
  36. #include "libavutil/avstring.h"
  37. #define NS(n) ((n) < 0 ? (int)((n)*65536.0-0.5+DBL_EPSILON) : (int)((n)*65536.0+0.5))
  38. #define CB(n) av_clip_uint8(n)
// Luma weights for each supported matrix, stored as { Kg, Kb, Kr }
// (green, blue, red contribution to Y'); indexed by enum ColorMode.
static const double yuv_coeff_luma[5][3] = {
    { +0.7152, +0.0722, +0.2126 }, // Rec.709 (0)
    { +0.5900, +0.1100, +0.3000 }, // FCC (1)
    { +0.5870, +0.1140, +0.2990 }, // Rec.601 (ITU-R BT.470-2/SMPTE 170M) (2)
    { +0.7010, +0.0870, +0.2120 }, // SMPTE 240M (3)
    { +0.6780, +0.0593, +0.2627 }, // Rec.2020 (4)
};
// Supported color matrices. The ordering of the non-negative values matches
// yuv_coeff_luma[] and the color_modes[] name table.
enum ColorMode {
    COLOR_MODE_NONE = -1, ///< unspecified; for "src" this means auto-detect per frame
    COLOR_MODE_BT709,
    COLOR_MODE_FCC,
    COLOR_MODE_BT601,
    COLOR_MODE_SMPTE240M,
    COLOR_MODE_BT2020,
    COLOR_MODE_COUNT
};
typedef struct {
    const AVClass *class;
    int yuv_convert[25][3][3]; ///< 5x5 (source,dest) pairs of 3x3 matrices in 16.16 fixed point, filled by calc_coefficients()
    int interlaced;            // NOTE(review): not referenced anywhere in this file's visible code — confirm before removing
    int source, dest;          ///< ColorMode values from the "src"/"dst" options
    int mode;                  ///< source * 5 + dest; row-major index into yuv_convert
    int hsub, vsub;            ///< log2 chroma subsampling of the input format (set in config_input())
} ColorMatrixContext;
// Per-frame context handed to the slice worker jobs.
typedef struct ThreadData {
    AVFrame *dst;       ///< output frame
    const AVFrame *src; ///< input frame
    // 16.16 fixed-point coefficients copied from yuv_convert[mode] in
    // filter_frame(): c2/c3 = U,V contribution to Y', c4/c5 = to U, c6/c7 = to V.
    int c2;
    int c3;
    int c4;
    int c5;
    int c6;
    int c7;
} ThreadData;
  73. #define OFFSET(x) offsetof(ColorMatrixContext, x)
  74. #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
  75. static const AVOption colormatrix_options[] = {
  76. { "src", "set source color matrix", OFFSET(source), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
  77. { "dst", "set destination color matrix", OFFSET(dest), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
  78. { "bt709", "set BT.709 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT709}, .flags=FLAGS, .unit="color_mode" },
  79. { "fcc", "set FCC colorspace ", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_FCC}, .flags=FLAGS, .unit="color_mode" },
  80. { "bt601", "set BT.601 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
  81. { "bt470", "set BT.470 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
  82. { "bt470bg", "set BT.470 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
  83. { "smpte170m", "set SMTPE-170M colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601}, .flags=FLAGS, .unit="color_mode" },
  84. { "smpte240m", "set SMPTE-240M colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_SMPTE240M}, .flags=FLAGS, .unit="color_mode" },
  85. { "bt2020", "set BT.2020 colorspace", 0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT2020}, .flags=FLAGS, .unit="color_mode" },
  86. { NULL }
  87. };
  88. AVFILTER_DEFINE_CLASS(colormatrix);
  89. #define ma m[0][0]
  90. #define mb m[0][1]
  91. #define mc m[0][2]
  92. #define md m[1][0]
  93. #define me m[1][1]
  94. #define mf m[1][2]
  95. #define mg m[2][0]
  96. #define mh m[2][1]
  97. #define mi m[2][2]
  98. #define ima im[0][0]
  99. #define imb im[0][1]
  100. #define imc im[0][2]
  101. #define imd im[1][0]
  102. #define ime im[1][1]
  103. #define imf im[1][2]
  104. #define img im[2][0]
  105. #define imh im[2][1]
  106. #define imi im[2][2]
  107. static void inverse3x3(double im[3][3], double m[3][3])
  108. {
  109. double det = ma * (me * mi - mf * mh) - mb * (md * mi - mf * mg) + mc * (md * mh - me * mg);
  110. det = 1.0 / det;
  111. ima = det * (me * mi - mf * mh);
  112. imb = det * (mc * mh - mb * mi);
  113. imc = det * (mb * mf - mc * me);
  114. imd = det * (mf * mg - md * mi);
  115. ime = det * (ma * mi - mc * mg);
  116. imf = det * (mc * md - ma * mf);
  117. img = det * (md * mh - me * mg);
  118. imh = det * (mb * mg - ma * mh);
  119. imi = det * (ma * me - mb * md);
  120. }
  121. static void solve_coefficients(double cm[3][3], double rgb[3][3], double yuv[3][3])
  122. {
  123. int i, j;
  124. for (i = 0; i < 3; i++)
  125. for (j = 0; j < 3; j++)
  126. cm[i][j] = yuv[i][0] * rgb[0][j] + yuv[i][1] * rgb[1][j] + yuv[i][2] * rgb[2][j];
  127. }
  128. static void calc_coefficients(AVFilterContext *ctx)
  129. {
  130. ColorMatrixContext *color = ctx->priv;
  131. double yuv_coeff[5][3][3];
  132. double rgb_coeffd[5][3][3];
  133. double yuv_convertd[25][3][3];
  134. double bscale, rscale;
  135. int v = 0;
  136. int i, j, k;
  137. for (i = 0; i < 5; i++) {
  138. yuv_coeff[i][0][0] = yuv_coeff_luma[i][0];
  139. yuv_coeff[i][0][1] = yuv_coeff_luma[i][1];
  140. yuv_coeff[i][0][2] = yuv_coeff_luma[i][2];
  141. bscale = 0.5 / (yuv_coeff[i][0][1] - 1.0);
  142. rscale = 0.5 / (yuv_coeff[i][0][2] - 1.0);
  143. yuv_coeff[i][1][0] = bscale * yuv_coeff[i][0][0];
  144. yuv_coeff[i][1][1] = 0.5;
  145. yuv_coeff[i][1][2] = bscale * yuv_coeff[i][0][2];
  146. yuv_coeff[i][2][0] = rscale * yuv_coeff[i][0][0];
  147. yuv_coeff[i][2][1] = rscale * yuv_coeff[i][0][1];
  148. yuv_coeff[i][2][2] = 0.5;
  149. }
  150. for (i = 0; i < 5; i++)
  151. inverse3x3(rgb_coeffd[i], yuv_coeff[i]);
  152. for (i = 0; i < 5; i++) {
  153. for (j = 0; j < 5; j++) {
  154. solve_coefficients(yuv_convertd[v], rgb_coeffd[i], yuv_coeff[j]);
  155. for (k = 0; k < 3; k++) {
  156. color->yuv_convert[v][k][0] = NS(yuv_convertd[v][k][0]);
  157. color->yuv_convert[v][k][1] = NS(yuv_convertd[v][k][1]);
  158. color->yuv_convert[v][k][2] = NS(yuv_convertd[v][k][2]);
  159. }
  160. if (color->yuv_convert[v][0][0] != 65536 || color->yuv_convert[v][1][0] != 0 ||
  161. color->yuv_convert[v][2][0] != 0) {
  162. av_log(ctx, AV_LOG_ERROR, "error calculating conversion coefficients\n");
  163. }
  164. v++;
  165. }
  166. }
  167. }
// Human-readable names indexed by enum ColorMode (valid for values >= 0 only).
static const char * const color_modes[] = {"bt709", "fcc", "bt601", "smpte240m", "bt2020"};
  169. static av_cold int init(AVFilterContext *ctx)
  170. {
  171. ColorMatrixContext *color = ctx->priv;
  172. if (color->dest == COLOR_MODE_NONE) {
  173. av_log(ctx, AV_LOG_ERROR, "Unspecified destination color space\n");
  174. return AVERROR(EINVAL);
  175. }
  176. if (color->source == color->dest) {
  177. av_log(ctx, AV_LOG_ERROR, "Source and destination color space must not be identical\n");
  178. return AVERROR(EINVAL);
  179. }
  180. calc_coefficients(ctx);
  181. return 0;
  182. }
  183. static int process_slice_uyvy422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  184. {
  185. const ThreadData *td = arg;
  186. const AVFrame *src = td->src;
  187. AVFrame *dst = td->dst;
  188. const int height = src->height;
  189. const int width = src->width*2;
  190. const int src_pitch = src->linesize[0];
  191. const int dst_pitch = dst->linesize[0];
  192. const int slice_start = (height * jobnr ) / nb_jobs;
  193. const int slice_end = (height * (jobnr+1)) / nb_jobs;
  194. const unsigned char *srcp = src->data[0] + slice_start * src_pitch;
  195. unsigned char *dstp = dst->data[0] + slice_start * dst_pitch;
  196. const int c2 = td->c2;
  197. const int c3 = td->c3;
  198. const int c4 = td->c4;
  199. const int c5 = td->c5;
  200. const int c6 = td->c6;
  201. const int c7 = td->c7;
  202. int x, y;
  203. for (y = slice_start; y < slice_end; y++) {
  204. for (x = 0; x < width; x += 4) {
  205. const int u = srcp[x + 0] - 128;
  206. const int v = srcp[x + 2] - 128;
  207. const int uvval = c2 * u + c3 * v + 1081344;
  208. dstp[x + 0] = CB((c4 * u + c5 * v + 8421376) >> 16);
  209. dstp[x + 1] = CB((65536 * (srcp[x + 1] - 16) + uvval) >> 16);
  210. dstp[x + 2] = CB((c6 * u + c7 * v + 8421376) >> 16);
  211. dstp[x + 3] = CB((65536 * (srcp[x + 3] - 16) + uvval) >> 16);
  212. }
  213. srcp += src_pitch;
  214. dstp += dst_pitch;
  215. }
  216. return 0;
  217. }
  218. static int process_slice_yuv444p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  219. {
  220. const ThreadData *td = arg;
  221. const AVFrame *src = td->src;
  222. AVFrame *dst = td->dst;
  223. const int height = src->height;
  224. const int width = src->width;
  225. const int slice_start = (height * jobnr ) / nb_jobs;
  226. const int slice_end = (height * (jobnr+1)) / nb_jobs;
  227. const int src_pitchY = src->linesize[0];
  228. const int src_pitchUV = src->linesize[1];
  229. const unsigned char *srcpU = src->data[1] + slice_start * src_pitchUV;
  230. const unsigned char *srcpV = src->data[2] + slice_start * src_pitchUV;
  231. const unsigned char *srcpY = src->data[0] + slice_start * src_pitchY;
  232. const int dst_pitchY = dst->linesize[0];
  233. const int dst_pitchUV = dst->linesize[1];
  234. unsigned char *dstpU = dst->data[1] + slice_start * dst_pitchUV;
  235. unsigned char *dstpV = dst->data[2] + slice_start * dst_pitchUV;
  236. unsigned char *dstpY = dst->data[0] + slice_start * dst_pitchY;
  237. const int c2 = td->c2;
  238. const int c3 = td->c3;
  239. const int c4 = td->c4;
  240. const int c5 = td->c5;
  241. const int c6 = td->c6;
  242. const int c7 = td->c7;
  243. int x, y;
  244. for (y = slice_start; y < slice_end; y++) {
  245. for (x = 0; x < width; x++) {
  246. const int u = srcpU[x] - 128;
  247. const int v = srcpV[x] - 128;
  248. const int uvval = c2 * u + c3 * v + 1081344;
  249. dstpY[x] = CB((65536 * (srcpY[x] - 16) + uvval) >> 16);
  250. dstpU[x] = CB((c4 * u + c5 * v + 8421376) >> 16);
  251. dstpV[x] = CB((c6 * u + c7 * v + 8421376) >> 16);
  252. }
  253. srcpY += src_pitchY;
  254. dstpY += dst_pitchY;
  255. srcpU += src_pitchUV;
  256. srcpV += src_pitchUV;
  257. dstpU += dst_pitchUV;
  258. dstpV += dst_pitchUV;
  259. }
  260. return 0;
  261. }
  262. static int process_slice_yuv422p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  263. {
  264. const ThreadData *td = arg;
  265. const AVFrame *src = td->src;
  266. AVFrame *dst = td->dst;
  267. const int height = src->height;
  268. const int width = src->width;
  269. const int slice_start = (height * jobnr ) / nb_jobs;
  270. const int slice_end = (height * (jobnr+1)) / nb_jobs;
  271. const int src_pitchY = src->linesize[0];
  272. const int src_pitchUV = src->linesize[1];
  273. const unsigned char *srcpU = src->data[1] + slice_start * src_pitchUV;
  274. const unsigned char *srcpV = src->data[2] + slice_start * src_pitchUV;
  275. const unsigned char *srcpY = src->data[0] + slice_start * src_pitchY;
  276. const int dst_pitchY = dst->linesize[0];
  277. const int dst_pitchUV = dst->linesize[1];
  278. unsigned char *dstpU = dst->data[1] + slice_start * dst_pitchUV;
  279. unsigned char *dstpV = dst->data[2] + slice_start * dst_pitchUV;
  280. unsigned char *dstpY = dst->data[0] + slice_start * dst_pitchY;
  281. const int c2 = td->c2;
  282. const int c3 = td->c3;
  283. const int c4 = td->c4;
  284. const int c5 = td->c5;
  285. const int c6 = td->c6;
  286. const int c7 = td->c7;
  287. int x, y;
  288. for (y = slice_start; y < slice_end; y++) {
  289. for (x = 0; x < width; x += 2) {
  290. const int u = srcpU[x >> 1] - 128;
  291. const int v = srcpV[x >> 1] - 128;
  292. const int uvval = c2 * u + c3 * v + 1081344;
  293. dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
  294. dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
  295. dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
  296. dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
  297. }
  298. srcpY += src_pitchY;
  299. dstpY += dst_pitchY;
  300. srcpU += src_pitchUV;
  301. srcpV += src_pitchUV;
  302. dstpU += dst_pitchUV;
  303. dstpV += dst_pitchUV;
  304. }
  305. return 0;
  306. }
/**
 * Slice worker for planar YUV 4:2:0: each chroma sample covers a 2x2 luma
 * block, so two luma rows are processed per loop iteration.
 */
static int process_slice_yuv420p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const ThreadData *td = arg;
    const AVFrame *src = td->src;
    AVFrame *dst = td->dst;
    // Slice bounds are computed in units of row *pairs* and scaled back to
    // luma rows, so no 2-row pair is ever split between two jobs.
    const int height = FFALIGN(src->height, 2) >> 1;
    const int width = src->width;
    const int slice_start = ((height * jobnr ) / nb_jobs) << 1;
    const int slice_end = ((height * (jobnr+1)) / nb_jobs) << 1;
    const int src_pitchY = src->linesize[0];
    const int src_pitchUV = src->linesize[1];
    const int dst_pitchY = dst->linesize[0];
    const int dst_pitchUV = dst->linesize[1];
    // srcpY/dstpY walk the even luma rows, srcpN/dstpN the following odd rows;
    // chroma planes advance one row per luma row pair.
    const unsigned char *srcpY = src->data[0] + src_pitchY * slice_start;
    const unsigned char *srcpU = src->data[1] + src_pitchUV * (slice_start >> 1);
    const unsigned char *srcpV = src->data[2] + src_pitchUV * (slice_start >> 1);
    const unsigned char *srcpN = src->data[0] + src_pitchY * (slice_start + 1);
    unsigned char *dstpU = dst->data[1] + dst_pitchUV * (slice_start >> 1);
    unsigned char *dstpV = dst->data[2] + dst_pitchUV * (slice_start >> 1);
    unsigned char *dstpY = dst->data[0] + dst_pitchY * slice_start;
    unsigned char *dstpN = dst->data[0] + dst_pitchY * (slice_start + 1);
    const int c2 = td->c2;
    const int c3 = td->c3;
    const int c4 = td->c4;
    const int c5 = td->c5;
    const int c6 = td->c6;
    const int c7 = td->c7;
    int x, y;
    // NOTE(review): for odd src->height the FFALIGN round-up makes the last
    // iteration read/write luma row src->height — confirm callers only feed
    // even-height 4:2:0 frames.
    for (y = slice_start; y < slice_end; y += 2) {
        for (x = 0; x < width; x += 2) {
            const int u = srcpU[x >> 1] - 128;
            const int v = srcpV[x >> 1] - 128;
            // Shared chroma term for the whole 2x2 luma block.
            const int uvval = c2 * u + c3 * v + 1081344;
            dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
            dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
            dstpN[x + 0] = CB((65536 * (srcpN[x + 0] - 16) + uvval) >> 16);
            dstpN[x + 1] = CB((65536 * (srcpN[x + 1] - 16) + uvval) >> 16);
            dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
            dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
        }
        srcpY += src_pitchY << 1;
        dstpY += dst_pitchY << 1;
        srcpN += src_pitchY << 1;
        dstpN += dst_pitchY << 1;
        srcpU += src_pitchUV;
        srcpV += src_pitchUV;
        dstpU += dst_pitchUV;
        dstpV += dst_pitchUV;
    }
    return 0;
}
  358. static int config_input(AVFilterLink *inlink)
  359. {
  360. AVFilterContext *ctx = inlink->dst;
  361. ColorMatrixContext *color = ctx->priv;
  362. const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
  363. color->hsub = pix_desc->log2_chroma_w;
  364. color->vsub = pix_desc->log2_chroma_h;
  365. av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n",
  366. color_modes[color->source], color_modes[color->dest]);
  367. return 0;
  368. }
  369. static int query_formats(AVFilterContext *ctx)
  370. {
  371. static const enum AVPixelFormat pix_fmts[] = {
  372. AV_PIX_FMT_YUV444P,
  373. AV_PIX_FMT_YUV422P,
  374. AV_PIX_FMT_YUV420P,
  375. AV_PIX_FMT_UYVY422,
  376. AV_PIX_FMT_NONE
  377. };
  378. AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
  379. if (!fmts_list)
  380. return AVERROR(ENOMEM);
  381. return ff_set_common_formats(ctx, fmts_list);
  382. }
/**
 * Per-frame entry point: pick the conversion matrix, tag the output frame's
 * colorspace, and dispatch the format-specific slice worker.
 */
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext *ctx = link->dst;
    ColorMatrixContext *color = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    ThreadData td = {0};
    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);
    if (color->source == COLOR_MODE_NONE) {
        // No explicit source matrix: derive it from the frame's tagged
        // colorspace; fail if the frame carries none we support.
        enum AVColorSpace cs = av_frame_get_colorspace(in);
        enum ColorMode source;
        switch(cs) {
        case AVCOL_SPC_BT709     : source = COLOR_MODE_BT709     ; break;
        case AVCOL_SPC_FCC       : source = COLOR_MODE_FCC       ; break;
        case AVCOL_SPC_SMPTE240M : source = COLOR_MODE_SMPTE240M ; break;
        case AVCOL_SPC_BT470BG   : source = COLOR_MODE_BT601     ; break;
        case AVCOL_SPC_SMPTE170M : source = COLOR_MODE_BT601     ; break;
        case AVCOL_SPC_BT2020_NCL: source = COLOR_MODE_BT2020    ; break;
        case AVCOL_SPC_BT2020_CL : source = COLOR_MODE_BT2020    ; break;
        default :
            av_log(ctx, AV_LOG_ERROR, "Input frame does not specify a supported colorspace, and none has been specified as source either\n");
            av_frame_free(&out);
            return AVERROR(EINVAL);
        }
        // Row-major index into the 5x5 yuv_convert table.
        color->mode = source * 5 + color->dest;
    } else
        color->mode = color->source * 5 + color->dest;
    // Tag the output with the destination matrix so downstream filters agree.
    switch(color->dest) {
    case COLOR_MODE_BT709    : av_frame_set_colorspace(out, AVCOL_SPC_BT709)     ; break;
    case COLOR_MODE_FCC      : av_frame_set_colorspace(out, AVCOL_SPC_FCC)       ; break;
    case COLOR_MODE_SMPTE240M: av_frame_set_colorspace(out, AVCOL_SPC_SMPTE240M) ; break;
    case COLOR_MODE_BT601    : av_frame_set_colorspace(out, AVCOL_SPC_BT470BG)   ; break;
    case COLOR_MODE_BT2020   : av_frame_set_colorspace(out, AVCOL_SPC_BT2020_NCL); break;
    }
    td.src = in;
    td.dst = out;
    // Only the U/V columns of the matrix are needed per plane; the Y' column
    // is the identity (see the check in calc_coefficients()).
    td.c2 = color->yuv_convert[color->mode][0][1];
    td.c3 = color->yuv_convert[color->mode][0][2];
    td.c4 = color->yuv_convert[color->mode][1][1];
    td.c5 = color->yuv_convert[color->mode][1][2];
    td.c6 = color->yuv_convert[color->mode][2][1];
    td.c7 = color->yuv_convert[color->mode][2][2];
    // Job count is capped by row count (4:2:0 works on row pairs, hence /2).
    if (in->format == AV_PIX_FMT_YUV444P)
        ctx->internal->execute(ctx, process_slice_yuv444p, &td, NULL,
                               FFMIN(in->height, ctx->graph->nb_threads));
    else if (in->format == AV_PIX_FMT_YUV422P)
        ctx->internal->execute(ctx, process_slice_yuv422p, &td, NULL,
                               FFMIN(in->height, ctx->graph->nb_threads));
    else if (in->format == AV_PIX_FMT_YUV420P)
        ctx->internal->execute(ctx, process_slice_yuv420p, &td, NULL,
                               FFMIN(in->height / 2, ctx->graph->nb_threads));
    else
        ctx->internal->execute(ctx, process_slice_uyvy422, &td, NULL,
                               FFMIN(in->height, ctx->graph->nb_threads));
    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
// Single video input; frames are processed in filter_frame().
static const AVFilterPad colormatrix_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};
// Single video output with default properties.
static const AVFilterPad colormatrix_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
// Filter registration: slice-threaded, timeline-capable colormatrix filter.
AVFilter ff_vf_colormatrix = {
    .name          = "colormatrix",
    .description   = NULL_IF_CONFIG_SMALL("Convert color matrix."),
    .priv_size     = sizeof(ColorMatrixContext),
    .init          = init,
    .query_formats = query_formats,
    .inputs        = colormatrix_inputs,
    .outputs       = colormatrix_outputs,
    .priv_class    = &colormatrix_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};