/* vf_signalstats.c */
  1. /*
  2. * Copyright (c) 2010 Mark Heath mjpeg0 @ silicontrip dot org
  3. * Copyright (c) 2014 Clément Bœsch
  4. * Copyright (c) 2014 Dave Rice @dericed
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "libavutil/opt.h"
  23. #include "libavutil/pixdesc.h"
  24. #include "internal.h"
  25. enum FilterMode {
  26. FILTER_NONE = -1,
  27. FILTER_TOUT,
  28. FILTER_VREP,
  29. FILTER_BRNG,
  30. FILT_NUMB
  31. };
/* Per-instance filter state. */
typedef struct {
    const AVClass *class;
    int chromah;            // height of chroma plane
    int chromaw;            // width of chroma plane
    int hsub;               // horizontal subsampling
    int vsub;               // vertical subsampling
    int fs;                 // pixel count per frame
    int cfs;                // pixel count per frame of chroma planes
    enum FilterMode outfilter;  // which filter's hits to highlight, or FILTER_NONE
    int filters;            // bitmask of enabled statistics filters (1 << FILTER_*)
    AVFrame *frame_prev;    // previous input frame, for temporal difference metrics
    uint8_t rgba_color[4];  // user-supplied highlight color (RGBA)
    int yuv_color[3];       // highlight color converted to YUV in init()
    int nb_jobs;            // number of slice-threading jobs
    int *jobs_rets;         // per-job return values (per-slice scores)
    AVFrame *frame_sat;     // per-pixel chroma saturation (GRAY8 scratch frame)
    AVFrame *frame_hue;     // per-pixel chroma hue in degrees (GRAY16 scratch frame)
} SignalstatsContext;
/* Per-slice payload for the TOUT/VREP/BRNG worker functions. */
typedef struct ThreadData {
    const AVFrame *in;  // input frame (read-only)
    AVFrame *out;       // frame to burn highlights into, or NULL for stats only
} ThreadData;
/* Per-slice payload for compute_sat_hue_metrics(). */
typedef struct ThreadDataHueSatMetrics {
    const AVFrame *src;         // input frame (read-only)
    AVFrame *dst_sat, *dst_hue; // saturation (GRAY8) and hue (GRAY16) outputs
} ThreadDataHueSatMetrics;
#define OFFSET(x) offsetof(SignalstatsContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User options: "stat" selects which statistics filters run (flag set),
 * "out" selects at most one filter whose hits are highlighted in the
 * output video, and "c"/"color" set the highlight color. */
static const AVOption signalstats_options[] = {
    {"stat", "set statistics filters", OFFSET(filters), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "filters"},
    {"tout", "analyze pixels for temporal outliers", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_TOUT}, 0, 0, FLAGS, "filters"},
    {"vrep", "analyze video lines for vertical line repetition", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_VREP}, 0, 0, FLAGS, "filters"},
    {"brng", "analyze for pixels outside of broadcast range", 0, AV_OPT_TYPE_CONST, {.i64=1<<FILTER_BRNG}, 0, 0, FLAGS, "filters"},
    {"out", "set video filter", OFFSET(outfilter), AV_OPT_TYPE_INT, {.i64=FILTER_NONE}, -1, FILT_NUMB-1, FLAGS, "out"},
    {"tout", "highlight pixels that depict temporal outliers", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_TOUT}, 0, 0, FLAGS, "out"},
    {"vrep", "highlight video lines that depict vertical line repetition", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_VREP}, 0, 0, FLAGS, "out"},
    {"brng", "highlight pixels that are outside of broadcast range", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_BRNG}, 0, 0, FLAGS, "out"},
    {"c", "set highlight color", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str="yellow"}, .flags=FLAGS},
    {"color", "set highlight color", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str="yellow"}, .flags=FLAGS},
    {NULL}
};

AVFILTER_DEFINE_CLASS(signalstats);
  74. static av_cold int init(AVFilterContext *ctx)
  75. {
  76. uint8_t r, g, b;
  77. SignalstatsContext *s = ctx->priv;
  78. if (s->outfilter != FILTER_NONE)
  79. s->filters |= 1 << s->outfilter;
  80. r = s->rgba_color[0];
  81. g = s->rgba_color[1];
  82. b = s->rgba_color[2];
  83. s->yuv_color[0] = (( 66*r + 129*g + 25*b + (1<<7)) >> 8) + 16;
  84. s->yuv_color[1] = ((-38*r + -74*g + 112*b + (1<<7)) >> 8) + 128;
  85. s->yuv_color[2] = ((112*r + -94*g + -18*b + (1<<7)) >> 8) + 128;
  86. return 0;
  87. }
  88. static av_cold void uninit(AVFilterContext *ctx)
  89. {
  90. SignalstatsContext *s = ctx->priv;
  91. av_frame_free(&s->frame_prev);
  92. av_frame_free(&s->frame_sat);
  93. av_frame_free(&s->frame_hue);
  94. av_freep(&s->jobs_rets);
  95. }
  96. static int query_formats(AVFilterContext *ctx)
  97. {
  98. // TODO: add more
  99. static const enum AVPixelFormat pix_fmts[] = {
  100. AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
  101. AV_PIX_FMT_YUV440P,
  102. AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
  103. AV_PIX_FMT_YUVJ440P,
  104. AV_PIX_FMT_NONE
  105. };
  106. ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
  107. return 0;
  108. }
  109. static AVFrame *alloc_frame(enum AVPixelFormat pixfmt, int w, int h)
  110. {
  111. AVFrame *frame = av_frame_alloc();
  112. if (!frame)
  113. return NULL;
  114. frame->format = pixfmt;
  115. frame->width = w;
  116. frame->height = h;
  117. if (av_frame_get_buffer(frame, 32) < 0) {
  118. av_frame_free(&frame);
  119. return NULL;
  120. }
  121. return frame;
  122. }
  123. static int config_props(AVFilterLink *outlink)
  124. {
  125. AVFilterContext *ctx = outlink->src;
  126. SignalstatsContext *s = ctx->priv;
  127. AVFilterLink *inlink = outlink->src->inputs[0];
  128. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
  129. s->hsub = desc->log2_chroma_w;
  130. s->vsub = desc->log2_chroma_h;
  131. outlink->w = inlink->w;
  132. outlink->h = inlink->h;
  133. s->chromaw = FF_CEIL_RSHIFT(inlink->w, s->hsub);
  134. s->chromah = FF_CEIL_RSHIFT(inlink->h, s->vsub);
  135. s->fs = inlink->w * inlink->h;
  136. s->cfs = s->chromaw * s->chromah;
  137. s->nb_jobs = FFMAX(1, FFMIN(inlink->h, ctx->graph->nb_threads));
  138. s->jobs_rets = av_malloc_array(s->nb_jobs, sizeof(*s->jobs_rets));
  139. if (!s->jobs_rets)
  140. return AVERROR(ENOMEM);
  141. s->frame_sat = alloc_frame(AV_PIX_FMT_GRAY8, inlink->w, inlink->h);
  142. s->frame_hue = alloc_frame(AV_PIX_FMT_GRAY16, inlink->w, inlink->h);
  143. if (!s->frame_sat || !s->frame_hue)
  144. return AVERROR(ENOMEM);
  145. return 0;
  146. }
  147. static void burn_frame(const SignalstatsContext *s, AVFrame *f, int x, int y)
  148. {
  149. const int chromax = x >> s->hsub;
  150. const int chromay = y >> s->vsub;
  151. f->data[0][y * f->linesize[0] + x] = s->yuv_color[0];
  152. f->data[1][chromay * f->linesize[1] + chromax] = s->yuv_color[1];
  153. f->data[2][chromay * f->linesize[2] + chromax] = s->yuv_color[2];
  154. }
  155. static int filter_brng(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  156. {
  157. ThreadData *td = arg;
  158. const SignalstatsContext *s = ctx->priv;
  159. const AVFrame *in = td->in;
  160. AVFrame *out = td->out;
  161. const int w = in->width;
  162. const int h = in->height;
  163. const int slice_start = (h * jobnr ) / nb_jobs;
  164. const int slice_end = (h * (jobnr+1)) / nb_jobs;
  165. int x, y, score = 0;
  166. for (y = slice_start; y < slice_end; y++) {
  167. const int yc = y >> s->vsub;
  168. const uint8_t *pluma = &in->data[0][y * in->linesize[0]];
  169. const uint8_t *pchromau = &in->data[1][yc * in->linesize[1]];
  170. const uint8_t *pchromav = &in->data[2][yc * in->linesize[2]];
  171. for (x = 0; x < w; x++) {
  172. const int xc = x >> s->hsub;
  173. const int luma = pluma[x];
  174. const int chromau = pchromau[xc];
  175. const int chromav = pchromav[xc];
  176. const int filt = luma < 16 || luma > 235 ||
  177. chromau < 16 || chromau > 240 ||
  178. chromav < 16 || chromav > 240;
  179. score += filt;
  180. if (out && filt)
  181. burn_frame(s, out, x, y);
  182. }
  183. }
  184. return score;
  185. }
  186. static int filter_tout_outlier(uint8_t x, uint8_t y, uint8_t z)
  187. {
  188. return ((abs(x - y) + abs (z - y)) / 2) - abs(z - x) > 4; // make 4 configurable?
  189. }
/* Slice worker: count pixels that are temporal outliers in the vertical
 * direction; optionally highlight them in td->out. Returns the per-slice
 * hit count. The FILTER/FILTER3 macros intentionally capture p, lw, x and
 * y from the enclosing scope. */
static int filter_tout(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    const SignalstatsContext *s = ctx->priv;
    const AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int w = in->width;
    const int h = in->height;
    const int slice_start = (h *  jobnr   ) / nb_jobs;
    const int slice_end   = (h * (jobnr+1)) / nb_jobs;
    const uint8_t *p = in->data[0];
    int lw = in->linesize[0];
    int x, y, score = 0, filt;

    for (y = slice_start; y < slice_end; y++) {
        /* Need one line above and below; skip the frame borders. */
        if (y - 1 < 0 || y + 1 >= h)
            continue;
        // detect two pixels above and below (to eliminate interlace artefacts)
        // should check that video format is infact interlaced.
#define FILTER(i, j) \
        filter_tout_outlier(p[(y-j) * lw + x + i], \
                            p[ y    * lw + x + i], \
                            p[(y+j) * lw + x + i])
        /* A hit requires the pixel and both horizontal neighbours to be
         * outliers at vertical distance j. */
#define FILTER3(j) (FILTER(-1, j) && FILTER(0, j) && FILTER(1, j))
        if (y - 2 >= 0 && y + 2 < h) {
            /* Interior rows: require outliers at distance 2 AND 1. */
            for (x = 1; x < w - 1; x++) {
                filt = FILTER3(2) && FILTER3(1);
                score += filt;
                if (filt && out)
                    burn_frame(s, out, x, y);
            }
        } else {
            /* Rows next to the border: only distance 1 is available. */
            for (x = 1; x < w - 1; x++) {
                filt = FILTER3(1);
                score += filt;
                if (filt && out)
                    burn_frame(s, out, x, y);
            }
        }
    }
    return score;
}
  231. #define VREP_START 4
  232. static int filter_vrep(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  233. {
  234. ThreadData *td = arg;
  235. const SignalstatsContext *s = ctx->priv;
  236. const AVFrame *in = td->in;
  237. AVFrame *out = td->out;
  238. const int w = in->width;
  239. const int h = in->height;
  240. const int slice_start = (h * jobnr ) / nb_jobs;
  241. const int slice_end = (h * (jobnr+1)) / nb_jobs;
  242. const uint8_t *p = in->data[0];
  243. const int lw = in->linesize[0];
  244. int x, y, score = 0;
  245. for (y = slice_start; y < slice_end; y++) {
  246. const int y2lw = (y - VREP_START) * lw;
  247. const int ylw = y * lw;
  248. int filt, totdiff = 0;
  249. if (y < VREP_START)
  250. continue;
  251. for (x = 0; x < w; x++)
  252. totdiff += abs(p[y2lw + x] - p[ylw + x]);
  253. filt = totdiff < w;
  254. score += filt;
  255. if (filt && out)
  256. for (x = 0; x < w; x++)
  257. burn_frame(s, out, x, y);
  258. }
  259. return score * w;
  260. }
/* Table of statistics filters; order must match enum FilterMode
 * (FILTER_TOUT, FILTER_VREP, FILTER_BRNG). */
static const struct {
    const char *name;  // metadata key suffix: lavfi.signalstats.<name>
    int (*process)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} filters_def[] = {
    {"TOUT", filter_tout},
    {"VREP", filter_vrep},
    {"BRNG", filter_brng},
    {NULL}
};
  270. #define DEPTH 256
  271. static int compute_sat_hue_metrics(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  272. {
  273. int i, j;
  274. ThreadDataHueSatMetrics *td = arg;
  275. const SignalstatsContext *s = ctx->priv;
  276. const AVFrame *src = td->src;
  277. AVFrame *dst_sat = td->dst_sat;
  278. AVFrame *dst_hue = td->dst_hue;
  279. const int slice_start = (s->chromah * jobnr ) / nb_jobs;
  280. const int slice_end = (s->chromah * (jobnr+1)) / nb_jobs;
  281. const int lsz_u = src->linesize[1];
  282. const int lsz_v = src->linesize[2];
  283. const uint8_t *p_u = src->data[1] + slice_start * lsz_u;
  284. const uint8_t *p_v = src->data[2] + slice_start * lsz_v;
  285. const int lsz_sat = dst_sat->linesize[0];
  286. const int lsz_hue = dst_hue->linesize[0];
  287. uint8_t *p_sat = dst_sat->data[0] + slice_start * lsz_sat;
  288. uint8_t *p_hue = dst_hue->data[0] + slice_start * lsz_hue;
  289. for (j = slice_start; j < slice_end; j++) {
  290. for (i = 0; i < s->chromaw; i++) {
  291. const int yuvu = p_u[i];
  292. const int yuvv = p_v[i];
  293. p_sat[i] = hypot(yuvu - 128, yuvv - 128); // int or round?
  294. ((int16_t*)p_hue)[i] = floor((180 / M_PI) * atan2f(yuvu-128, yuvv-128) + 180);
  295. }
  296. p_u += lsz_u;
  297. p_v += lsz_v;
  298. p_sat += lsz_sat;
  299. p_hue += lsz_hue;
  300. }
  301. return 0;
  302. }
  303. static int filter_frame(AVFilterLink *link, AVFrame *in)
  304. {
  305. AVFilterContext *ctx = link->dst;
  306. SignalstatsContext *s = ctx->priv;
  307. AVFilterLink *outlink = ctx->outputs[0];
  308. AVFrame *out = in;
  309. int i, j;
  310. int w = 0, cw = 0, // in
  311. pw = 0, cpw = 0; // prev
  312. int fil;
  313. char metabuf[128];
  314. unsigned int histy[DEPTH] = {0},
  315. histu[DEPTH] = {0},
  316. histv[DEPTH] = {0},
  317. histhue[360] = {0},
  318. histsat[DEPTH] = {0}; // limited to 8 bit data.
  319. int miny = -1, minu = -1, minv = -1;
  320. int maxy = -1, maxu = -1, maxv = -1;
  321. int lowy = -1, lowu = -1, lowv = -1;
  322. int highy = -1, highu = -1, highv = -1;
  323. int minsat = -1, maxsat = -1, lowsat = -1, highsat = -1;
  324. int lowp, highp, clowp, chighp;
  325. int accy, accu, accv;
  326. int accsat, acchue = 0;
  327. int medhue, maxhue;
  328. int toty = 0, totu = 0, totv = 0, totsat=0;
  329. int tothue = 0;
  330. int dify = 0, difu = 0, difv = 0;
  331. int filtot[FILT_NUMB] = {0};
  332. AVFrame *prev;
  333. AVFrame *sat = s->frame_sat;
  334. AVFrame *hue = s->frame_hue;
  335. const uint8_t *p_sat = sat->data[0];
  336. const uint8_t *p_hue = hue->data[0];
  337. const int lsz_sat = sat->linesize[0];
  338. const int lsz_hue = hue->linesize[0];
  339. ThreadDataHueSatMetrics td_huesat = {
  340. .src = in,
  341. .dst_sat = sat,
  342. .dst_hue = hue,
  343. };
  344. if (!s->frame_prev)
  345. s->frame_prev = av_frame_clone(in);
  346. prev = s->frame_prev;
  347. if (s->outfilter != FILTER_NONE) {
  348. out = av_frame_clone(in);
  349. av_frame_make_writable(out);
  350. }
  351. ctx->internal->execute(ctx, compute_sat_hue_metrics, &td_huesat,
  352. NULL, FFMIN(s->chromah, ctx->graph->nb_threads));
  353. // Calculate luma histogram and difference with previous frame or field.
  354. for (j = 0; j < link->h; j++) {
  355. for (i = 0; i < link->w; i++) {
  356. const int yuv = in->data[0][w + i];
  357. histy[yuv]++;
  358. dify += abs(yuv - prev->data[0][pw + i]);
  359. }
  360. w += in->linesize[0];
  361. pw += prev->linesize[0];
  362. }
  363. // Calculate chroma histogram and difference with previous frame or field.
  364. for (j = 0; j < s->chromah; j++) {
  365. for (i = 0; i < s->chromaw; i++) {
  366. const int yuvu = in->data[1][cw+i];
  367. const int yuvv = in->data[2][cw+i];
  368. histu[yuvu]++;
  369. difu += abs(yuvu - prev->data[1][cpw+i]);
  370. histv[yuvv]++;
  371. difv += abs(yuvv - prev->data[2][cpw+i]);
  372. histsat[p_sat[i]]++;
  373. histhue[((int16_t*)p_hue)[i]]++;
  374. }
  375. cw += in->linesize[1];
  376. cpw += prev->linesize[1];
  377. p_sat += lsz_sat;
  378. p_hue += lsz_hue;
  379. }
  380. for (fil = 0; fil < FILT_NUMB; fil ++) {
  381. if (s->filters & 1<<fil) {
  382. ThreadData td = {
  383. .in = in,
  384. .out = out != in && s->outfilter == fil ? out : NULL,
  385. };
  386. memset(s->jobs_rets, 0, s->nb_jobs * sizeof(*s->jobs_rets));
  387. ctx->internal->execute(ctx, filters_def[fil].process,
  388. &td, s->jobs_rets, s->nb_jobs);
  389. for (i = 0; i < s->nb_jobs; i++)
  390. filtot[fil] += s->jobs_rets[i];
  391. }
  392. }
  393. // find low / high based on histogram percentile
  394. // these only need to be calculated once.
  395. lowp = lrint(s->fs * 10 / 100.);
  396. highp = lrint(s->fs * 90 / 100.);
  397. clowp = lrint(s->cfs * 10 / 100.);
  398. chighp = lrint(s->cfs * 90 / 100.);
  399. accy = accu = accv = accsat = 0;
  400. for (fil = 0; fil < DEPTH; fil++) {
  401. if (miny < 0 && histy[fil]) miny = fil;
  402. if (minu < 0 && histu[fil]) minu = fil;
  403. if (minv < 0 && histv[fil]) minv = fil;
  404. if (minsat < 0 && histsat[fil]) minsat = fil;
  405. if (histy[fil]) maxy = fil;
  406. if (histu[fil]) maxu = fil;
  407. if (histv[fil]) maxv = fil;
  408. if (histsat[fil]) maxsat = fil;
  409. toty += histy[fil] * fil;
  410. totu += histu[fil] * fil;
  411. totv += histv[fil] * fil;
  412. totsat += histsat[fil] * fil;
  413. accy += histy[fil];
  414. accu += histu[fil];
  415. accv += histv[fil];
  416. accsat += histsat[fil];
  417. if (lowy == -1 && accy >= lowp) lowy = fil;
  418. if (lowu == -1 && accu >= clowp) lowu = fil;
  419. if (lowv == -1 && accv >= clowp) lowv = fil;
  420. if (lowsat == -1 && accsat >= clowp) lowsat = fil;
  421. if (highy == -1 && accy >= highp) highy = fil;
  422. if (highu == -1 && accu >= chighp) highu = fil;
  423. if (highv == -1 && accv >= chighp) highv = fil;
  424. if (highsat == -1 && accsat >= chighp) highsat = fil;
  425. }
  426. maxhue = histhue[0];
  427. medhue = -1;
  428. for (fil = 0; fil < 360; fil++) {
  429. tothue += histhue[fil] * fil;
  430. acchue += histhue[fil];
  431. if (medhue == -1 && acchue > s->cfs / 2)
  432. medhue = fil;
  433. if (histhue[fil] > maxhue) {
  434. maxhue = histhue[fil];
  435. }
  436. }
  437. av_frame_free(&s->frame_prev);
  438. s->frame_prev = av_frame_clone(in);
  439. #define SET_META(key, fmt, val) do { \
  440. snprintf(metabuf, sizeof(metabuf), fmt, val); \
  441. av_dict_set(&out->metadata, "lavfi.signalstats." key, metabuf, 0); \
  442. } while (0)
  443. SET_META("YMIN", "%d", miny);
  444. SET_META("YLOW", "%d", lowy);
  445. SET_META("YAVG", "%g", 1.0 * toty / s->fs);
  446. SET_META("YHIGH", "%d", highy);
  447. SET_META("YMAX", "%d", maxy);
  448. SET_META("UMIN", "%d", minu);
  449. SET_META("ULOW", "%d", lowu);
  450. SET_META("UAVG", "%g", 1.0 * totu / s->cfs);
  451. SET_META("UHIGH", "%d", highu);
  452. SET_META("UMAX", "%d", maxu);
  453. SET_META("VMIN", "%d", minv);
  454. SET_META("VLOW", "%d", lowv);
  455. SET_META("VAVG", "%g", 1.0 * totv / s->cfs);
  456. SET_META("VHIGH", "%d", highv);
  457. SET_META("VMAX", "%d", maxv);
  458. SET_META("SATMIN", "%d", minsat);
  459. SET_META("SATLOW", "%d", lowsat);
  460. SET_META("SATAVG", "%g", 1.0 * totsat / s->cfs);
  461. SET_META("SATHIGH", "%d", highsat);
  462. SET_META("SATMAX", "%d", maxsat);
  463. SET_META("HUEMED", "%d", medhue);
  464. SET_META("HUEAVG", "%g", 1.0 * tothue / s->cfs);
  465. SET_META("YDIF", "%g", 1.0 * dify / s->fs);
  466. SET_META("UDIF", "%g", 1.0 * difu / s->cfs);
  467. SET_META("VDIF", "%g", 1.0 * difv / s->cfs);
  468. for (fil = 0; fil < FILT_NUMB; fil ++) {
  469. if (s->filters & 1<<fil) {
  470. char metaname[128];
  471. snprintf(metabuf, sizeof(metabuf), "%g", 1.0 * filtot[fil] / s->fs);
  472. snprintf(metaname, sizeof(metaname), "lavfi.signalstats.%s", filters_def[fil].name);
  473. av_dict_set(&out->metadata, metaname, metabuf, 0);
  474. }
  475. }
  476. if (in != out)
  477. av_frame_free(&in);
  478. return ff_filter_frame(outlink, out);
  479. }
/* Single video input; all work happens in filter_frame(). */
static const AVFilterPad signalstats_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single video output; geometry and scratch buffers set up in config_props(). */
static const AVFilterPad signalstats_outputs[] = {
    {
        .name         = "default",
        .config_props = config_props,
        .type         = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
/* Filter definition: one video input, one video output, slice-threaded. */
AVFilter ff_vf_signalstats = {
    .name          = "signalstats",
    .description   = "Generate statistics from video analysis.",
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(SignalstatsContext),
    .inputs        = signalstats_inputs,
    .outputs       = signalstats_outputs,
    .priv_class    = &signalstats_class,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};