/*
 * Copyright (c) 2010 Nolan Lum <nol888@gmail.com>
 * Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * gradfun debanding filter, ported from MPlayer
 * libmpcodecs/vf_gradfun.c
 *
 * Apply a boxblur debanding algorithm (based on the gradfun2db
 * AviSynth filter by prunedtree).
 * For each pixel, if it is within threshold of the blurred value, move it
 * closer. The result is a smoothed, higher-bitdepth version of all the
 * shallow gradients, while detailed areas are left untouched.
 * Dither it back to 8 bits.
 */
#include "libavutil/imgutils.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "gradfun.h"
#include "internal.h"
#include "video.h"
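
/* 8x8 ordered-dither matrix of even values spanning 0x00..0x7E; one row is
 * added to the 7 fractional bits of each filtered pixel in filter_line()
 * before the result is rounded back to 8 bits. */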
DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = {
    {0x00,0x60,0x18,0x78,0x06,0x66,0x1E,0x7E},
    {0x40,0x20,0x58,0x38,0x46,0x26,0x5E,0x3E},
    {0x10,0x70,0x08,0x68,0x16,0x76,0x0E,0x6E},
    {0x50,0x30,0x48,0x28,0x56,0x36,0x4E,0x2E},
    {0x04,0x64,0x1C,0x7C,0x02,0x62,0x1A,0x7A},
    {0x44,0x24,0x5C,0x3C,0x42,0x22,0x5A,0x3A},
    {0x14,0x74,0x0C,0x6C,0x12,0x72,0x0A,0x6A},
    {0x54,0x34,0x4C,0x2C,0x52,0x32,0x4A,0x2A},
};
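
/*
 * Filter one line: blend each pixel towards its blurred value when the
 * difference is small.  'dc' holds one blurred value (1.7 fixed point) per
 * pair of pixels, 'thresh' is the fixed-point reciprocal of the strength
 * option, and 'dithers' is one row of the matrix above.
 */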
void ff_gradfun_filter_line_c(uint8_t *dst, const uint8_t *src, const uint16_t *dc,
                              int width, int thresh, const uint16_t *dithers)
{
    int x;
    for (x = 0; x < width; dc += x & 1, x++) {   // dc advances once per pixel pair
        int pix   = src[x] << 7;                 // pixel in 1.7 fixed point
        int delta = dc[0] - pix;                 // distance to the blurred value
        int m = abs(delta) * thresh >> 16;
        m = FFMAX(0, 127 - m);                   // blend weight: 127 at delta == 0, 0 past the threshold
        m = m * m * delta >> 14;                 // quadratic falloff of the correction
        pix += m + dithers[x & 7];               // blend towards dc, then dither
        dst[x] = av_clip_uint8(pix >> 7);
    }
}
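
/*
 * Accumulate one half-resolution line of the vertical prefix sum.
 * Each output element is a 2x2 box sum of 'src' added to the previous line's
 * running sum ('buf1'); 'dc' receives the difference against the value stored
 * in this ring slot r calls ago, i.e. a sliding vertical sum over the blur
 * window.
 */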
void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, const uint16_t *buf1,
                            const uint8_t *src, int src_linesize, int width)
{
    int x, v, old;
    for (x = 0; x < width; x++) {
        // 2x2 box sum of the source added to the previous line's running sum
        v = buf1[x] + src[2 * x] + src[2 * x + 1] + src[2 * x + src_linesize] + src[2 * x + 1 + src_linesize];
        old = buf[x];
        buf[x] = v;
        dc[x] = v - old;   // difference against the sum stored here last time around the ring
    }
}
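
/*
 * Process one plane.  A ring of r half-resolution, vertically accumulated
 * lines is kept in ctx->buf; each pass turns it into one line of box-blurred
 * DC values via a sliding horizontal sum, and that line is then used to
 * filter two output rows.
 */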
static void filter(GradFunContext *ctx, uint8_t *dst, const uint8_t *src, int width, int height,
                   int dst_linesize, int src_linesize, int r)
{
    int bstride = FFALIGN(width, 16) / 2;     // stride of one half-resolution line buffer
    int y;
    uint32_t dc_factor = (1 << 21) / (r * r); // normalizes the (2r x 2r) box sum to 1.7 fixed point
    uint16_t *dc = ctx->buf + 16;
    uint16_t *buf = ctx->buf + bstride + 32;
    int thresh = ctx->thresh;

    memset(dc, 0, (bstride + 16) * sizeof(*buf));

    // prime the ring buffer with the first r half-resolution lines
    for (y = 0; y < r; y++)
        ctx->blur_line(dc, buf + y * bstride, buf + (y - 1) * bstride, src + 2 * y * src_linesize, src_linesize, width / 2);

    for (;;) {
        if (y < height - r) {
            int mod = ((y + r) / 2) % r;
            uint16_t *buf0 = buf + mod * bstride;
            uint16_t *buf1 = buf + (mod ? mod - 1 : r - 1) * bstride;
            int x, v;

            // vertical sums for the line r rows ahead of the current output row
            ctx->blur_line(dc, buf0, buf1, src + (y + r) * src_linesize, src_linesize, width / 2);

            // sliding horizontal sum turns the column sums into full box-blur values
            for (x = v = 0; x < r; x++)
                v += dc[x];
            for (; x < width / 2; x++) {
                v += dc[x] - dc[x-r];
                dc[x-r] = v * dc_factor >> 16;
            }
            // pad the right edge with the last sum and replicate the first value on the left
            for (; x < (width + r + 1) / 2; x++)
                dc[x-r] = v * dc_factor >> 16;
            for (x = -r / 2; x < 0; x++)
                dc[x] = dc[0];
        }
        if (y == r) {
            // the first r rows have no blur line of their own; reuse the first one
            for (y = 0; y < r; y++)
                ctx->filter_line(dst + y * dst_linesize, src + y * src_linesize, dc - r / 2, width, thresh, dither[y & 7]);
        }
        // each dc line covers two output rows
        ctx->filter_line(dst + y * dst_linesize, src + y * src_linesize, dc - r / 2, width, thresh, dither[y & 7]);
        if (++y >= height)
            break;
        ctx->filter_line(dst + y * dst_linesize, src + y * src_linesize, dc - r / 2, width, thresh, dither[y & 7]);
        if (++y >= height)
            break;
    }
    emms_c(); // the MMX implementations of filter_line may leave MMX state behind
}
static av_cold int init(AVFilterContext *ctx)
{
    GradFunContext *s = ctx->priv;

    s->thresh = (1 << 15) / s->strength;              // fixed-point reciprocal of the strength option
    s->radius = av_clip((s->radius + 1) & ~1, 4, 32); // radius must be even and within [4, 32]

    s->blur_line   = ff_gradfun_blur_line_c;
    s->filter_line = ff_gradfun_filter_line_c;

    if (ARCH_X86)
        ff_gradfun_init_x86(s);

    av_log(ctx, AV_LOG_VERBOSE, "threshold:%.2f radius:%d\n", s->strength, s->radius);
    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    GradFunContext *s = ctx->priv;
    av_freep(&s->buf);
}
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_GRAY8,   AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_GBRP,
        AV_PIX_FMT_NONE
    };
    // propagate allocation failures instead of silently ignoring them
    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}
static int config_input(AVFilterLink *inlink)
{
    GradFunContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    av_freep(&s->buf);
    s->buf = av_calloc((FFALIGN(inlink->w, 16) * (s->radius + 1) / 2 + 32), sizeof(*s->buf));
    if (!s->buf)
        return AVERROR(ENOMEM);

    s->chroma_w = FF_CEIL_RSHIFT(inlink->w, hsub);
    s->chroma_h = FF_CEIL_RSHIFT(inlink->h, vsub);
    // chroma radius: average of the subsampled radii, rounded to even and clamped like the luma radius
    s->chroma_r = av_clip(((((s->radius >> hsub) + (s->radius >> vsub)) / 2) + 1) & ~1, 4, 32);

    return 0;
}
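
/*
 * Filter each plane independently; the luma plane uses 'radius' while the
 * chroma planes use the subsample-adjusted 'chroma_r'.  Planes too small for
 * the blur window are passed through unchanged.
 */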
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    GradFunContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int p, direct;

    if (av_frame_is_writable(in)) {
        // filter in place when the input frame is writable
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    for (p = 0; p < 4 && in->data[p] && in->linesize[p]; p++) {
        int w = inlink->w;
        int h = inlink->h;
        int r = s->radius;
        if (p) {
            w = s->chroma_w;
            h = s->chroma_h;
            r = s->chroma_r;
        }

        if (FFMIN(w, h) > 2 * r)
            filter(s, out->data[p], in->data[p], w, h, out->linesize[p], in->linesize[p], r);
        else if (out->data[p] != in->data[p])
            av_image_copy_plane(out->data[p], out->linesize[p],
                                in->data[p], in->linesize[p], w, h); /* plane too small for the blur window */
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
#define OFFSET(x) offsetof(GradFunContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption gradfun_options[] = {
    { "strength", "The maximum amount by which the filter will change any one pixel.", OFFSET(strength), AV_OPT_TYPE_FLOAT, { .dbl = 1.2 }, 0.51, 64, FLAGS },
    { "radius",   "The neighborhood to fit the gradient to.",                          OFFSET(radius),   AV_OPT_TYPE_INT,   { .i64 = 16 },  4,    32, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(gradfun);
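
/*
 * Example usage (hypothetical command line, not part of this source):
 *   ffmpeg -i input.mkv -vf gradfun=strength=2:radius=8 output.mkv
 * i.e. a stronger debanding pass fitted over a smaller neighborhood.
 */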
static const AVFilterPad avfilter_vf_gradfun_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_gradfun_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
AVFilter ff_vf_gradfun = {
    .name          = "gradfun",
    .description   = NULL_IF_CONFIG_SMALL("Debands video quickly using gradients."),
    .priv_size     = sizeof(GradFunContext),
    .priv_class    = &gradfun_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_gradfun_inputs,
    .outputs       = avfilter_vf_gradfun_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};