/* vf_pp7.c */
/*
 * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Postprocessing filter - 7
 *
 * Originally written by Michael Niedermayer for the MPlayer
 * project, and ported by Arwa Arif for FFmpeg.
 */
  28. #include "libavutil/avassert.h"
  29. #include "libavutil/imgutils.h"
  30. #include "libavutil/opt.h"
  31. #include "libavutil/pixdesc.h"
  32. #include "internal.h"
  33. #include "vf_pp7.h"
/* Thresholding strategies selectable via the "mode" option; mapped to the
 * requantize callbacks in config_input(). */
enum mode {
    MODE_HARD,   ///< hard thresholding (hardthresh_c)
    MODE_SOFT,   ///< soft thresholding (softthresh_c)
    MODE_MEDIUM  ///< medium thresholding (mediumthresh_c, the default)
};
#define OFFSET(x) offsetof(PP7Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* User-visible options: a forced constant quantizer ("qp"; 0 means use the
 * per-frame QP table from the decoder) and the thresholding mode. */
static const AVOption pp7_options[] = {
    { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 64, FLAGS },
    { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_MEDIUM}, 0, 2, FLAGS, "mode" },
    { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, "mode" },
    { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT}, INT_MIN, INT_MAX, FLAGS, "mode" },
    { "medium", "medium thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_MEDIUM}, INT_MIN, INT_MAX, FLAGS, "mode" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(pp7);
/* 8x8 ordered-dither matrix (values 0..63) used by filter() to round the
 * requantized output before the final >> 6. */
DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
    {  0, 48, 12, 60,  3, 51, 15, 63, },
    { 32, 16, 44, 28, 35, 19, 47, 31, },
    {  8, 56,  4, 52, 11, 59,  7, 55, },
    { 40, 24, 36, 20, 43, 27, 39, 23, },
    {  2, 50, 14, 62,  1, 49, 13, 61, },
    { 34, 18, 46, 30, 33, 17, 45, 29, },
    { 10, 58,  6, 54,  9, 57,  5, 53, },
    { 42, 26, 38, 22, 41, 25, 37, 21, },
};
/* Basis-norm constants: SNx is the square root of Nx (SN1 = sqrt(5),
 * SN2 = sqrt(10)); N is the fixed-point scale used in the tables below. */
#define N0 4
#define N1 5
#define N2 10
#define SN0 2
#define SN1 2.2360679775
#define SN2 3.16227766017
#define N (1 << 16)

/* Per-coefficient normalization factors, N / (row norm * column norm),
 * applied by the requantize callbacks. */
static const int factor[16] = {
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N1 * N0), N / (N1 * N1), N / (N1 * N0), N / (N1 * N2),
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N2 * N0), N / (N2 * N1), N / (N2 * N0), N / (N2 * N2),
};

/* NOTE(review): this table is not referenced anywhere in this file — the
 * actual thresholds come from init_thres2(); presumably kept for reference
 * or for asm code elsewhere. Verify before removing. */
static const int thres[16] = {
    N / (SN0 * SN0), N / (SN0 * SN2), N / (SN0 * SN0), N / (SN0 * SN2),
    N / (SN2 * SN0), N / (SN2 * SN2), N / (SN2 * SN0), N / (SN2 * SN2),
    N / (SN0 * SN0), N / (SN0 * SN2), N / (SN0 * SN0), N / (SN0 * SN2),
    N / (SN2 * SN0), N / (SN2 * SN2), N / (SN2 * SN0), N / (SN2 * SN2),
};
  79. static void init_thres2(PP7Context *p)
  80. {
  81. int qp, i;
  82. int bias = 0; //FIXME
  83. for (qp = 0; qp < 99; qp++) {
  84. for (i = 0; i < 16; i++) {
  85. p->thres2[qp][i] = ((i&1) ? SN2 : SN0) * ((i&4) ? SN2 : SN0) * FFMAX(1, qp) * (1<<2) - 1 - bias;
  86. }
  87. }
  88. }
  89. static inline void dctA_c(int16_t *dst, uint8_t *src, int stride)
  90. {
  91. int i;
  92. for (i = 0; i < 4; i++) {
  93. int s0 = src[0 * stride] + src[6 * stride];
  94. int s1 = src[1 * stride] + src[5 * stride];
  95. int s2 = src[2 * stride] + src[4 * stride];
  96. int s3 = src[3 * stride];
  97. int s = s3 + s3;
  98. s3 = s - s0;
  99. s0 = s + s0;
  100. s = s2 + s1;
  101. s2 = s2 - s1;
  102. dst[0] = s0 + s;
  103. dst[2] = s0 - s;
  104. dst[1] = 2 * s3 + s2;
  105. dst[3] = s3 - 2 * s2;
  106. src++;
  107. dst += 4;
  108. }
  109. }
  110. static void dctB_c(int16_t *dst, int16_t *src)
  111. {
  112. int i;
  113. for (i = 0; i < 4; i++) {
  114. int s0 = src[0 * 4] + src[6 * 4];
  115. int s1 = src[1 * 4] + src[5 * 4];
  116. int s2 = src[2 * 4] + src[4 * 4];
  117. int s3 = src[3 * 4];
  118. int s = s3 + s3;
  119. s3 = s - s0;
  120. s0 = s + s0;
  121. s = s2 + s1;
  122. s2 = s2 - s1;
  123. dst[0 * 4] = s0 + s;
  124. dst[2 * 4] = s0 - s;
  125. dst[1 * 4] = 2 * s3 + s2;
  126. dst[3 * 4] = s3 - 2 * s2;
  127. src++;
  128. dst++;
  129. }
  130. }
  131. static int hardthresh_c(PP7Context *p, int16_t *src, int qp)
  132. {
  133. int i;
  134. int a;
  135. a = src[0] * factor[0];
  136. for (i = 1; i < 16; i++) {
  137. unsigned int threshold1 = p->thres2[qp][i];
  138. unsigned int threshold2 = threshold1 << 1;
  139. int level = src[i];
  140. if (((unsigned)(level + threshold1)) > threshold2)
  141. a += level * factor[i];
  142. }
  143. return (a + (1 << 11)) >> 12;
  144. }
  145. static int mediumthresh_c(PP7Context *p, int16_t *src, int qp)
  146. {
  147. int i;
  148. int a;
  149. a = src[0] * factor[0];
  150. for (i = 1; i < 16; i++) {
  151. unsigned int threshold1 = p->thres2[qp][i];
  152. unsigned int threshold2 = threshold1 << 1;
  153. int level = src[i];
  154. if (((unsigned)(level + threshold1)) > threshold2) {
  155. if (((unsigned)(level + 2 * threshold1)) > 2 * threshold2)
  156. a += level * factor[i];
  157. else {
  158. if (level > 0)
  159. a += 2 * (level - (int)threshold1) * factor[i];
  160. else
  161. a += 2 * (level + (int)threshold1) * factor[i];
  162. }
  163. }
  164. }
  165. return (a + (1 << 11)) >> 12;
  166. }
  167. static int softthresh_c(PP7Context *p, int16_t *src, int qp)
  168. {
  169. int i;
  170. int a;
  171. a = src[0] * factor[0];
  172. for (i = 1; i < 16; i++) {
  173. unsigned int threshold1 = p->thres2[qp][i];
  174. unsigned int threshold2 = threshold1 << 1;
  175. int level = src[i];
  176. if (((unsigned)(level + threshold1)) > threshold2) {
  177. if (level > 0)
  178. a += (level - (int)threshold1) * factor[i];
  179. else
  180. a += (level + (int)threshold1) * factor[i];
  181. }
  182. }
  183. return (a + (1 << 11)) >> 12;
  184. }
/**
 * Run the pp7 deblocking pass over one plane.
 *
 * The plane is first copied into p->src with an 8-pixel mirrored border on
 * all sides; the transform/requantize/dither pipeline then writes the
 * result to dst. qp_store/qp_stride describe the per-macroblock QP table
 * (ignored when p->qp forces a constant quantizer).
 */
static void filter(PP7Context *p, uint8_t *dst, uint8_t *src,
                   int dst_stride, int src_stride,
                   int width, int height,
                   uint8_t *qp_store, int qp_stride, int is_luma)
{
    int x, y;
    /* Chroma planes get their own 16-aligned stride; luma reuses the one
     * computed in config_input(). */
    const int stride = is_luma ? p->temp_stride : ((width + 16 + 15) & (~15));
    uint8_t *p_src = p->src + 8 * stride;
    /* block and temp alias regions of the same scratch buffer. */
    int16_t *block = (int16_t *)p->src;
    int16_t *temp = (int16_t *)(p->src + 32);

    if (!src || !dst) return;
    /* Copy each row and mirror 8 pixels on the left/right edges. */
    for (y = 0; y < height; y++) {
        int index = 8 + 8 * stride + y * stride;
        memcpy(p_src + index, src + y * src_stride, width);
        for (x = 0; x < 8; x++) {
            p_src[index - x - 1] = p_src[index + x];
            p_src[index + width + x] = p_src[index + width - x - 1];
        }
    }
    /* Mirror 8 full rows above and below the plane. */
    for (y = 0; y < 8; y++) {
        memcpy(p_src + (7 - y) * stride, p_src + (y + 8) * stride, stride);
        memcpy(p_src + (height + 8 + y) * stride, p_src + (height - y + 7) * stride, stride);
    }
    //FIXME (try edge emu)

    for (y = 0; y < height; y++) {
        /* Prime the column-transform cache for x in [-8, 0). */
        for (x = -8; x < 0; x += 4) {
            const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
            uint8_t *src = p_src + index;
            int16_t *tp = temp + 4 * x;
            dctA_c(tp + 4 * 8, src, stride);
        }
        for (x = 0; x < width; ) {
            /* QP table granularity: 16-pixel blocks for luma, 8 for chroma. */
            const int qps = 3 + is_luma;
            int qp;
            int end = FFMIN(x + 8, width);
            if (p->qp)
                qp = p->qp;
            else {
                qp = qp_store[(FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
                qp = ff_norm_qscale(qp, p->qscale_type);
            }
            /* Process up to 8 pixels sharing the same qp. */
            for (; x < end; x++) {
                const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
                uint8_t *src = p_src + index;
                int16_t *tp = temp + 4 * x;
                int v;
                /* Refresh the vertical transform once per 4 columns. */
                if ((x & 3) == 0)
                    dctA_c(tp + 4 * 8, src, stride);
                p->dctB(block, tp);
                v = p->requantize(p, block, qp);
                /* Ordered dither, then clip to [0, 255]:
                 * (-v) >> 31 yields 0 for v > 255 and 255 for v < 0. */
                v = (v + dither[y & 7][x & 7]) >> 6;
                if ((unsigned)v > 255)
                    v = (-v) >> 31;
                dst[x + y * dst_stride] = v;
            }
        }
    }
}
  243. static int query_formats(AVFilterContext *ctx)
  244. {
  245. static const enum PixelFormat pix_fmts[] = {
  246. AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
  247. AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
  248. AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
  249. AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
  250. AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
  251. AV_PIX_FMT_GBRP,
  252. AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
  253. };
  254. ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
  255. return 0;
  256. }
  257. static int config_input(AVFilterLink *inlink)
  258. {
  259. AVFilterContext *ctx = inlink->dst;
  260. PP7Context *pp7 = ctx->priv;
  261. const int h = FFALIGN(inlink->h + 16, 16);
  262. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
  263. pp7->hsub = desc->log2_chroma_w;
  264. pp7->vsub = desc->log2_chroma_h;
  265. pp7->temp_stride = FFALIGN(inlink->w + 16, 16);
  266. pp7->src = av_malloc_array(pp7->temp_stride, (h + 8) * sizeof(uint8_t));
  267. if (!pp7->src)
  268. return AVERROR(ENOMEM);
  269. init_thres2(pp7);
  270. switch (pp7->mode) {
  271. case 0: pp7->requantize = hardthresh_c; break;
  272. case 1: pp7->requantize = softthresh_c; break;
  273. default:
  274. case 2: pp7->requantize = mediumthresh_c; break;
  275. }
  276. pp7->dctB = dctB_c;
  277. if (ARCH_X86)
  278. ff_pp7_init_x86(pp7);
  279. return 0;
  280. }
/**
 * Filter one input frame. Works in place when the input frame is writable
 * and its dimensions are multiples of 8; otherwise allocates a new output
 * frame. Without QP information (no forced qp and no decoder QP table) the
 * frame passes through unfiltered.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    PP7Context *pp7 = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = in;

    int qp_stride = 0;
    uint8_t *qp_table = NULL;

    /* Unless a constant qp was forced, fetch the per-macroblock QP table
     * exported by the decoder (may be NULL). */
    if (!pp7->qp)
        qp_table = av_frame_get_qp_table(in, &qp_stride, &pp7->qscale_type);

    if (!ctx->is_disabled) {
        const int cw = FF_CEIL_RSHIFT(inlink->w, pp7->hsub);
        const int ch = FF_CEIL_RSHIFT(inlink->h, pp7->vsub);

        /* get a new frame if in-place is not possible or if the dimensions
         * are not multiple of 8 */
        if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
            const int aligned_w = FFALIGN(inlink->w, 8);
            const int aligned_h = FFALIGN(inlink->h, 8);

            out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
            if (!out) {
                av_frame_free(&in);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);
            out->width = in->width;
            out->height = in->height;
        }

        if (qp_table || pp7->qp) {
            /* Luma at full resolution, both chroma planes subsampled. */
            filter(pp7, out->data[0], in->data[0], out->linesize[0], in->linesize[0],
                   inlink->w, inlink->h, qp_table, qp_stride, 1);
            filter(pp7, out->data[1], in->data[1], out->linesize[1], in->linesize[1],
                   cw, ch, qp_table, qp_stride, 0);
            filter(pp7, out->data[2], in->data[2], out->linesize[2], in->linesize[2],
                   cw, ch, qp_table, qp_stride, 0);
            emms_c();
        }
    }

    if (in != out) {
        /* Copy the alpha plane (untouched by the filter) and drop the input. */
        if (in->data[3])
            av_image_copy_plane(out->data[3], out->linesize[3],
                                in ->data[3], in ->linesize[3],
                                inlink->w, inlink->h);
        av_frame_free(&in);
    }
    return ff_filter_frame(outlink, out);
}
  327. static av_cold void uninit(AVFilterContext *ctx)
  328. {
  329. PP7Context *pp7 = ctx->priv;
  330. av_freep(&pp7->src);
  331. }
/* Single video input: configured by config_input(), frames handled by
 * filter_frame(). */
static const AVFilterPad pp7_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single video output, no extra configuration needed. */
static const AVFilterPad pp7_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
/* Filter registration entry for libavfilter. */
AVFilter ff_vf_pp7 = {
    .name          = "pp7",
    .description   = NULL_IF_CONFIG_SMALL("Apply Postprocessing 7 filter."),
    .priv_size     = sizeof(PP7Context),
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = pp7_inputs,
    .outputs       = pp7_outputs,
    .priv_class    = &pp7_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};