/*
 * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Postprocessing filter - 7
 *
 * Originally written by Michael Niedermayer for the MPlayer
 * project, and ported by Arwa Arif for FFmpeg.
 */
  28. #include "libavutil/avassert.h"
  29. #include "libavutil/imgutils.h"
  30. #include "libavutil/opt.h"
  31. #include "libavutil/pixdesc.h"
  32. #include "internal.h"
  33. #include "vf_pp7.h"
/**
 * Thresholding variants for the requantization step, selected with the
 * "mode" option (see pp7_options / config_input).
 */
enum mode {
    MODE_HARD,   ///< drop coefficients below threshold (hardthresh_c)
    MODE_SOFT,   ///< shrink coefficients toward zero (softthresh_c)
    MODE_MEDIUM  ///< compromise between hard and soft (mediumthresh_c)
};
#define OFFSET(x) offsetof(PP7Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* Filter options: "qp" forces a constant quantizer (0 means use the
 * per-macroblock qp table attached to the frame, see filter_frame);
 * "mode" selects the thresholding function (default: medium). */
static const AVOption pp7_options[] = {
    { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 64, FLAGS },
    { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_MEDIUM}, 0, 2, FLAGS, "mode" },
    { "hard",   "hard thresholding",   0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD},   INT_MIN, INT_MAX, FLAGS, "mode" },
    { "soft",   "soft thresholding",   0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT},   INT_MIN, INT_MAX, FLAGS, "mode" },
    { "medium", "medium thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_MEDIUM}, INT_MIN, INT_MAX, FLAGS, "mode" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(pp7);
/* 8x8 ordered-dither matrix (values 0..63).  filter() adds
 * dither[y&7][x&7] to the requantized value before the final >>6,
 * spreading the 6-bit rounding error spatially. */
DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
    {  0, 48, 12, 60,  3, 51, 15, 63, },
    { 32, 16, 44, 28, 35, 19, 47, 31, },
    {  8, 56,  4, 52, 11, 59,  7, 55, },
    { 40, 24, 36, 20, 43, 27, 39, 23, },
    {  2, 50, 14, 62,  1, 49, 13, 61, },
    { 34, 18, 46, 30, 33, 17, 45, 29, },
    { 10, 58,  6, 54,  9, 57,  5, 53, },
    { 42, 26, 38, 22, 41, 25, 37, 21, },
};
/* NOTE(review): N0/N1/N2 appear to be (scaled) squared norms of the DCT
 * basis rows used by dctA_c/dctB_c, with SN0/SN1/SN2 their square roots
 * (2, sqrt(5), sqrt(10)) — used for the thresholds in init_thres2.
 * Confirm against the original MPlayer pp7 derivation. */
#define N0 4
#define N1 5
#define N2 10
#define SN0 2
#define SN1 2.2360679775
#define SN2 3.16227766017
/* 16.16 fixed-point scale for the normalization table below. */
#define N (1 << 16)
/* Per-coefficient normalization factors, one entry for each of the 16
 * coefficients of the 4x4 block produced by dctB; applied in the
 * *thresh_c requantizers. */
static const int factor[16] = {
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N1 * N0), N / (N1 * N1), N / (N1 * N0), N / (N1 * N2),
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N2 * N0), N / (N2 * N1), N / (N2 * N0), N / (N2 * N2),
};
  73. static void init_thres2(PP7Context *p)
  74. {
  75. int qp, i;
  76. int bias = 0; //FIXME
  77. for (qp = 0; qp < 99; qp++) {
  78. for (i = 0; i < 16; i++) {
  79. p->thres2[qp][i] = ((i&1) ? SN2 : SN0) * ((i&4) ? SN2 : SN0) * FFMAX(1, qp) * (1<<2) - 1 - bias;
  80. }
  81. }
  82. }
  83. static inline void dctA_c(int16_t *dst, uint8_t *src, int stride)
  84. {
  85. int i;
  86. for (i = 0; i < 4; i++) {
  87. int s0 = src[0 * stride] + src[6 * stride];
  88. int s1 = src[1 * stride] + src[5 * stride];
  89. int s2 = src[2 * stride] + src[4 * stride];
  90. int s3 = src[3 * stride];
  91. int s = s3 + s3;
  92. s3 = s - s0;
  93. s0 = s + s0;
  94. s = s2 + s1;
  95. s2 = s2 - s1;
  96. dst[0] = s0 + s;
  97. dst[2] = s0 - s;
  98. dst[1] = 2 * s3 + s2;
  99. dst[3] = s3 - 2 * s2;
  100. src++;
  101. dst += 4;
  102. }
  103. }
  104. static void dctB_c(int16_t *dst, int16_t *src)
  105. {
  106. int i;
  107. for (i = 0; i < 4; i++) {
  108. int s0 = src[0 * 4] + src[6 * 4];
  109. int s1 = src[1 * 4] + src[5 * 4];
  110. int s2 = src[2 * 4] + src[4 * 4];
  111. int s3 = src[3 * 4];
  112. int s = s3 + s3;
  113. s3 = s - s0;
  114. s0 = s + s0;
  115. s = s2 + s1;
  116. s2 = s2 - s1;
  117. dst[0 * 4] = s0 + s;
  118. dst[2 * 4] = s0 - s;
  119. dst[1 * 4] = 2 * s3 + s2;
  120. dst[3 * 4] = s3 - 2 * s2;
  121. src++;
  122. dst++;
  123. }
  124. }
  125. static int hardthresh_c(PP7Context *p, int16_t *src, int qp)
  126. {
  127. int i;
  128. int a;
  129. a = src[0] * factor[0];
  130. for (i = 1; i < 16; i++) {
  131. unsigned int threshold1 = p->thres2[qp][i];
  132. unsigned int threshold2 = threshold1 << 1;
  133. int level = src[i];
  134. if (((unsigned)(level + threshold1)) > threshold2)
  135. a += level * factor[i];
  136. }
  137. return (a + (1 << 11)) >> 12;
  138. }
  139. static int mediumthresh_c(PP7Context *p, int16_t *src, int qp)
  140. {
  141. int i;
  142. int a;
  143. a = src[0] * factor[0];
  144. for (i = 1; i < 16; i++) {
  145. unsigned int threshold1 = p->thres2[qp][i];
  146. unsigned int threshold2 = threshold1 << 1;
  147. int level = src[i];
  148. if (((unsigned)(level + threshold1)) > threshold2) {
  149. if (((unsigned)(level + 2 * threshold1)) > 2 * threshold2)
  150. a += level * factor[i];
  151. else {
  152. if (level > 0)
  153. a += 2 * (level - (int)threshold1) * factor[i];
  154. else
  155. a += 2 * (level + (int)threshold1) * factor[i];
  156. }
  157. }
  158. }
  159. return (a + (1 << 11)) >> 12;
  160. }
  161. static int softthresh_c(PP7Context *p, int16_t *src, int qp)
  162. {
  163. int i;
  164. int a;
  165. a = src[0] * factor[0];
  166. for (i = 1; i < 16; i++) {
  167. unsigned int threshold1 = p->thres2[qp][i];
  168. unsigned int threshold2 = threshold1 << 1;
  169. int level = src[i];
  170. if (((unsigned)(level + threshold1)) > threshold2) {
  171. if (level > 0)
  172. a += (level - (int)threshold1) * factor[i];
  173. else
  174. a += (level + (int)threshold1) * factor[i];
  175. }
  176. }
  177. return (a + (1 << 11)) >> 12;
  178. }
/**
 * Filter one plane.
 *
 * The source plane is copied into the padded work buffer p->src with an
 * 8-pixel mirrored border on every side, then each output pixel is
 * produced by transforming a 7x7 neighbourhood (dctA vertical pass cached
 * per 4-column group, dctB horizontal pass), requantizing with the
 * selected threshold function, and dithering the result back to 8 bits.
 *
 * @param qp_store per-macroblock quantizer table; may be NULL only if
 *                 p->qp is set (caller guarantees this, see filter_frame)
 * @param is_luma  selects luma stride/qp subsampling (chroma recomputes
 *                 its own padded stride from this plane's width)
 */
static void filter(PP7Context *p, uint8_t *dst, uint8_t *src,
                   int dst_stride, int src_stride,
                   int width, int height,
                   uint8_t *qp_store, int qp_stride, int is_luma)
{
    int x, y;
    const int stride = is_luma ? p->temp_stride : ((width + 16 + 15) & (~15));
    /* skip the 8-row top border region of the work buffer */
    uint8_t *p_src = p->src + 8 * stride;
    /* block/temp alias the front of p->src, which is free until the
     * padded image rows start at 8*stride */
    int16_t *block = (int16_t *)p->src;
    int16_t *temp  = (int16_t *)(p->src + 32);

    if (!src || !dst) return; // HACK: avoid crash for grey-only formats

    /* Copy the plane and mirror 8 pixels on the left/right edges. */
    for (y = 0; y < height; y++) {
        int index = 8 + 8 * stride + y * stride;
        memcpy(p_src + index, src + y * src_stride, width);
        for (x = 0; x < 8; x++) {
            p_src[index - x - 1] = p_src[index + x];
            p_src[index + width + x] = p_src[index + width - x - 1];
        }
    }
    /* Mirror 8 rows on the top/bottom edges (whole padded rows). */
    for (y = 0; y < 8; y++) {
        memcpy(p_src + (7 - y) * stride, p_src + (y + 8) * stride, stride);
        memcpy(p_src + (height + 8 + y) * stride, p_src + (height - y + 7) * stride, stride);
    }
    //FIXME (try edge emu)

    for (y = 0; y < height; y++) {
        /* Prime the vertical-transform cache for the 8 columns left of x=0
         * (tp indices go negative; temp points past enough scratch). */
        for (x = -8; x < 0; x += 4) {
            const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
            uint8_t *src = p_src + index;
            int16_t *tp = temp + 4 * x;
            dctA_c(tp + 4 * 8, src, stride);
        }
        for (x = 0; x < width; ) {
            /* qp table resolution: 16x16 luma macroblocks (>>4), which is
             * >>3 for the subsampled chroma planes */
            const int qps = 3 + is_luma;
            int qp;
            int end = FFMIN(x + 8, width);

            if (p->qp)
                qp = p->qp; /* constant quantizer forced by the user */
            else {
                qp = qp_store[(FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
                qp = ff_norm_qscale(qp, p->qscale_type);
            }
            /* Process up to 8 pixels sharing this qp. */
            for (; x < end; x++) {
                const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
                uint8_t *src = p_src + index;
                int16_t *tp = temp + 4 * x;
                int v;

                /* refresh the vertical-transform cache every 4 columns */
                if ((x & 3) == 0)
                    dctA_c(tp + 4 * 8, src, stride);

                p->dctB(block, tp);

                v = p->requantize(p, block, qp);
                v = (v + dither[y & 7][x & 7]) >> 6;
                /* clamp to 0..255: (-v)>>31 is 255-as-u8 when v > 255,
                 * 0 when v < 0 (relies on arithmetic right shift) */
                if ((unsigned)v > 255)
                    v = (-v) >> 31;
                dst[x + y * dst_stride] = v;
            }
        }
    }
}
  237. static int query_formats(AVFilterContext *ctx)
  238. {
  239. static const enum AVPixelFormat pix_fmts[] = {
  240. AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
  241. AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
  242. AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
  243. AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
  244. AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
  245. AV_PIX_FMT_GBRP,
  246. AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
  247. };
  248. AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
  249. if (!fmts_list)
  250. return AVERROR(ENOMEM);
  251. return ff_set_common_formats(ctx, fmts_list);
  252. }
  253. static int config_input(AVFilterLink *inlink)
  254. {
  255. AVFilterContext *ctx = inlink->dst;
  256. PP7Context *pp7 = ctx->priv;
  257. const int h = FFALIGN(inlink->h + 16, 16);
  258. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
  259. pp7->hsub = desc->log2_chroma_w;
  260. pp7->vsub = desc->log2_chroma_h;
  261. pp7->temp_stride = FFALIGN(inlink->w + 16, 16);
  262. pp7->src = av_malloc_array(pp7->temp_stride, (h + 8) * sizeof(uint8_t));
  263. if (!pp7->src)
  264. return AVERROR(ENOMEM);
  265. init_thres2(pp7);
  266. switch (pp7->mode) {
  267. case 0: pp7->requantize = hardthresh_c; break;
  268. case 1: pp7->requantize = softthresh_c; break;
  269. default:
  270. case 2: pp7->requantize = mediumthresh_c; break;
  271. }
  272. pp7->dctB = dctB_c;
  273. if (ARCH_X86)
  274. ff_pp7_init_x86(pp7);
  275. return 0;
  276. }
/**
 * Filter one video frame.
 *
 * Works in place when the input is writable and both dimensions are
 * multiples of 8; otherwise allocates an aligned output frame.  If neither
 * a forced qp nor a per-frame qp table is available, the pixel data passes
 * through untouched.  Takes ownership of @p in.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    PP7Context *pp7 = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = in;

    int qp_stride = 0;
    uint8_t *qp_table = NULL;

    /* unless a constant qp is forced, fetch the decoder's per-macroblock
     * quantizer table (may be absent) */
    if (!pp7->qp)
        qp_table = av_frame_get_qp_table(in, &qp_stride, &pp7->qscale_type);

    if (!ctx->is_disabled) {
        const int cw = AV_CEIL_RSHIFT(inlink->w, pp7->hsub);
        const int ch = AV_CEIL_RSHIFT(inlink->h, pp7->vsub);

        /* get a new frame if in-place is not possible or if the dimensions
         * are not multiple of 8 */
        if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
            const int aligned_w = FFALIGN(inlink->w, 8);
            const int aligned_h = FFALIGN(inlink->h, 8);

            out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
            if (!out) {
                av_frame_free(&in);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);
            /* keep the original (unaligned) visible dimensions */
            out->width  = in->width;
            out->height = in->height;
        }

        /* without any quantizer information the filter cannot run */
        if (qp_table || pp7->qp) {
            filter(pp7, out->data[0], in->data[0], out->linesize[0], in->linesize[0],
                   inlink->w, inlink->h, qp_table, qp_stride, 1);
            filter(pp7, out->data[1], in->data[1], out->linesize[1], in->linesize[1],
                   cw, ch, qp_table, qp_stride, 0);
            filter(pp7, out->data[2], in->data[2], out->linesize[2], in->linesize[2],
                   cw, ch, qp_table, qp_stride, 0);
            emms_c();
        }
    }

    if (in != out) {
        /* the alpha plane (if any) is never filtered; copy it over */
        if (in->data[3])
            av_image_copy_plane(out->data[3], out->linesize[3],
                                in ->data[3], in ->linesize[3],
                                inlink->w, inlink->h);
        av_frame_free(&in);
    }
    return ff_filter_frame(outlink, out);
}
  323. static av_cold void uninit(AVFilterContext *ctx)
  324. {
  325. PP7Context *pp7 = ctx->priv;
  326. av_freep(&pp7->src);
  327. }
/* Single video input: config_input allocates the work buffer, filter_frame
 * does the per-frame processing. */
static const AVFilterPad pp7_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Single pass-through video output. */
static const AVFilterPad pp7_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
/* Filter registration.  Timeline support is "internal": filter_frame
 * checks ctx->is_disabled itself so disabled frames still pass through. */
AVFilter ff_vf_pp7 = {
    .name          = "pp7",
    .description   = NULL_IF_CONFIG_SMALL("Apply Postprocessing 7 filter."),
    .priv_size     = sizeof(PP7Context),
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = pp7_inputs,
    .outputs       = pp7_outputs,
    .priv_class    = &pp7_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};