vf_nnedi.c

/*
 * Copyright (C) 2010-2011 Kevin Stone
 * Copyright (C) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <float.h>

#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct FrameData {
    uint8_t *paddedp[3];
    int padded_stride[3];
    int padded_width[3];
    int padded_height[3];

    uint8_t *dstp[3];
    int dst_stride[3];

    int field[3];

    int32_t *lcount[3];

    float *input;
    float *temp;
} FrameData;

typedef struct NNEDIContext {
    const AVClass *class;

    char *weights_file;

    AVFrame *src;
    AVFrame *second;
    AVFrame *dst;
    int eof;
    int64_t cur_pts;

    AVFloatDSPContext *fdsp;
    int nb_planes;
    int linesize[4];
    int planeheight[4];

    float *weights0;
    float *weights1[2];
    int asize;
    int nns;
    int xdia;
    int ydia;

    // Parameters
    int deint;
    int field;
    int process_plane;
    int nsize;
    int nnsparam;
    int qual;
    int etype;
    int pscrn;
    int fapprox;

    int max_value;

    void (*copy_pad)(const AVFrame *, FrameData *, struct NNEDIContext *, int);
    void (*evalfunc_0)(struct NNEDIContext *, FrameData *);
    void (*evalfunc_1)(struct NNEDIContext *, FrameData *);

    // Functions used in evalfunc_0
    void (*readpixels)(const uint8_t *, const int, float *);
    void (*compute_network0)(struct NNEDIContext *s, const float *, const float *, uint8_t *);
    int32_t (*process_line0)(const uint8_t *, int, uint8_t *, const uint8_t *, const int, const int, const int);

    // Functions used in evalfunc_1
    void (*extract)(const uint8_t *, const int, const int, const int, float *, float *);
    void (*dot_prod)(struct NNEDIContext *, const float *, const float *, float *, const int, const int, const float *);
    void (*expfunc)(float *, const int);
    void (*wae5)(const float *, const int, float *);

    FrameData frame_data;
} NNEDIContext;

#define OFFSET(x) offsetof(NNEDIContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption nnedi_options[] = {
    {"weights", "set weights file", OFFSET(weights_file), AV_OPT_TYPE_STRING, {.str="nnedi3_weights.bin"}, 0, 0, FLAGS },
    {"deint", "set which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "deint" },
        {"all",        "deinterlace all frames",                       0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "deint" },
        {"interlaced", "only deinterlace frames marked as interlaced", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "deint" },
    {"field", "set mode of operation", OFFSET(field), AV_OPT_TYPE_INT, {.i64=-1}, -2, 3, FLAGS, "field" },
        {"af", "use frame flags, both fields",  0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, FLAGS, "field" },
        {"a",  "use frame flags, single field", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, FLAGS, "field" },
        {"t",  "use top field only",            0, AV_OPT_TYPE_CONST, {.i64=0},  0, 0, FLAGS, "field" },
        {"b",  "use bottom field only",         0, AV_OPT_TYPE_CONST, {.i64=1},  0, 0, FLAGS, "field" },
        {"tf", "use both fields, top first",    0, AV_OPT_TYPE_CONST, {.i64=2},  0, 0, FLAGS, "field" },
        {"bf", "use both fields, bottom first", 0, AV_OPT_TYPE_CONST, {.i64=3},  0, 0, FLAGS, "field" },
    {"planes", "set which planes to process", OFFSET(process_plane), AV_OPT_TYPE_INT, {.i64=7}, 0, 7, FLAGS },
    {"nsize", "set size of local neighborhood around each pixel, used by the predictor neural network", OFFSET(nsize), AV_OPT_TYPE_INT, {.i64=6}, 0, 6, FLAGS, "nsize" },
        {"s8x6",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "nsize" },
        {"s16x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "nsize" },
        {"s32x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "nsize" },
        {"s48x6", NULL, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "nsize" },
        {"s8x4",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "nsize" },
        {"s16x4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, FLAGS, "nsize" },
        {"s32x4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, FLAGS, "nsize" },
    {"nns", "set number of neurons in predictor neural network", OFFSET(nnsparam), AV_OPT_TYPE_INT, {.i64=1}, 0, 4, FLAGS, "nns" },
        {"n16",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "nns" },
        {"n32",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "nns" },
        {"n64",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "nns" },
        {"n128", NULL, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "nns" },
        {"n256", NULL, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "nns" },
    {"qual", "set quality", OFFSET(qual), AV_OPT_TYPE_INT, {.i64=1}, 1, 2, FLAGS, "qual" },
        {"fast", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "qual" },
        {"slow", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "qual" },
    {"etype", "set which set of weights to use in the predictor", OFFSET(etype), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "etype" },
        {"a", "weights trained to minimize absolute error", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "etype" },
        {"s", "weights trained to minimize squared error",  0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "etype" },
    {"pscrn", "set prescreening", OFFSET(pscrn), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, FLAGS, "pscrn" },
        {"none",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "pscrn" },
        {"original", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "pscrn" },
        {"new",      NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "pscrn" },
    {"fapprox", NULL, OFFSET(fapprox), AV_OPT_TYPE_INT, {.i64=0}, 0, 3, FLAGS },
    { NULL }
};
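
/*
 * Illustrative usage note (not part of the original source): with the
 * nnedi3 weights file available, a double-rate "bob"-style deinterlace driven
 * by the frame's field-order flags could be requested with something like
 *
 *     ffmpeg -i in.mkv -vf nnedi=weights=nnedi3_weights.bin:field=af out.mkv
 *
 * where "field=af" outputs both fields based on frame flags and "weights"
 * points at the binary weights blob that init() below expects to find.
 */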

AVFILTER_DEFINE_CLASS(nnedi);

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    NNEDIContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    NNEDIContext *s = ctx->priv;

    outlink->time_base.num = ctx->inputs[0]->time_base.num;
    outlink->time_base.den = ctx->inputs[0]->time_base.den * 2;
    outlink->w             = ctx->inputs[0]->w;
    outlink->h             = ctx->inputs[0]->h;

    if (s->field > 1 || s->field == -2)
        outlink->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
                                       (AVRational){2, 1});

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_GBRP,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
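
/*
 * Editorial note on copy_pad() below: it copies one field of the source plane
 * into a padded working buffer with 32 extra columns on each side and 6 extra
 * lines above and below (matching the +64/+12 sizes set in get_frame()), and
 * fills the borders by mirroring the edges, presumably so the prescreener and
 * predictor windows can read past the visible edges without special cases.
 */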

static void copy_pad(const AVFrame *src, FrameData *frame_data, NNEDIContext *s, int fn)
{
    const int off = 1 - fn;
    int plane, y, x;

    for (plane = 0; plane < s->nb_planes; plane++) {
        const uint8_t *srcp = (const uint8_t *)src->data[plane];
        uint8_t *dstp = (uint8_t *)frame_data->paddedp[plane];

        const int src_stride = src->linesize[plane];
        const int dst_stride = frame_data->padded_stride[plane];

        const int src_height = s->planeheight[plane];
        const int dst_height = frame_data->padded_height[plane];

        const int src_width = s->linesize[plane];
        const int dst_width = frame_data->padded_width[plane];

        int c = 4;

        if (!(s->process_plane & (1 << plane)))
            continue;

        // Copy.
        for (y = off; y < src_height; y += 2)
            memcpy(dstp + 32 + (6 + y) * dst_stride,
                   srcp + y * src_stride,
                   src_width * sizeof(uint8_t));

        // And pad.
        dstp += (6 + off) * dst_stride;
        for (y = 6 + off; y < dst_height - 6; y += 2) {
            int c = 2;

            for (x = 0; x < 32; x++)
                dstp[x] = dstp[64 - x];

            for (x = dst_width - 32; x < dst_width; x++, c += 2)
                dstp[x] = dstp[x - c];

            dstp += dst_stride * 2;
        }

        dstp = (uint8_t *)frame_data->paddedp[plane];

        for (y = off; y < 6; y += 2)
            memcpy(dstp + y * dst_stride,
                   dstp + (12 + 2 * off - y) * dst_stride,
                   dst_width * sizeof(uint8_t));

        for (y = dst_height - 6 + off; y < dst_height; y += 2, c += 4)
            memcpy(dstp + y * dst_stride,
                   dstp + (y - c) * dst_stride,
                   dst_width * sizeof(uint8_t));
    }
}
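
/*
 * elliott() applies the Elliott sigmoid f(x) = x / (1 + |x|), a cheap bounded
 * activation used by the networks in this file. For example f(1) = 0.5 and
 * f(-3) = -0.75, and f saturates towards +/-1 for large |x|.
 */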

static void elliott(float *data, const int n)
{
    int i;

    for (i = 0; i < n; i++)
        data[i] = data[i] / (1.0f + FFABS(data[i]));
}
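
/*
 * dot_prod() is the float path: vals[i] = scale * <data, row i of weights> +
 * bias_i, with the n bias terms stored right after the n*len weight matrix.
 * dot_prods() is the int16 fixed-point variant: it reinterprets the buffers
 * as int16_t and recovers the per-neuron dequantization factor and bias from
 * the float block that follows the quantized weights; the offset
 * ((i >> 2) << 3) + (i & 3) matches the interleaved layout written by init().
 */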

static void dot_prod(NNEDIContext *s, const float *data, const float *weights, float *vals, const int n, const int len, const float *scale)
{
    int i;

    for (i = 0; i < n; i++) {
        float sum;

        sum = s->fdsp->scalarproduct_float(data, &weights[i * len], len);

        vals[i] = sum * scale[0] + weights[n * len + i];
    }
}

static void dot_prods(NNEDIContext *s, const float *dataf, const float *weightsf, float *vals, const int n, const int len, const float *scale)
{
    const int16_t *data = (int16_t *)dataf;
    const int16_t *weights = (int16_t *)weightsf;
    const float *wf = (float *)&weights[n * len];
    int i, j;

    for (i = 0; i < n; i++) {
        int sum = 0, off = ((i >> 2) << 3) + (i & 3);

        for (j = 0; j < len; j++)
            sum += data[j] * weights[i * len + j];

        vals[i] = sum * wf[off] * scale[0] + wf[off + 4];
    }
}
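
/*
 * compute_network0() runs the "original" prescreener: a tiny network over 48
 * input pixels (4 neurons -> 4 neurons -> 4 outputs) whose pairwise output
 * comparison decides, per pixel, whether cheap cubic interpolation is good
 * enough (d[0] = 1) or the full predictor network has to be run later
 * (d[0] = 0). compute_network0_i16() is the same network with an int16
 * first layer.
 */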

static void compute_network0(NNEDIContext *s, const float *input, const float *weights, uint8_t *d)
{
    float t, temp[12], scale = 1.0f;

    dot_prod(s, input, weights, temp, 4, 48, &scale);
    t = temp[0];
    elliott(temp, 4);
    temp[0] = t;
    dot_prod(s, temp, weights + 4 * 49, temp + 4, 4, 4, &scale);
    elliott(temp + 4, 4);
    dot_prod(s, temp, weights + 4 * 49 + 4 * 5, temp + 8, 4, 8, &scale);
    if (FFMAX(temp[10], temp[11]) <= FFMAX(temp[8], temp[9]))
        d[0] = 1;
    else
        d[0] = 0;
}

static void compute_network0_i16(NNEDIContext *s, const float *inputf, const float *weightsf, uint8_t *d)
{
    const float *wf = weightsf + 2 * 48;
    float t, temp[12], scale = 1.0f;

    dot_prods(s, inputf, weightsf, temp, 4, 48, &scale);
    t = temp[0];
    elliott(temp, 4);
    temp[0] = t;
    dot_prod(s, temp, wf + 8, temp + 4, 4, 4, &scale);
    elliott(temp + 4, 4);
    dot_prod(s, temp, wf + 8 + 4 * 5, temp + 8, 4, 8, &scale);
    if (FFMAX(temp[10], temp[11]) <= FFMAX(temp[8], temp[9]))
        d[0] = 1;
    else
        d[0] = 0;
}

static void pixel2float48(const uint8_t *t8, const int pitch, float *p)
{
    const uint8_t *t = (const uint8_t *)t8;
    int y, x;

    for (y = 0; y < 4; y++)
        for (x = 0; x < 12; x++)
            p[y * 12 + x] = t[y * pitch * 2 + x];
}

static void byte2word48(const uint8_t *t, const int pitch, float *pf)
{
    int16_t *p = (int16_t *)pf;
    int y, x;

    for (y = 0; y < 4; y++)
        for (x = 0; x < 12; x++)
            p[y * 12 + x] = t[y * pitch * 2 + x];
}
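
/*
 * process_line0() fills the pixels the prescreener accepted with a 4-tap
 * vertical cubic interpolation, (19 * (b + c) - 3 * (a + d)) / 32, where
 * a..d are the four field lines around the missing one; e.g. a flat area
 * with a = b = c = d = 100 gives (19*200 - 3*200) / 32 = 100. Rejected
 * pixels are set to the marker value 255 and counted, so evalfunc_1() knows
 * which pixels still need the predictor network.
 */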

static int32_t process_line0(const uint8_t *tempu, int width, uint8_t *dstp8, const uint8_t *src3p8, const int src_pitch, const int max_value, const int chroma)
{
    uint8_t *dstp = (uint8_t *)dstp8;
    const uint8_t *src3p = (const uint8_t *)src3p8;
    int minimum = 0;
    int maximum = max_value - 1; // Technically the -1 is only needed for 8 and 16 bit input.
    int count = 0, x;

    for (x = 0; x < width; x++) {
        if (tempu[x]) {
            int tmp = 19 * (src3p[x + src_pitch * 2] + src3p[x + src_pitch * 4]) - 3 * (src3p[x] + src3p[x + src_pitch * 6]);
            tmp /= 32;
            dstp[x] = FFMAX(FFMIN(tmp, maximum), minimum);
        } else {
            dstp[x] = 255;
            count++;
        }
    }
    return count;
}

// new prescreener functions
static void byte2word64(const uint8_t *t, const int pitch, float *p)
{
    int16_t *ps = (int16_t *)p;
    int y, x;

    for (y = 0; y < 4; y++)
        for (x = 0; x < 16; x++)
            ps[y * 16 + x] = t[y * pitch * 2 + x];
}
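
/*
 * compute_network0new() is the "new" prescreener: 64 int16 input pixels feed
 * 4 Elliott-activated neurons, a 4x4 second layer produces 4 scores, and one
 * decision bit per score is packed into a 32-bit mask (one byte each), so a
 * single call classifies 4 consecutive output pixels at once (hence the
 * x += 4 stepping in evalfunc_0()).
 */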

static void compute_network0new(NNEDIContext *s, const float *datai, const float *weights, uint8_t *d)
{
    int16_t *data = (int16_t *)datai;
    int16_t *ws = (int16_t *)weights;
    float *wf = (float *)&ws[4 * 64];
    float vals[8];
    int mask, i, j;

    for (i = 0; i < 4; i++) {
        int sum = 0;
        float t;

        for (j = 0; j < 64; j++)
            sum += data[j] * ws[(i << 3) + ((j >> 3) << 5) + (j & 7)];

        t = sum * wf[i] + wf[4 + i];
        vals[i] = t / (1.0f + FFABS(t));
    }

    for (i = 0; i < 4; i++) {
        float sum = 0.0f;

        for (j = 0; j < 4; j++)
            sum += vals[j] * wf[8 + i + (j << 2)];

        vals[4 + i] = sum + wf[8 + 16 + i];
    }

    mask = 0;
    for (i = 0; i < 4; i++) {
        if (vals[4 + i] > 0.0f)
            mask |= (0x1 << (i << 3));
    }

    ((int *)d)[0] = mask;
}
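
/*
 * First pass over a field: copy the lines that already exist, then for each
 * missing line either run the selected prescreener plus cubic interpolation
 * (pscrn != 0) or mark every pixel with 255 (pscrn == 0, no prescreening).
 * lcount[] appears to accumulate, per line, how many pixels were left for
 * the predictor network in the second pass.
 */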

static void evalfunc_0(NNEDIContext *s, FrameData *frame_data)
{
    float *input = frame_data->input;
    const float *weights0 = s->weights0;
    float *temp = frame_data->temp;
    uint8_t *tempu = (uint8_t *)temp;
    int plane, x, y;

    // And now the actual work.
    for (plane = 0; plane < s->nb_planes; plane++) {
        const uint8_t *srcp = (const uint8_t *)frame_data->paddedp[plane];
        const int src_stride = frame_data->padded_stride[plane] / sizeof(uint8_t);

        const int width = frame_data->padded_width[plane];
        const int height = frame_data->padded_height[plane];

        uint8_t *dstp = (uint8_t *)frame_data->dstp[plane];
        const int dst_stride = frame_data->dst_stride[plane] / sizeof(uint8_t);

        const uint8_t *src3p;
        int ystart, ystop;
        int32_t *lcount;

        if (!(s->process_plane & (1 << plane)))
            continue;

        for (y = 1 - frame_data->field[plane]; y < height - 12; y += 2) {
            memcpy(dstp + y * dst_stride,
                   srcp + 32 + (6 + y) * src_stride,
                   (width - 64) * sizeof(uint8_t));
        }

        ystart = 6 + frame_data->field[plane];
        ystop = height - 6;
        srcp += ystart * src_stride;
        dstp += (ystart - 6) * dst_stride - 32;
        src3p = srcp - src_stride * 3;
        lcount = frame_data->lcount[plane] - 6;

        if (s->pscrn == 1) { // original
            for (y = ystart; y < ystop; y += 2) {
                for (x = 32; x < width - 32; x++) {
                    s->readpixels((const uint8_t *)(src3p + x - 5), src_stride, input);
                    s->compute_network0(s, input, weights0, tempu + x);
                }
                lcount[y] += s->process_line0(tempu + 32, width - 64, (uint8_t *)(dstp + 32), (const uint8_t *)(src3p + 32), src_stride, s->max_value, plane);
                src3p += src_stride * 2;
                dstp += dst_stride * 2;
            }
        } else if (s->pscrn > 1) { // new
            for (y = ystart; y < ystop; y += 2) {
                for (x = 32; x < width - 32; x += 4) {
                    s->readpixels((const uint8_t *)(src3p + x - 6), src_stride, input);
                    s->compute_network0(s, input, weights0, tempu + x);
                }
                lcount[y] += s->process_line0(tempu + 32, width - 64, (uint8_t *)(dstp + 32), (const uint8_t *)(src3p + 32), src_stride, s->max_value, plane);
                src3p += src_stride * 2;
                dstp += dst_stride * 2;
            }
        } else { // no prescreening
            for (y = ystart; y < ystop; y += 2) {
                memset(dstp + 32, 255, (width - 64) * sizeof(uint8_t));
                lcount[y] += width - 64;
                dstp += dst_stride * 2;
            }
        }
    }
}
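
/*
 * extract_m8() gathers the xdia x ydia neighborhood into input[] and computes
 * its mean (mstd[0]), standard deviation (mstd[1]) and its reciprocal
 * (mstd[2]); mstd[3] is cleared here and later accumulates the prediction.
 * extract_m8_i16() below does the same with integer sums for the fixed-point
 * code path.
 */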

static void extract_m8(const uint8_t *srcp8, const int stride, const int xdia, const int ydia, float *mstd, float *input)
{
    // uint8_t or uint16_t or float
    const uint8_t *srcp = (const uint8_t *)srcp8;
    float scale;
    double tmp;

    // int32_t or int64_t or double
    int64_t sum = 0, sumsq = 0;
    int y, x;

    for (y = 0; y < ydia; y++) {
        const uint8_t *srcpT = srcp + y * stride * 2;

        for (x = 0; x < xdia; x++) {
            sum += srcpT[x];
            sumsq += (uint32_t)srcpT[x] * (uint32_t)srcpT[x];
            input[x] = srcpT[x];
        }
        input += xdia;
    }
    scale = 1.0f / (xdia * ydia);
    mstd[0] = sum * scale;
    tmp = (double)sumsq * scale - (double)mstd[0] * mstd[0];
    mstd[3] = 0.0f;
    if (tmp <= FLT_EPSILON)
        mstd[1] = mstd[2] = 0.0f;
    else {
        mstd[1] = sqrt(tmp);
        mstd[2] = 1.0f / mstd[1];
    }
}

static void extract_m8_i16(const uint8_t *srcp, const int stride, const int xdia, const int ydia, float *mstd, float *inputf)
{
    int16_t *input = (int16_t *)inputf;
    float scale;
    int sum = 0, sumsq = 0;
    int y, x;

    for (y = 0; y < ydia; y++) {
        const uint8_t *srcpT = srcp + y * stride * 2;

        for (x = 0; x < xdia; x++) {
            sum += srcpT[x];
            sumsq += srcpT[x] * srcpT[x];
            input[x] = srcpT[x];
        }
        input += xdia;
    }
    scale = 1.0f / (float)(xdia * ydia);
    mstd[0] = sum * scale;
    mstd[1] = sumsq * scale - mstd[0] * mstd[0];
    mstd[3] = 0.0f;
    if (mstd[1] <= FLT_EPSILON)
        mstd[1] = mstd[2] = 0.0f;
    else {
        mstd[1] = sqrt(mstd[1]);
        mstd[2] = 1.0f / mstd[1];
    }
}

static const float exp_lo = -80.0f;
static const float exp_hi = +80.0f;

static void e2_m16(float *s, const int n)
{
    int i;

    for (i = 0; i < n; i++)
        s[i] = exp(av_clipf(s[i], exp_lo, exp_hi));
}

const float min_weight_sum = 1e-10f;
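
/*
 * weighted_avg_elliott_mul5_m16(): the first n network outputs act as
 * (exponentiated) weights and the second n, passed through the Elliott
 * function, as values. Their weighted average, times 5 and rescaled by the
 * neighborhood's standard deviation and mean, is added to mstd[3]; if the
 * weights sum to nearly zero the prediction falls back to the local mean.
 */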

static void weighted_avg_elliott_mul5_m16(const float *w, const int n, float *mstd)
{
    float vsum = 0.0f, wsum = 0.0f;
    int i;

    for (i = 0; i < n; i++) {
        vsum += w[i] * (w[n + i] / (1.0f + FFABS(w[n + i])));
        wsum += w[i];
    }
    if (wsum > min_weight_sum)
        mstd[3] += ((5.0f * vsum) / wsum) * mstd[1] + mstd[0];
    else
        mstd[3] += mstd[0];
}
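
/*
 * Second pass: for every pixel still marked 255 the predictor network is run
 * "qual" times (once per weight set, averaged via the 1/qual scale) on the
 * normalized neighborhood from extract(); the clamped result replaces the
 * marker in the destination plane.
 */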

static void evalfunc_1(NNEDIContext *s, FrameData *frame_data)
{
    float *input = frame_data->input;
    float *temp = frame_data->temp;
    float **weights1 = s->weights1;
    const int qual = s->qual;
    const int asize = s->asize;
    const int nns = s->nns;
    const int xdia = s->xdia;
    const int xdiad2m1 = (xdia / 2) - 1;
    const int ydia = s->ydia;
    const float scale = 1.0f / (float)qual;
    int plane, y, x, i;

    for (plane = 0; plane < s->nb_planes; plane++) {
        const uint8_t *srcp = (const uint8_t *)frame_data->paddedp[plane];
        const int src_stride = frame_data->padded_stride[plane] / sizeof(uint8_t);

        const int width = frame_data->padded_width[plane];
        const int height = frame_data->padded_height[plane];

        uint8_t *dstp = (uint8_t *)frame_data->dstp[plane];
        const int dst_stride = frame_data->dst_stride[plane] / sizeof(uint8_t);

        const int ystart = frame_data->field[plane];
        const int ystop = height - 12;
        const uint8_t *srcpp;

        if (!(s->process_plane & (1 << plane)))
            continue;

        srcp += (ystart + 6) * src_stride;
        dstp += ystart * dst_stride - 32;
        srcpp = srcp - (ydia - 1) * src_stride - xdiad2m1;

        for (y = ystart; y < ystop; y += 2) {
            for (x = 32; x < width - 32; x++) {
                float mstd[4];

                if (dstp[x] != 255)
                    continue;

                s->extract((const uint8_t *)(srcpp + x), src_stride, xdia, ydia, mstd, input);
                for (i = 0; i < qual; i++) {
                    s->dot_prod(s, input, weights1[i], temp, nns * 2, asize, mstd + 2);
                    s->expfunc(temp, nns);
                    s->wae5(temp, nns, mstd);
                }

                dstp[x] = FFMIN(FFMAX((int)(mstd[3] * scale + 0.5f), 0), s->max_value);
            }
            srcpp += src_stride * 2;
            dstp += dst_stride * 2;
        }
    }
}

#define NUM_NSIZE 7
#define NUM_NNS 5

static int roundds(const double f)
{
    if (f - floor(f) >= 0.5)
        return FFMIN((int)ceil(f), 32767);
    return FFMAX((int)floor(f), -32768);
}

static void select_functions(NNEDIContext *s)
{
    s->copy_pad = copy_pad;
    s->evalfunc_0 = evalfunc_0;
    s->evalfunc_1 = evalfunc_1;

    // evalfunc_0
    s->process_line0 = process_line0;

    if (s->pscrn < 2) { // original prescreener
        if (s->fapprox & 1) { // int16 dot products
            s->readpixels = byte2word48;
            s->compute_network0 = compute_network0_i16;
        } else {
            s->readpixels = pixel2float48;
            s->compute_network0 = compute_network0;
        }
    } else { // new prescreener
        // only int16 dot products
        s->readpixels = byte2word64;
        s->compute_network0 = compute_network0new;
    }

    // evalfunc_1
    s->wae5 = weighted_avg_elliott_mul5_m16;

    if (s->fapprox & 2) { // use int16 dot products
        s->extract = extract_m8_i16;
        s->dot_prod = dot_prods;
    } else { // use float dot products
        s->extract = extract_m8;
        s->dot_prod = dot_prod;
    }

    s->expfunc = e2_m16;
}

static int modnpf(const int m, const int n)
{
    if ((m % n) == 0)
        return m;
    return m + n - (m % n);
}
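
/*
 * get_frame() builds one deinterlaced output frame from s->src: it resolves
 * which field to keep (from the "field" option and, for field < 0, the
 * frame's interlacing flags), lazily allocates the padded planes and the
 * input/temp scratch buffers, and then runs copy_pad -> evalfunc_0 ->
 * evalfunc_1 on every selected plane; unselected planes are copied as-is.
 */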

static int get_frame(AVFilterContext *ctx, int is_second)
{
    NNEDIContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *src = s->src;
    FrameData *frame_data;
    int effective_field = s->field;
    size_t temp_size;
    int field_n;
    int plane;

    if (effective_field > 1)
        effective_field -= 2;
    else if (effective_field < 0)
        effective_field += 2;

    if (s->field < 0 && src->interlaced_frame && src->top_field_first == 0)
        effective_field = 0;
    else if (s->field < 0 && src->interlaced_frame && src->top_field_first == 1)
        effective_field = 1;
    else
        effective_field = !effective_field;

    if (s->field > 1 || s->field == -2) {
        if (is_second) {
            field_n = (effective_field == 0);
        } else {
            field_n = (effective_field == 1);
        }
    } else {
        field_n = effective_field;
    }

    s->dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!s->dst)
        return AVERROR(ENOMEM);
    av_frame_copy_props(s->dst, src);
    s->dst->interlaced_frame = 0;

    frame_data = &s->frame_data;

    for (plane = 0; plane < s->nb_planes; plane++) {
        int dst_height = s->planeheight[plane];
        int dst_width = s->linesize[plane];

        const int min_alignment = 16;
        const int min_pad = 10;

        if (!(s->process_plane & (1 << plane))) {
            av_image_copy_plane(s->dst->data[plane], s->dst->linesize[plane],
                                src->data[plane], src->linesize[plane],
                                s->linesize[plane],
                                s->planeheight[plane]);
            continue;
        }

        frame_data->padded_width[plane]  = dst_width + 64;
        frame_data->padded_height[plane] = dst_height + 12;
        frame_data->padded_stride[plane] = modnpf(frame_data->padded_width[plane] + min_pad, min_alignment); // TODO: maybe min_pad is in pixels too?
        if (!frame_data->paddedp[plane]) {
            frame_data->paddedp[plane] = av_malloc_array(frame_data->padded_stride[plane], frame_data->padded_height[plane]);
            if (!frame_data->paddedp[plane])
                return AVERROR(ENOMEM);
        }

        frame_data->dstp[plane] = s->dst->data[plane];
        frame_data->dst_stride[plane] = s->dst->linesize[plane];

        if (!frame_data->lcount[plane]) {
            frame_data->lcount[plane] = av_calloc(dst_height, sizeof(int32_t) * 16);
            if (!frame_data->lcount[plane])
                return AVERROR(ENOMEM);
        } else {
            memset(frame_data->lcount[plane], 0, dst_height * sizeof(int32_t) * 16);
        }

        frame_data->field[plane] = field_n;
    }

    if (!frame_data->input) {
        frame_data->input = av_malloc(512 * sizeof(float));
        if (!frame_data->input)
            return AVERROR(ENOMEM);
    }

    // evalfunc_0 requires at least padded_width[0] bytes.
    // evalfunc_1 requires at least 512 floats.
    if (!frame_data->temp) {
        temp_size = FFMAX(frame_data->padded_width[0], 512 * sizeof(float));
        frame_data->temp = av_malloc(temp_size);
        if (!frame_data->temp)
            return AVERROR(ENOMEM);
    }

    // Copy src to a padded "frame" in frame_data and mirror the edges.
    s->copy_pad(src, frame_data, s, field_n);

    // Handles prescreening and the cubic interpolation.
    s->evalfunc_0(s, frame_data);

    // The rest.
    s->evalfunc_1(s, frame_data);

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *src)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    NNEDIContext *s = ctx->priv;
    int ret;

    if ((s->field > 1 ||
         s->field == -2) && !s->second) {
        goto second;
    } else if (s->field > 1 ||
               s->field == -2) {
        AVFrame *dst;

        s->src = s->second;
        ret = get_frame(ctx, 1);
        if (ret < 0) {
            av_frame_free(&s->dst);
            av_frame_free(&s->src);
            av_frame_free(&s->second);
            return ret;
        }
        dst = s->dst;

        if (src->pts != AV_NOPTS_VALUE &&
            dst->pts != AV_NOPTS_VALUE)
            dst->pts += src->pts;
        else
            dst->pts = AV_NOPTS_VALUE;

        ret = ff_filter_frame(outlink, dst);
        if (ret < 0)
            return ret;
        if (s->eof)
            return 0;
        s->cur_pts = s->second->pts;
        av_frame_free(&s->second);
second:
        if ((s->deint && src->interlaced_frame &&
             !ctx->is_disabled) ||
            (!s->deint && !ctx->is_disabled)) {
            s->second = src;
        }
    }

    if ((s->deint && !src->interlaced_frame) || ctx->is_disabled) {
        AVFrame *dst = av_frame_clone(src);
        if (!dst) {
            av_frame_free(&src);
            av_frame_free(&s->second);
            return AVERROR(ENOMEM);
        }

        if (s->field > 1 || s->field == -2) {
            av_frame_free(&s->second);
            if ((s->deint && src->interlaced_frame) ||
                (!s->deint))
                s->second = src;
        } else {
            av_frame_free(&src);
        }

        if (dst->pts != AV_NOPTS_VALUE)
            dst->pts *= 2;
        return ff_filter_frame(outlink, dst);
    }

    s->src = src;
    ret = get_frame(ctx, 0);
    if (ret < 0) {
        av_frame_free(&s->dst);
        av_frame_free(&s->src);
        av_frame_free(&s->second);
        return ret;
    }

    if (src->pts != AV_NOPTS_VALUE)
        s->dst->pts = src->pts * 2;
    if (s->field <= 1 && s->field > -2) {
        av_frame_free(&src);
        s->src = NULL;
    }

    return ff_filter_frame(outlink, s->dst);
}

static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    NNEDIContext *s = ctx->priv;
    int ret;

    if (s->eof)
        return AVERROR_EOF;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->second) {
        AVFrame *next = av_frame_clone(s->second);

        if (!next)
            return AVERROR(ENOMEM);

        next->pts = s->second->pts * 2 - s->cur_pts;
        s->eof = 1;

        filter_frame(ctx->inputs[0], next);
    } else if (ret < 0) {
        return ret;
    }

    return 0;
}
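
/*
 * Editorial note on init() below: it loads nnedi3_weights.bin, a fixed-size
 * blob of float32 weights holding the prescreener networks followed by every
 * nns/nsize combination of the predictor for both error types (the exact
 * layout is inferred from the offsets computed below), and rewrites the
 * selected sets in place: the per-neuron mean is folded into the weights,
 * the 1.0/127.5 input scaling is absorbed, and, depending on fapprox/pscrn,
 * weights are quantized to int16 with per-neuron scale factors.
 */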

static av_cold int init(AVFilterContext *ctx)
{
    NNEDIContext *s = ctx->priv;
    FILE *weights_file = NULL;
    int64_t expected_size = 13574928;
    int64_t weights_size;
    float *bdata;
    size_t bytes_read;
    const int xdia_table[NUM_NSIZE] = { 8, 16, 32, 48, 8, 16, 32 };
    const int ydia_table[NUM_NSIZE] = { 6, 6, 6, 6, 4, 4, 4 };
    const int nns_table[NUM_NNS] = { 16, 32, 64, 128, 256 };
    const int dims0 = 49 * 4 + 5 * 4 + 9 * 4;
    const int dims0new = 4 * 65 + 4 * 5;
    const int dims1 = nns_table[s->nnsparam] * 2 * (xdia_table[s->nsize] * ydia_table[s->nsize] + 1);
    int dims1tsize = 0;
    int dims1offset = 0;
    int ret = 0, i, j, k;

    weights_file = fopen(s->weights_file, "rb");
    if (!weights_file) {
        av_log(ctx, AV_LOG_ERROR, "No weights file provided, aborting!\n");
        return AVERROR(EINVAL);
    }

    if (fseek(weights_file, 0, SEEK_END)) {
        av_log(ctx, AV_LOG_ERROR, "Couldn't seek to the end of weights file.\n");
        fclose(weights_file);
        return AVERROR(EINVAL);
    }

    weights_size = ftell(weights_file);

    if (weights_size == -1) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Couldn't get size of weights file.\n");
        return AVERROR(EINVAL);
    } else if (weights_size != expected_size) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Unexpected weights file size.\n");
        return AVERROR(EINVAL);
    }

    if (fseek(weights_file, 0, SEEK_SET)) {
        fclose(weights_file);
        av_log(ctx, AV_LOG_ERROR, "Couldn't seek to the start of weights file.\n");
        return AVERROR(EINVAL);
    }

    bdata = (float *)av_malloc(expected_size);
    if (!bdata) {
        fclose(weights_file);
        return AVERROR(ENOMEM);
    }

    bytes_read = fread(bdata, 1, expected_size, weights_file);

    if (bytes_read != (size_t)expected_size) {
        fclose(weights_file);
        ret = AVERROR_INVALIDDATA;
        av_log(ctx, AV_LOG_ERROR, "Couldn't read weights file.\n");
        goto fail;
    }

    fclose(weights_file);

    for (j = 0; j < NUM_NNS; j++) {
        for (i = 0; i < NUM_NSIZE; i++) {
            if (i == s->nsize && j == s->nnsparam)
                dims1offset = dims1tsize;
            dims1tsize += nns_table[j] * 2 * (xdia_table[i] * ydia_table[i] + 1) * 2;
        }
    }

    s->weights0 = av_malloc_array(FFMAX(dims0, dims0new), sizeof(float));
    if (!s->weights0) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    for (i = 0; i < 2; i++) {
        s->weights1[i] = av_malloc_array(dims1, sizeof(float));
        if (!s->weights1[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    // Adjust prescreener weights
    if (s->pscrn >= 2) { // using new prescreener
        const float *bdw;
        int16_t *ws;
        float *wf;
        double mean[4] = { 0.0, 0.0, 0.0, 0.0 };
        int *offt = av_calloc(4 * 64, sizeof(int));

        if (!offt) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        for (j = 0; j < 4; j++)
            for (k = 0; k < 64; k++)
                offt[j * 64 + k] = ((k >> 3) << 5) + ((j & 3) << 3) + (k & 7);

        bdw = bdata + dims0 + dims0new * (s->pscrn - 2);
        ws = (int16_t *)s->weights0;
        wf = (float *)&ws[4 * 64];

        // Calculate mean weight of each first layer neuron
        for (j = 0; j < 4; j++) {
            double cmean = 0.0;

            for (k = 0; k < 64; k++)
                cmean += bdw[offt[j * 64 + k]];

            mean[j] = cmean / 64.0;
        }

        // Factor mean removal and 1.0/127.5 scaling
        // into first layer weights. scale to int16 range
        for (j = 0; j < 4; j++) {
            double scale, mval = 0.0;

            for (k = 0; k < 64; k++)
                mval = FFMAX(mval, FFABS((bdw[offt[j * 64 + k]] - mean[j]) / 127.5));

            scale = 32767.0 / mval;

            for (k = 0; k < 64; k++)
                ws[offt[j * 64 + k]] = roundds(((bdw[offt[j * 64 + k]] - mean[j]) / 127.5) * scale);

            wf[j] = (float)(mval / 32767.0);
        }
        memcpy(wf + 4, bdw + 4 * 64, (dims0new - 4 * 64) * sizeof(float));
        av_free(offt);
    } else { // using old prescreener
        double mean[4] = { 0.0, 0.0, 0.0, 0.0 };

        // Calculate mean weight of each first layer neuron
        for (j = 0; j < 4; j++) {
            double cmean = 0.0;

            for (k = 0; k < 48; k++)
                cmean += bdata[j * 48 + k];

            mean[j] = cmean / 48.0;
        }

        if (s->fapprox & 1) { // use int16 dot products in first layer
            int16_t *ws = (int16_t *)s->weights0;
            float *wf = (float *)&ws[4 * 48];

            // Factor mean removal and 1.0/127.5 scaling
            // into first layer weights. scale to int16 range
            for (j = 0; j < 4; j++) {
                double scale, mval = 0.0;

                for (k = 0; k < 48; k++)
                    mval = FFMAX(mval, FFABS((bdata[j * 48 + k] - mean[j]) / 127.5));

                scale = 32767.0 / mval;

                for (k = 0; k < 48; k++)
                    ws[j * 48 + k] = roundds(((bdata[j * 48 + k] - mean[j]) / 127.5) * scale);

                wf[j] = (float)(mval / 32767.0);
            }
            memcpy(wf + 4, bdata + 4 * 48, (dims0 - 4 * 48) * sizeof(float));
        } else { // use float dot products in first layer
            double half = (1 << 8) - 1;

            half /= 2;

            // Factor mean removal and 1.0/half scaling
            // into first layer weights.
            for (j = 0; j < 4; j++)
                for (k = 0; k < 48; k++)
                    s->weights0[j * 48 + k] = (float)((bdata[j * 48 + k] - mean[j]) / half);

            memcpy(s->weights0 + 4 * 48, bdata + 4 * 48, (dims0 - 4 * 48) * sizeof(float));
        }
    }

    // Adjust prediction weights
    for (i = 0; i < 2; i++) {
        const float *bdataT = bdata + dims0 + dims0new * 3 + dims1tsize * s->etype + dims1offset + i * dims1;
        const int nnst = nns_table[s->nnsparam];
        const int asize = xdia_table[s->nsize] * ydia_table[s->nsize];
        const int boff = nnst * 2 * asize;
        double *mean = (double *)av_calloc(asize + 1 + nnst * 2, sizeof(double));

        if (!mean) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        // Calculate mean weight of each neuron (ignore bias)
        for (j = 0; j < nnst * 2; j++) {
            double cmean = 0.0;

            for (k = 0; k < asize; k++)
                cmean += bdataT[j * asize + k];

            mean[asize + 1 + j] = cmean / (double)asize;
        }

        // Calculate mean softmax neuron
        for (j = 0; j < nnst; j++) {
            for (k = 0; k < asize; k++)
                mean[k] += bdataT[j * asize + k] - mean[asize + 1 + j];

            mean[asize] += bdataT[boff + j];
        }

        for (j = 0; j < asize + 1; j++)
            mean[j] /= (double)(nnst);

        if (s->fapprox & 2) { // use int16 dot products
            int16_t *ws = (int16_t *)s->weights1[i];
            float *wf = (float *)&ws[nnst * 2 * asize];

            // Factor mean removal into weights, remove global offset from
            // softmax neurons, and scale weights to int16 range.
            for (j = 0; j < nnst; j++) { // softmax neurons
                double scale, mval = 0.0;

                for (k = 0; k < asize; k++)
                    mval = FFMAX(mval, FFABS(bdataT[j * asize + k] - mean[asize + 1 + j] - mean[k]));

                scale = 32767.0 / mval;

                for (k = 0; k < asize; k++)
                    ws[j * asize + k] = roundds((bdataT[j * asize + k] - mean[asize + 1 + j] - mean[k]) * scale);

                wf[(j >> 2) * 8 + (j & 3)] = (float)(mval / 32767.0);
                wf[(j >> 2) * 8 + (j & 3) + 4] = (float)(bdataT[boff + j] - mean[asize]);
            }

            for (j = nnst; j < nnst * 2; j++) { // elliott neurons
                double scale, mval = 0.0;

                for (k = 0; k < asize; k++)
                    mval = FFMAX(mval, FFABS(bdataT[j * asize + k] - mean[asize + 1 + j]));

                scale = 32767.0 / mval;

                for (k = 0; k < asize; k++)
                    ws[j * asize + k] = roundds((bdataT[j * asize + k] - mean[asize + 1 + j]) * scale);

                wf[(j >> 2) * 8 + (j & 3)] = (float)(mval / 32767.0);
                wf[(j >> 2) * 8 + (j & 3) + 4] = bdataT[boff + j];
            }
        } else { // use float dot products
            // Factor mean removal into weights, and remove global
            // offset from softmax neurons.
            for (j = 0; j < nnst * 2; j++) {
                for (k = 0; k < asize; k++) {
                    const double q = j < nnst ? mean[k] : 0.0;

                    s->weights1[i][j * asize + k] = (float)(bdataT[j * asize + k] - mean[asize + 1 + j] - q);
                }
                s->weights1[i][boff + j] = (float)(bdataT[boff + j] - (j < nnst ? mean[asize] : 0.0));
            }
        }
        av_free(mean);
    }

    s->nns = nns_table[s->nnsparam];
    s->xdia = xdia_table[s->nsize];
    s->ydia = ydia_table[s->nsize];
    s->asize = xdia_table[s->nsize] * ydia_table[s->nsize];

    s->max_value = 65535 >> 8;

    select_functions(s);

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        ret = AVERROR(ENOMEM);

fail:
    av_free(bdata);
    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    NNEDIContext *s = ctx->priv;
    int i;

    av_freep(&s->weights0);

    for (i = 0; i < 2; i++)
        av_freep(&s->weights1[i]);

    for (i = 0; i < s->nb_planes; i++) {
        av_freep(&s->frame_data.paddedp[i]);
        av_freep(&s->frame_data.lcount[i]);
    }

    av_freep(&s->frame_data.input);
    av_freep(&s->frame_data.temp);
    av_freep(&s->fdsp);
    av_frame_free(&s->second);
}

static const AVFilterPad inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,
        .config_props  = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_nnedi = {
    .name          = "nnedi",
    .description   = NULL_IF_CONFIG_SMALL("Apply neural network edge directed interpolation intra-only deinterlacer."),
    .priv_size     = sizeof(NNEDIContext),
    .priv_class    = &nnedi_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};