imgresample.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949
  1. /*
  2. * High quality image resampling with polyphase filters
  3. * Copyright (c) 2001 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file imgresample.c
* High quality image resampling with polyphase filters.
  24. */
  25. #include "avcodec.h"
  26. #include "swscale.h"
  27. #include "dsputil.h"
  28. #ifdef USE_FASTMEMCPY
  29. #include "libvo/fastmemcpy.h"
  30. #endif
  31. #define NB_COMPONENTS 3
  32. #define PHASE_BITS 4
  33. #define NB_PHASES (1 << PHASE_BITS)
  34. #define NB_TAPS 4
  35. #define FCENTER 1 /* index of the center of the filter */
  36. //#define TEST 1 /* Test it */
  37. #define POS_FRAC_BITS 16
  38. #define POS_FRAC (1 << POS_FRAC_BITS)
  39. /* 6 bits precision is needed for MMX */
  40. #define FILTER_BITS 8
  41. #define LINE_BUF_HEIGHT (NB_TAPS * 4)
/* Minimal swscale-compatible context: wraps an ImgReSampleContext
 * together with the cached source/destination pixel formats. */
struct SwsContext {
    struct ImgReSampleContext *resampling_ctx;
    enum PixelFormat src_pix_fmt, dst_pix_fmt;
};
/* State for polyphase resampling of one image (applied per component plane). */
struct ImgReSampleContext {
    int iwidth, iheight, owidth, oheight;          /* input / output dimensions */
    int topBand, bottomBand, leftBand, rightBand;  /* bands cropped from the source */
    int padtop, padbottom, padleft, padright;      /* padding added to the output */
    int pad_owidth, pad_oheight;                   /* output size minus padding */
    /* source step per output pixel, 16.16 fixed point (POS_FRAC_BITS == 16) */
    int h_incr, v_incr;
    DECLARE_ALIGNED_8(int16_t, h_filters[NB_PHASES][NB_TAPS]); /* horizontal filters */
    DECLARE_ALIGNED_8(int16_t, v_filters[NB_PHASES][NB_TAPS]); /* vertical filters */
    uint8_t *line_buf;  /* ring buffer of horizontally filtered lines */
};
  56. void av_build_filter(int16_t *filter, double factor, int tap_count, int phase_count, int scale, int type);
  57. static inline int get_phase(int pos)
  58. {
  59. return ((pos) >> (POS_FRAC_BITS - PHASE_BITS)) & ((1 << PHASE_BITS) - 1);
  60. }
  61. /* This function must be optimized */
  62. static void h_resample_fast(uint8_t *dst, int dst_width, const uint8_t *src,
  63. int src_width, int src_start, int src_incr,
  64. int16_t *filters)
  65. {
  66. int src_pos, phase, sum, i;
  67. const uint8_t *s;
  68. int16_t *filter;
  69. src_pos = src_start;
  70. for(i=0;i<dst_width;i++) {
  71. #ifdef TEST
  72. /* test */
  73. if ((src_pos >> POS_FRAC_BITS) < 0 ||
  74. (src_pos >> POS_FRAC_BITS) > (src_width - NB_TAPS))
  75. av_abort();
  76. #endif
  77. s = src + (src_pos >> POS_FRAC_BITS);
  78. phase = get_phase(src_pos);
  79. filter = filters + phase * NB_TAPS;
  80. #if NB_TAPS == 4
  81. sum = s[0] * filter[0] +
  82. s[1] * filter[1] +
  83. s[2] * filter[2] +
  84. s[3] * filter[3];
  85. #else
  86. {
  87. int j;
  88. sum = 0;
  89. for(j=0;j<NB_TAPS;j++)
  90. sum += s[j] * filter[j];
  91. }
  92. #endif
  93. sum = sum >> FILTER_BITS;
  94. if (sum < 0)
  95. sum = 0;
  96. else if (sum > 255)
  97. sum = 255;
  98. dst[0] = sum;
  99. src_pos += src_incr;
  100. dst++;
  101. }
  102. }
  103. /* This function must be optimized */
  104. static void v_resample(uint8_t *dst, int dst_width, const uint8_t *src,
  105. int wrap, int16_t *filter)
  106. {
  107. int sum, i;
  108. const uint8_t *s;
  109. s = src;
  110. for(i=0;i<dst_width;i++) {
  111. #if NB_TAPS == 4
  112. sum = s[0 * wrap] * filter[0] +
  113. s[1 * wrap] * filter[1] +
  114. s[2 * wrap] * filter[2] +
  115. s[3 * wrap] * filter[3];
  116. #else
  117. {
  118. int j;
  119. uint8_t *s1 = s;
  120. sum = 0;
  121. for(j=0;j<NB_TAPS;j++) {
  122. sum += s1[0] * filter[j];
  123. s1 += wrap;
  124. }
  125. }
  126. #endif
  127. sum = sum >> FILTER_BITS;
  128. if (sum < 0)
  129. sum = 0;
  130. else if (sum > 255)
  131. sum = 255;
  132. dst[0] = sum;
  133. dst++;
  134. s++;
  135. }
  136. }
  137. #ifdef HAVE_MMX
  138. #include "i386/mmx.h"
/* Compute one horizontally filtered pixel into MMX register `reg`:
 * load 4 source bytes, widen to 16-bit with mm7 (zero), multiply-accumulate
 * against the current phase's 4 taps, fold the two 32-bit partial sums,
 * and shift down by FILTER_BITS. Reads and updates the caller's
 * s, src_pos, phase and filter variables, and clobbers mm6. */
#define FILTER4(reg) \
{\
    s = src + (src_pos >> POS_FRAC_BITS);\
    phase = get_phase(src_pos);\
    filter = filters + phase * NB_TAPS;\
    movq_m2r(*s, reg);\
    punpcklbw_r2r(mm7, reg);\
    movq_m2r(*filter, mm6);\
    pmaddwd_r2r(reg, mm6);\
    movq_r2r(mm6, reg);\
    psrlq_i2r(32, reg);\
    paddd_r2r(mm6, reg);\
    psrad_i2r(FILTER_BITS, reg);\
    src_pos += src_incr;\
}

/* Debug helper: print the 64-bit contents of an MMX register. */
#define DUMP(reg) movq_r2m(reg, tmp); printf(#reg "=%016"PRIx64"\n", tmp.uq);
/* XXX: do four pixels at a time */
/* MMX horizontal fast path: same contract as h_resample_fast()
 * (all taps must be in bounds); one pixel per FILTER4 invocation. */
static void h_resample_fast4_mmx(uint8_t *dst, int dst_width,
                                 const uint8_t *src, int src_width,
                                 int src_start, int src_incr, int16_t *filters)
{
    int src_pos, phase;
    const uint8_t *s;
    int16_t *filter;
    mmx_t tmp;

    src_pos = src_start;
    pxor_r2r(mm7, mm7);  /* mm7 = 0: used by FILTER4 to widen bytes */
    /* main loop: 4 output pixels per iteration */
    while (dst_width >= 4) {
        FILTER4(mm0);
        FILTER4(mm1);
        FILTER4(mm2);
        FILTER4(mm3);
        packuswb_r2r(mm7, mm0);
        packuswb_r2r(mm7, mm1);
        packuswb_r2r(mm7, mm3);
        packuswb_r2r(mm7, mm2);
        movq_r2m(mm0, tmp);
        dst[0] = tmp.ub[0];
        movq_r2m(mm1, tmp);
        dst[1] = tmp.ub[0];
        movq_r2m(mm2, tmp);
        dst[2] = tmp.ub[0];
        movq_r2m(mm3, tmp);
        dst[3] = tmp.ub[0];
        dst += 4;
        dst_width -= 4;
    }
    /* remaining 1-3 pixels */
    while (dst_width > 0) {
        FILTER4(mm0);
        packuswb_r2r(mm7, mm0);
        movq_r2m(mm0, tmp);
        dst[0] = tmp.ub[0];
        dst++;
        dst_width--;
    }
    emms();  /* leave the FPU usable again */
}
/* MMX vertical resampling: same contract as v_resample(),
 * producing 4 output pixels per iteration in the main loop. */
static void v_resample4_mmx(uint8_t *dst, int dst_width, const uint8_t *src,
                            int wrap, int16_t *filter)
{
    int sum, i, v;
    const uint8_t *s;
    mmx_t tmp;
    mmx_t coefs[4];

    /* broadcast each of the 4 taps across a whole MMX register */
    for(i=0;i<4;i++) {
        v = filter[i];
        coefs[i].uw[0] = v;
        coefs[i].uw[1] = v;
        coefs[i].uw[2] = v;
        coefs[i].uw[3] = v;
    }

    pxor_r2r(mm7, mm7);  /* mm7 = 0, for byte->word widening */
    s = src;
    while (dst_width >= 4) {
        /* load 4 pixels from each of the 4 source lines, widen, and
           weight each line by its tap */
        movq_m2r(s[0 * wrap], mm0);
        punpcklbw_r2r(mm7, mm0);
        movq_m2r(s[1 * wrap], mm1);
        punpcklbw_r2r(mm7, mm1);
        movq_m2r(s[2 * wrap], mm2);
        punpcklbw_r2r(mm7, mm2);
        movq_m2r(s[3 * wrap], mm3);
        punpcklbw_r2r(mm7, mm3);
        pmullw_m2r(coefs[0], mm0);
        pmullw_m2r(coefs[1], mm1);
        pmullw_m2r(coefs[2], mm2);
        pmullw_m2r(coefs[3], mm3);
        paddw_r2r(mm1, mm0);
        paddw_r2r(mm3, mm2);
        paddw_r2r(mm2, mm0);
        psraw_i2r(FILTER_BITS, mm0);   /* scale back */
        packuswb_r2r(mm7, mm0);        /* saturate to 8 bits */
        movq_r2m(mm0, tmp);
        *(uint32_t *)dst = tmp.ud[0];
        dst += 4;
        s += 4;
        dst_width -= 4;
    }
    /* scalar tail for the remaining 1-3 pixels */
    while (dst_width > 0) {
        sum = s[0 * wrap] * filter[0] +
              s[1 * wrap] * filter[1] +
              s[2 * wrap] * filter[2] +
              s[3 * wrap] * filter[3];
        sum = sum >> FILTER_BITS;
        if (sum < 0)
            sum = 0;
        else if (sum > 255)
            sum = 255;
        dst[0] = sum;
        dst++;
        s++;
        dst_width--;
    }
    emms();
}
  253. #endif
  254. #ifdef HAVE_ALTIVEC
/* Unions for element-wise scalar access to AltiVec vector registers. */
typedef union {
    vector unsigned char v;
    unsigned char c[16];
} vec_uc_t;

typedef union {
    vector signed short v;
    signed short s[8];
} vec_ss_t;
  263. void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
  264. int wrap, int16_t *filter)
  265. {
  266. int sum, i;
  267. const uint8_t *s;
  268. vector unsigned char *tv, tmp, dstv, zero;
  269. vec_ss_t srchv[4], srclv[4], fv[4];
  270. vector signed short zeros, sumhv, sumlv;
  271. s = src;
  272. for(i=0;i<4;i++)
  273. {
  274. /*
  275. The vec_madds later on does an implicit >>15 on the result.
  276. Since FILTER_BITS is 8, and we have 15 bits of magnitude in
  277. a signed short, we have just enough bits to pre-shift our
  278. filter constants <<7 to compensate for vec_madds.
  279. */
  280. fv[i].s[0] = filter[i] << (15-FILTER_BITS);
  281. fv[i].v = vec_splat(fv[i].v, 0);
  282. }
  283. zero = vec_splat_u8(0);
  284. zeros = vec_splat_s16(0);
  285. /*
  286. When we're resampling, we'd ideally like both our input buffers,
  287. and output buffers to be 16-byte aligned, so we can do both aligned
  288. reads and writes. Sadly we can't always have this at the moment, so
  289. we opt for aligned writes, as unaligned writes have a huge overhead.
  290. To do this, do enough scalar resamples to get dst 16-byte aligned.
  291. */
  292. i = (-(int)dst) & 0xf;
  293. while(i>0) {
  294. sum = s[0 * wrap] * filter[0] +
  295. s[1 * wrap] * filter[1] +
  296. s[2 * wrap] * filter[2] +
  297. s[3 * wrap] * filter[3];
  298. sum = sum >> FILTER_BITS;
  299. if (sum<0) sum = 0; else if (sum>255) sum=255;
  300. dst[0] = sum;
  301. dst++;
  302. s++;
  303. dst_width--;
  304. i--;
  305. }
  306. /* Do our altivec resampling on 16 pixels at once. */
  307. while(dst_width>=16) {
  308. /*
  309. Read 16 (potentially unaligned) bytes from each of
  310. 4 lines into 4 vectors, and split them into shorts.
  311. Interleave the multipy/accumulate for the resample
  312. filter with the loads to hide the 3 cycle latency
  313. the vec_madds have.
  314. */
  315. tv = (vector unsigned char *) &s[0 * wrap];
  316. tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[i * wrap]));
  317. srchv[0].v = (vector signed short) vec_mergeh(zero, tmp);
  318. srclv[0].v = (vector signed short) vec_mergel(zero, tmp);
  319. sumhv = vec_madds(srchv[0].v, fv[0].v, zeros);
  320. sumlv = vec_madds(srclv[0].v, fv[0].v, zeros);
  321. tv = (vector unsigned char *) &s[1 * wrap];
  322. tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[1 * wrap]));
  323. srchv[1].v = (vector signed short) vec_mergeh(zero, tmp);
  324. srclv[1].v = (vector signed short) vec_mergel(zero, tmp);
  325. sumhv = vec_madds(srchv[1].v, fv[1].v, sumhv);
  326. sumlv = vec_madds(srclv[1].v, fv[1].v, sumlv);
  327. tv = (vector unsigned char *) &s[2 * wrap];
  328. tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[2 * wrap]));
  329. srchv[2].v = (vector signed short) vec_mergeh(zero, tmp);
  330. srclv[2].v = (vector signed short) vec_mergel(zero, tmp);
  331. sumhv = vec_madds(srchv[2].v, fv[2].v, sumhv);
  332. sumlv = vec_madds(srclv[2].v, fv[2].v, sumlv);
  333. tv = (vector unsigned char *) &s[3 * wrap];
  334. tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[3 * wrap]));
  335. srchv[3].v = (vector signed short) vec_mergeh(zero, tmp);
  336. srclv[3].v = (vector signed short) vec_mergel(zero, tmp);
  337. sumhv = vec_madds(srchv[3].v, fv[3].v, sumhv);
  338. sumlv = vec_madds(srclv[3].v, fv[3].v, sumlv);
  339. /*
  340. Pack the results into our destination vector,
  341. and do an aligned write of that back to memory.
  342. */
  343. dstv = vec_packsu(sumhv, sumlv) ;
  344. vec_st(dstv, 0, (vector unsigned char *) dst);
  345. dst+=16;
  346. s+=16;
  347. dst_width-=16;
  348. }
  349. /*
  350. If there are any leftover pixels, resample them
  351. with the slow scalar method.
  352. */
  353. while(dst_width>0) {
  354. sum = s[0 * wrap] * filter[0] +
  355. s[1 * wrap] * filter[1] +
  356. s[2 * wrap] * filter[2] +
  357. s[3 * wrap] * filter[3];
  358. sum = sum >> FILTER_BITS;
  359. if (sum<0) sum = 0; else if (sum>255) sum=255;
  360. dst[0] = sum;
  361. dst++;
  362. s++;
  363. dst_width--;
  364. }
  365. }
  366. #endif
  367. /* slow version to handle limit cases. Does not need optimisation */
  368. static void h_resample_slow(uint8_t *dst, int dst_width,
  369. const uint8_t *src, int src_width,
  370. int src_start, int src_incr, int16_t *filters)
  371. {
  372. int src_pos, phase, sum, j, v, i;
  373. const uint8_t *s, *src_end;
  374. int16_t *filter;
  375. src_end = src + src_width;
  376. src_pos = src_start;
  377. for(i=0;i<dst_width;i++) {
  378. s = src + (src_pos >> POS_FRAC_BITS);
  379. phase = get_phase(src_pos);
  380. filter = filters + phase * NB_TAPS;
  381. sum = 0;
  382. for(j=0;j<NB_TAPS;j++) {
  383. if (s < src)
  384. v = src[0];
  385. else if (s >= src_end)
  386. v = src_end[-1];
  387. else
  388. v = s[0];
  389. sum += v * filter[j];
  390. s++;
  391. }
  392. sum = sum >> FILTER_BITS;
  393. if (sum < 0)
  394. sum = 0;
  395. else if (sum > 255)
  396. sum = 255;
  397. dst[0] = sum;
  398. src_pos += src_incr;
  399. dst++;
  400. }
  401. }
/* Horizontally resample one full line: split the output into a left edge
 * (slow, edge-clamped), a middle run (fast, no bounds checks) and a right
 * edge (slow again), according to where the NB_TAPS-wide filter window
 * would read outside [0, src_width). */
static void h_resample(uint8_t *dst, int dst_width, const uint8_t *src,
                       int src_width, int src_start, int src_incr,
                       int16_t *filters)
{
    int n, src_end;

    /* leading pixels whose filter window starts before the line */
    if (src_start < 0) {
        n = (0 - src_start + src_incr - 1) / src_incr;
        h_resample_slow(dst, n, src, src_width, src_start, src_incr, filters);
        dst += n;
        dst_width -= n;
        src_start += n * src_incr;
    }
    /* how many pixels the fast path may produce before a window would
       cross the right edge (positions are 16.16 fixed point) */
    src_end = src_start + dst_width * src_incr;
    if (src_end > ((src_width - NB_TAPS) << POS_FRAC_BITS)) {
        n = (((src_width - NB_TAPS + 1) << POS_FRAC_BITS) - 1 - src_start) /
            src_incr;
    } else {
        n = dst_width;
    }
#ifdef HAVE_MMX
    if ((mm_flags & MM_MMX) && NB_TAPS == 4)
        h_resample_fast4_mmx(dst, n,
                             src, src_width, src_start, src_incr, filters);
    else
#endif
        h_resample_fast(dst, n,
                        src, src_width, src_start, src_incr, filters);
    /* trailing pixels handled by the edge-clamping slow path */
    if (n < dst_width) {
        dst += n;
        dst_width -= n;
        src_start += n * src_incr;
        h_resample_slow(dst, dst_width,
                        src, src_width, src_start, src_incr, filters);
    }
}
/* Resample one component plane: horizontally filter source lines on demand
 * into a ring buffer of LINE_BUF_HEIGHT lines, then vertically filter
 * NB_TAPS consecutive buffered lines for each output line. The first
 * NB_TAPS line slots of line_buf mirror the last ones so the vertical
 * filter can always read NB_TAPS contiguous lines. */
static void component_resample(ImgReSampleContext *s,
                               uint8_t *output, int owrap, int owidth, int oheight,
                               uint8_t *input, int iwrap, int iwidth, int iheight)
{
    int src_y, src_y1, last_src_y, ring_y, phase_y, y1, y;
    uint8_t *new_line, *src_line;

    last_src_y = - FCENTER - 1;  /* index of the last source line filtered */
    /* position of the bottom of the filter in the source image */
    src_y = (last_src_y + NB_TAPS) * POS_FRAC;
    ring_y = NB_TAPS; /* position in ring buffer */
    for(y=0;y<oheight;y++) {
        /* apply horizontal filter on new lines from input if needed */
        src_y1 = src_y >> POS_FRAC_BITS;
        while (last_src_y < src_y1) {
            if (++ring_y >= LINE_BUF_HEIGHT + NB_TAPS)
                ring_y = NB_TAPS;
            last_src_y++;
            /* handle limit conditions : replicate line (slightly
               inefficient because we filter multiple times) */
            y1 = last_src_y;
            if (y1 < 0) {
                y1 = 0;
            } else if (y1 >= iheight) {
                y1 = iheight - 1;
            }
            src_line = input + y1 * iwrap;
            new_line = s->line_buf + ring_y * owidth;
            /* apply filter and handle limit cases correctly */
            h_resample(new_line, owidth,
                       src_line, iwidth, - FCENTER * POS_FRAC, s->h_incr,
                       &s->h_filters[0][0]);
            /* handle ring buffer wrapping: mirror the line into the
               low slots so windows crossing the end stay contiguous */
            if (ring_y >= LINE_BUF_HEIGHT) {
                memcpy(s->line_buf + (ring_y - LINE_BUF_HEIGHT) * owidth,
                       new_line, owidth);
            }
        }
        /* apply vertical filter */
        phase_y = get_phase(src_y);
#ifdef HAVE_MMX
        /* MMX path deactivated ("&& 0") because of precision loss */
        if ((mm_flags & MM_MMX) && NB_TAPS == 4 && 0)
            v_resample4_mmx(output, owidth,
                            s->line_buf + (ring_y - NB_TAPS + 1) * owidth, owidth,
                            &s->v_filters[phase_y][0]);
        else
#endif
#ifdef HAVE_ALTIVEC
        if ((mm_flags & MM_ALTIVEC) && NB_TAPS == 4 && FILTER_BITS <= 6)
            v_resample16_altivec(output, owidth,
                                 s->line_buf + (ring_y - NB_TAPS + 1) * owidth, owidth,
                                 &s->v_filters[phase_y][0]);
        else
#endif
            v_resample(output, owidth,
                       s->line_buf + (ring_y - NB_TAPS + 1) * owidth, owidth,
                       &s->v_filters[phase_y][0]);
        src_y += s->v_incr;
        output += owrap;
    }
}
/* Create a resampling context with no source cropping and no output
 * padding. Returns NULL on invalid sizes or allocation failure. */
ImgReSampleContext *img_resample_init(int owidth, int oheight,
                                      int iwidth, int iheight)
{
    return img_resample_full_init(owidth, oheight, iwidth, iheight,
                                  0, 0, 0, 0, 0, 0, 0, 0);
}
  504. ImgReSampleContext *img_resample_full_init(int owidth, int oheight,
  505. int iwidth, int iheight,
  506. int topBand, int bottomBand,
  507. int leftBand, int rightBand,
  508. int padtop, int padbottom,
  509. int padleft, int padright)
  510. {
  511. ImgReSampleContext *s;
  512. if (!owidth || !oheight || !iwidth || !iheight)
  513. return NULL;
  514. s = av_mallocz(sizeof(ImgReSampleContext));
  515. if (!s)
  516. return NULL;
  517. if((unsigned)owidth >= UINT_MAX / (LINE_BUF_HEIGHT + NB_TAPS))
  518. return NULL;
  519. s->line_buf = av_mallocz(owidth * (LINE_BUF_HEIGHT + NB_TAPS));
  520. if (!s->line_buf)
  521. goto fail;
  522. s->owidth = owidth;
  523. s->oheight = oheight;
  524. s->iwidth = iwidth;
  525. s->iheight = iheight;
  526. s->topBand = topBand;
  527. s->bottomBand = bottomBand;
  528. s->leftBand = leftBand;
  529. s->rightBand = rightBand;
  530. s->padtop = padtop;
  531. s->padbottom = padbottom;
  532. s->padleft = padleft;
  533. s->padright = padright;
  534. s->pad_owidth = owidth - (padleft + padright);
  535. s->pad_oheight = oheight - (padtop + padbottom);
  536. s->h_incr = ((iwidth - leftBand - rightBand) * POS_FRAC) / s->pad_owidth;
  537. s->v_incr = ((iheight - topBand - bottomBand) * POS_FRAC) / s->pad_oheight;
  538. av_build_filter(&s->h_filters[0][0], (float) s->pad_owidth /
  539. (float) (iwidth - leftBand - rightBand), NB_TAPS, NB_PHASES, 1<<FILTER_BITS, 0);
  540. av_build_filter(&s->v_filters[0][0], (float) s->pad_oheight /
  541. (float) (iheight - topBand - bottomBand), NB_TAPS, NB_PHASES, 1<<FILTER_BITS, 0);
  542. return s;
  543. fail:
  544. av_free(s);
  545. return NULL;
  546. }
  547. void img_resample(ImgReSampleContext *s,
  548. AVPicture *output, const AVPicture *input)
  549. {
  550. int i, shift;
  551. uint8_t* optr;
  552. for (i=0;i<3;i++) {
  553. shift = (i == 0) ? 0 : 1;
  554. optr = output->data[i] + (((output->linesize[i] *
  555. s->padtop) + s->padleft) >> shift);
  556. component_resample(s, optr, output->linesize[i],
  557. s->pad_owidth >> shift, s->pad_oheight >> shift,
  558. input->data[i] + (input->linesize[i] *
  559. (s->topBand >> shift)) + (s->leftBand >> shift),
  560. input->linesize[i], ((s->iwidth - s->leftBand -
  561. s->rightBand) >> shift),
  562. (s->iheight - s->topBand - s->bottomBand) >> shift);
  563. }
  564. }
  565. void img_resample_close(ImgReSampleContext *s)
  566. {
  567. av_free(s->line_buf);
  568. av_free(s);
  569. }
  570. struct SwsContext *sws_getContext(int srcW, int srcH, int srcFormat,
  571. int dstW, int dstH, int dstFormat,
  572. int flags, SwsFilter *srcFilter,
  573. SwsFilter *dstFilter, double *param)
  574. {
  575. struct SwsContext *ctx;
  576. ctx = av_malloc(sizeof(struct SwsContext));
  577. if (ctx == NULL) {
  578. av_log(NULL, AV_LOG_ERROR, "Cannot allocate a resampling context!\n");
  579. return NULL;
  580. }
  581. if ((srcH != dstH) || (srcW != dstW)) {
  582. if ((srcFormat != PIX_FMT_YUV420P) || (dstFormat != PIX_FMT_YUV420P)) {
  583. av_log(NULL, AV_LOG_INFO, "PIX_FMT_YUV420P will be used as an intermediate format for rescaling\n");
  584. }
  585. ctx->resampling_ctx = img_resample_init(dstW, dstH, srcW, srcH);
  586. } else {
  587. ctx->resampling_ctx = av_malloc(sizeof(ImgReSampleContext));
  588. ctx->resampling_ctx->iheight = srcH;
  589. ctx->resampling_ctx->iwidth = srcW;
  590. ctx->resampling_ctx->oheight = dstH;
  591. ctx->resampling_ctx->owidth = dstW;
  592. }
  593. ctx->src_pix_fmt = srcFormat;
  594. ctx->dst_pix_fmt = dstFormat;
  595. return ctx;
  596. }
  597. void sws_freeContext(struct SwsContext *ctx)
  598. {
  599. if (!ctx)
  600. return;
  601. if ((ctx->resampling_ctx->iwidth != ctx->resampling_ctx->owidth) ||
  602. (ctx->resampling_ctx->iheight != ctx->resampling_ctx->oheight)) {
  603. img_resample_close(ctx->resampling_ctx);
  604. } else {
  605. av_free(ctx->resampling_ctx);
  606. }
  607. av_free(ctx);
  608. }
  609. /**
  610. * Checks if context is valid or reallocs a new one instead.
  611. * If context is NULL, just calls sws_getContext() to get a new one.
  612. * Otherwise, checks if the parameters are the same already saved in context.
  613. * If that is the case, returns the current context.
  614. * Otherwise, frees context and gets a new one.
  615. *
  616. * Be warned that srcFilter, dstFilter are not checked, they are
* assumed to remain valid.
  618. */
  619. struct SwsContext *sws_getCachedContext(struct SwsContext *ctx,
  620. int srcW, int srcH, int srcFormat,
  621. int dstW, int dstH, int dstFormat, int flags,
  622. SwsFilter *srcFilter, SwsFilter *dstFilter, double *param)
  623. {
  624. if (ctx != NULL) {
  625. if ((ctx->resampling_ctx->iwidth != srcW) ||
  626. (ctx->resampling_ctx->iheight != srcH) ||
  627. (ctx->src_pix_fmt != srcFormat) ||
  628. (ctx->resampling_ctx->owidth != dstW) ||
  629. (ctx->resampling_ctx->oheight != dstH) ||
  630. (ctx->dst_pix_fmt != dstFormat))
  631. {
  632. sws_freeContext(ctx);
  633. ctx = NULL;
  634. }
  635. }
  636. if (ctx == NULL) {
  637. return sws_getContext(srcW, srcH, srcFormat,
  638. dstW, dstH, dstFormat, flags,
  639. srcFilter, dstFilter, param);
  640. }
  641. return ctx;
  642. }
/* swscale-compatible scaler: converts the input to YUV420P if needed,
 * rescales with img_resample() when the sizes differ, then converts or
 * copies the result to the destination format.
 * Returns 0 on success, -1 on allocation or conversion failure.
 * NOTE(review): srcSliceY/srcSliceH are accepted but ignored — the whole
 * picture is always processed; confirm callers pass full-frame slices. */
int sws_scale(struct SwsContext *ctx, uint8_t* src[], int srcStride[],
              int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[])
{
    AVPicture src_pict, dst_pict;
    int i, res = 0;
    AVPicture picture_format_temp;
    AVPicture picture_resample_temp, *formatted_picture, *resampled_picture;
    uint8_t *buf1 = NULL, *buf2 = NULL;
    enum PixelFormat current_pix_fmt;

    /* wrap the raw plane pointers and strides into AVPicture structs */
    for (i = 0; i < 4; i++) {
        src_pict.data[i] = src[i];
        src_pict.linesize[i] = srcStride[i];
        dst_pict.data[i] = dst[i];
        dst_pict.linesize[i] = dstStride[i];
    }
    if ((ctx->resampling_ctx->iwidth != ctx->resampling_ctx->owidth) ||
        (ctx->resampling_ctx->iheight != ctx->resampling_ctx->oheight)) {
        /* We have to rescale the picture, but only YUV420P rescaling is supported... */
        if (ctx->src_pix_fmt != PIX_FMT_YUV420P) {
            int size;

            /* create temporary picture for rescaling input */
            size = avpicture_get_size(PIX_FMT_YUV420P, ctx->resampling_ctx->iwidth, ctx->resampling_ctx->iheight);
            buf1 = av_malloc(size);
            if (!buf1) {
                res = -1;
                goto the_end;
            }
            formatted_picture = &picture_format_temp;
            avpicture_fill((AVPicture*)formatted_picture, buf1,
                           PIX_FMT_YUV420P, ctx->resampling_ctx->iwidth, ctx->resampling_ctx->iheight);

            if (img_convert((AVPicture*)formatted_picture, PIX_FMT_YUV420P,
                            &src_pict, ctx->src_pix_fmt,
                            ctx->resampling_ctx->iwidth, ctx->resampling_ctx->iheight) < 0) {
                av_log(NULL, AV_LOG_ERROR, "pixel format conversion not handled\n");
                res = -1;
                goto the_end;
            }
        } else {
            formatted_picture = &src_pict;
        }

        if (ctx->dst_pix_fmt != PIX_FMT_YUV420P) {
            int size;

            /* create temporary picture for rescaling output */
            size = avpicture_get_size(PIX_FMT_YUV420P, ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight);
            buf2 = av_malloc(size);
            if (!buf2) {
                res = -1;
                goto the_end;
            }
            resampled_picture = &picture_resample_temp;
            avpicture_fill((AVPicture*)resampled_picture, buf2,
                           PIX_FMT_YUV420P, ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight);
        } else {
            resampled_picture = &dst_pict;
        }

        /* ...and finally rescale!!! */
        img_resample(ctx->resampling_ctx, resampled_picture, formatted_picture);
        current_pix_fmt = PIX_FMT_YUV420P;
    } else {
        /* same size: no rescaling, the input goes straight to conversion */
        resampled_picture = &src_pict;
        current_pix_fmt = ctx->src_pix_fmt;
    }

    if (current_pix_fmt != ctx->dst_pix_fmt) {
        if (img_convert(&dst_pict, ctx->dst_pix_fmt,
                        resampled_picture, current_pix_fmt,
                        ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight) < 0) {
            av_log(NULL, AV_LOG_ERROR, "pixel format conversion not handled\n");
            res = -1;
            goto the_end;
        }
    } else if (resampled_picture != &dst_pict) {
        av_picture_copy(&dst_pict, resampled_picture, current_pix_fmt,
                        ctx->resampling_ctx->owidth, ctx->resampling_ctx->oheight);
    }

the_end:
    /* av_free(NULL) is a no-op, so the unconditional frees are safe */
    av_free(buf1);
    av_free(buf2);
    return res;
}
  722. #ifdef TEST
  723. #include <stdio.h>
  724. /* input */
  725. #define XSIZE 256
  726. #define YSIZE 256
  727. uint8_t img[XSIZE * YSIZE];
  728. /* output */
  729. #define XSIZE1 512
  730. #define YSIZE1 512
  731. uint8_t img1[XSIZE1 * YSIZE1];
  732. uint8_t img2[XSIZE1 * YSIZE1];
  733. void save_pgm(const char *filename, uint8_t *img, int xsize, int ysize)
  734. {
  735. #undef fprintf
  736. FILE *f;
  737. f=fopen(filename,"w");
  738. fprintf(f,"P5\n%d %d\n%d\n", xsize, ysize, 255);
  739. fwrite(img,1, xsize * ysize,f);
  740. fclose(f);
  741. #define fprintf please_use_av_log
  742. }
  743. static void dump_filter(int16_t *filter)
  744. {
  745. int i, ph;
  746. for(ph=0;ph<NB_PHASES;ph++) {
  747. av_log(NULL, AV_LOG_INFO, "%2d: ", ph);
  748. for(i=0;i<NB_TAPS;i++) {
  749. av_log(NULL, AV_LOG_INFO, " %5.2f", filter[ph * NB_TAPS + i] / 256.0);
  750. }
  751. av_log(NULL, AV_LOG_INFO, "\n");
  752. }
  753. }
  754. #ifdef HAVE_MMX
  755. int mm_flags;
  756. #endif
/* Stand-alone test driver (built only with -DTEST): renders a synthetic
 * test pattern, resamples it at several scale factors, writes PGM files
 * to /tmp, and cross-checks the MMX path against the C path. */
int main(int argc, char **argv)
{
    int x, y, v, i, xsize, ysize;
    ImgReSampleContext *s;
    float fact, factors[] = { 1/2.0, 3.0/4.0, 1.0, 4.0/3.0, 16.0/9.0, 2.0 };
    char buf[256];

    /* build test image: quadrants of checkerboards, stripes and ramps */
    for(y=0;y<YSIZE;y++) {
        for(x=0;x<XSIZE;x++) {
            if (x < XSIZE/2 && y < YSIZE/2) {
                if (x < XSIZE/4 && y < YSIZE/4) {
                    if ((x % 10) <= 6 &&
                        (y % 10) <= 6)
                        v = 0xff;
                    else
                        v = 0x00;
                } else if (x < XSIZE/4) {
                    if (x & 1)
                        v = 0xff;
                    else
                        v = 0;
                } else if (y < XSIZE/4) {
                    if (y & 1)
                        v = 0xff;
                    else
                        v = 0;
                } else {
                    if (y < YSIZE*3/8) {
                        if ((y+x) & 1)
                            v = 0xff;
                        else
                            v = 0;
                    } else {
                        if (((x+3) % 4) <= 1 &&
                            ((y+3) % 4) <= 1)
                            v = 0xff;
                        else
                            v = 0x00;
                    }
                }
            } else if (x < XSIZE/2) {
                v = ((x - (XSIZE/2)) * 255) / (XSIZE/2);
            } else if (y < XSIZE/2) {
                v = ((y - (XSIZE/2)) * 255) / (XSIZE/2);
            } else {
                v = ((x + y - XSIZE) * 255) / XSIZE;
            }
            img[(YSIZE - y) * XSIZE + (XSIZE - x)] = v;
        }
    }
    save_pgm("/tmp/in.pgm", img, XSIZE, YSIZE);
    /* resample with several factors, cropping 50 lines top and bottom */
    for(i=0;i<sizeof(factors)/sizeof(float);i++) {
        fact = factors[i];
        xsize = (int)(XSIZE * fact);
        ysize = (int)((YSIZE - 100) * fact);
        s = img_resample_full_init(xsize, ysize, XSIZE, YSIZE, 50, 50, 0, 0, 0, 0, 0, 0);
        av_log(NULL, AV_LOG_INFO, "Factor=%0.2f\n", fact);
        dump_filter(&s->h_filters[0][0]);
        component_resample(s, img1, xsize, xsize, ysize,
                           img + 50 * XSIZE, XSIZE, XSIZE, YSIZE - 100);
        img_resample_close(s);

        snprintf(buf, sizeof(buf), "/tmp/out%d.pgm", i);
        save_pgm(buf, img1, xsize, ysize);
    }

    /* mmx test: the MMX path must produce bit-identical output to C */
#ifdef HAVE_MMX
    av_log(NULL, AV_LOG_INFO, "MMX test\n");
    fact = 0.72;
    xsize = (int)(XSIZE * fact);
    ysize = (int)(YSIZE * fact);
    mm_flags = MM_MMX;
    s = img_resample_init(xsize, ysize, XSIZE, YSIZE);
    component_resample(s, img1, xsize, xsize, ysize,
                       img, XSIZE, XSIZE, YSIZE);
    mm_flags = 0;
    s = img_resample_init(xsize, ysize, XSIZE, YSIZE);
    component_resample(s, img2, xsize, xsize, ysize,
                       img, XSIZE, XSIZE, YSIZE);
    if (memcmp(img1, img2, xsize * ysize) != 0) {
        av_log(NULL, AV_LOG_ERROR, "mmx error\n");
        exit(1);
    }
    av_log(NULL, AV_LOG_INFO, "MMX OK\n");
#endif
    return 0;
}
  843. #endif