swscale_unscaled.c

/*
 * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <inttypes.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include "config.h"
#include "swscale.h"
#include "swscale_internal.h"
#include "rgb2rgb.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/cpu.h"
#include "libavutil/avutil.h"
#include "libavutil/mathematics.h"
#include "libavutil/bswap.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avassert.h"
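
/* BT.601 RGB -> YUV conversion coefficients, scaled to limited range
 * (219/255 for luma, 224/255 for chroma) and stored as Q15 fixed-point
 * values via RGB2YUV_SHIFT. */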
#define RGB2YUV_SHIFT 15
#define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
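
/* 8x8 ordered dither matrices of increasing amplitude; DITHER_COPY below
 * indexes them by the source bit depth (dithers[src_depth - 9]) when
 * reducing a plane's bit depth. */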
DECLARE_ALIGNED(8, const uint8_t, dithers)[8][8][8]={
{
  {   0,  1,  0,  1,  0,  1,  0,  1,},
  {   1,  0,  1,  0,  1,  0,  1,  0,},
  {   0,  1,  0,  1,  0,  1,  0,  1,},
  {   1,  0,  1,  0,  1,  0,  1,  0,},
  {   0,  1,  0,  1,  0,  1,  0,  1,},
  {   1,  0,  1,  0,  1,  0,  1,  0,},
  {   0,  1,  0,  1,  0,  1,  0,  1,},
  {   1,  0,  1,  0,  1,  0,  1,  0,},
},{
  {   1,  2,  1,  2,  1,  2,  1,  2,},
  {   3,  0,  3,  0,  3,  0,  3,  0,},
  {   1,  2,  1,  2,  1,  2,  1,  2,},
  {   3,  0,  3,  0,  3,  0,  3,  0,},
  {   1,  2,  1,  2,  1,  2,  1,  2,},
  {   3,  0,  3,  0,  3,  0,  3,  0,},
  {   1,  2,  1,  2,  1,  2,  1,  2,},
  {   3,  0,  3,  0,  3,  0,  3,  0,},
},{
  {   2,  4,  3,  5,  2,  4,  3,  5,},
  {   6,  0,  7,  1,  6,  0,  7,  1,},
  {   3,  5,  2,  4,  3,  5,  2,  4,},
  {   7,  1,  6,  0,  7,  1,  6,  0,},
  {   2,  4,  3,  5,  2,  4,  3,  5,},
  {   6,  0,  7,  1,  6,  0,  7,  1,},
  {   3,  5,  2,  4,  3,  5,  2,  4,},
  {   7,  1,  6,  0,  7,  1,  6,  0,},
},{
  {   4,  8,  7, 11,  4,  8,  7, 11,},
  {  12,  0, 15,  3, 12,  0, 15,  3,},
  {   6, 10,  5,  9,  6, 10,  5,  9,},
  {  14,  2, 13,  1, 14,  2, 13,  1,},
  {   4,  8,  7, 11,  4,  8,  7, 11,},
  {  12,  0, 15,  3, 12,  0, 15,  3,},
  {   6, 10,  5,  9,  6, 10,  5,  9,},
  {  14,  2, 13,  1, 14,  2, 13,  1,},
},{
  {   9, 17, 15, 23,  8, 16, 14, 22,},
  {  25,  1, 31,  7, 24,  0, 30,  6,},
  {  13, 21, 11, 19, 12, 20, 10, 18,},
  {  29,  5, 27,  3, 28,  4, 26,  2,},
  {   8, 16, 14, 22,  9, 17, 15, 23,},
  {  24,  0, 30,  6, 25,  1, 31,  7,},
  {  12, 20, 10, 18, 13, 21, 11, 19,},
  {  28,  4, 26,  2, 29,  5, 27,  3,},
},{
  {  18, 34, 30, 46, 17, 33, 29, 45,},
  {  50,  2, 62, 14, 49,  1, 61, 13,},
  {  26, 42, 22, 38, 25, 41, 21, 37,},
  {  58, 10, 54,  6, 57,  9, 53,  5,},
  {  16, 32, 28, 44, 19, 35, 31, 47,},
  {  48,  0, 60, 12, 51,  3, 63, 15,},
  {  24, 40, 20, 36, 27, 43, 23, 39,},
  {  56,  8, 52,  4, 59, 11, 55,  7,},
},{
  {  18, 34, 30, 46, 17, 33, 29, 45,},
  {  50,  2, 62, 14, 49,  1, 61, 13,},
  {  26, 42, 22, 38, 25, 41, 21, 37,},
  {  58, 10, 54,  6, 57,  9, 53,  5,},
  {  16, 32, 28, 44, 19, 35, 31, 47,},
  {  48,  0, 60, 12, 51,  3, 63, 15,},
  {  24, 40, 20, 36, 27, 43, 23, 39,},
  {  56,  8, 52,  4, 59, 11, 55,  7,},
},{
  {  36, 68, 60, 92, 34, 66, 58, 90,},
  { 100,  4,124, 28, 98,  2,122, 26,},
  {  52, 84, 44, 76, 50, 82, 42, 74,},
  { 116, 20,108, 12,114, 18,106, 10,},
  {  32, 64, 56, 88, 38, 70, 62, 94,},
  {  96,  0,120, 24,102,  6,126, 30,},
  {  48, 80, 40, 72, 54, 86, 46, 78,},
  { 112, 16,104,  8,118, 22,110, 14,},
}};

static const uint8_t flat64[8] = { 64, 64, 64, 64, 64, 64, 64, 64 };
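
/* Scale factors used together with the dither tables above when reducing a
 * plane from src_depth to dst_depth bits; indexed as
 * dither_scale[dst_depth - 1][src_depth - 1] (see DITHER_COPY below). */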
const uint16_t dither_scale[15][16]={
{    2,    3,    3,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,},
{    2,    3,    7,    7,   13,   13,   25,   25,   25,   25,   25,   25,   25,   25,   25,   25,},
{    3,    3,    4,   15,   15,   29,   57,   57,   57,  113,  113,  113,  113,  113,  113,  113,},
{    3,    4,    4,    5,   31,   31,   61,  121,  241,  241,  241,  241,  481,  481,  481,  481,},
{    3,    4,    5,    5,    6,   63,   63,  125,  249,  497,  993,  993,  993,  993,  993, 1985,},
{    3,    5,    6,    6,    6,    7,  127,  127,  253,  505, 1009, 2017, 4033, 4033, 4033, 4033,},
{    3,    5,    6,    7,    7,    7,    8,  255,  255,  509, 1017, 2033, 4065, 8129,16257,16257,},
{    3,    5,    6,    8,    8,    8,    8,    9,  511,  511, 1021, 2041, 4081, 8161,16321,32641,},
{    3,    5,    7,    8,    9,    9,    9,    9,   10, 1023, 1023, 2045, 4089, 8177,16353,32705,},
{    3,    5,    7,    8,   10,   10,   10,   10,   10,   11, 2047, 2047, 4093, 8185,16369,32737,},
{    3,    5,    7,    8,   10,   11,   11,   11,   11,   11,   12, 4095, 4095, 8189,16377,32753,},
{    3,    5,    7,    9,   10,   12,   12,   12,   12,   12,   12,   13, 8191, 8191,16381,32761,},
{    3,    5,    7,    9,   10,   12,   13,   13,   13,   13,   13,   13,   14,16383,16383,32765,},
{    3,    5,    7,    9,   10,   12,   14,   14,   14,   14,   14,   14,   14,   15,32767,32767,},
{    3,    5,    7,    9,   11,   12,   14,   15,   15,   15,   15,   15,   15,   15,   16,65535,},
};
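
/* Fill an 8-bit plane with a constant value, starting at line y. */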
static void fillPlane(uint8_t *plane, int stride, int width, int height, int y,
                      uint8_t val)
{
    int i;
    uint8_t *ptr = plane + stride * y;
    for (i = 0; i < height; i++) {
        memset(ptr, val, width);
        ptr += stride;
    }
}
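
/* Fill a 16-bit plane with a constant: all ones for an alpha plane, or the
 * mid value (1 << bits) otherwise; 'bits' is the component depth minus one,
 * so 1 << bits is the half-range "grey" value. */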
static void fillPlane16(uint8_t *plane, int stride, int width, int height, int y,
                        int alpha, int bits)
{
    int i, j;
    uint8_t *ptr = plane + stride * y;
    int v = alpha ? -1 : (1 << bits);
    for (i = 0; i < height; i++) {
        for (j = 0; j < width; j++) {
            AV_WN16(ptr + 2 * j, v);
        }
        ptr += stride;
    }
}
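
/* Copy a plane row by row; collapses to a single memcpy() when the source
 * and destination strides match. */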
static void copyPlane(const uint8_t *src, int srcStride,
                      int srcSliceY, int srcSliceH, int width,
                      uint8_t *dst, int dstStride)
{
    dst += dstStride * srcSliceY;
    if (dstStride == srcStride && srcStride > 0) {
        memcpy(dst, src, srcSliceH * dstStride);
    } else {
        int i;
        for (i = 0; i < srcSliceH; i++) {
            memcpy(dst, src, width);
            src += srcStride;
            dst += dstStride;
        }
    }
}
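
/* YUV420/YUVA420 -> NV12/NV21: copy the luma plane and interleave the two
 * chroma planes into the destination's single chroma plane (swapped order
 * for NV21). */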
static int planarToNv12Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t *dstParam[],
                               int dstStride[])
{
    uint8_t *dst = dstParam[1] + dstStride[1] * srcSliceY / 2;

    copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->srcW,
              dstParam[0], dstStride[0]);

    if (c->dstFormat == PIX_FMT_NV12)
        interleaveBytes(src[1], src[2], dst, c->srcW / 2, srcSliceH / 2,
                        srcStride[1], srcStride[2], dstStride[0]);
    else
        interleaveBytes(src[2], src[1], dst, c->srcW / 2, srcSliceH / 2,
                        srcStride[2], srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int planarToYuy2Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;

    yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
               srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int planarToUyvyWrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;

    yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
               srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int yuv422pToYuy2Wrapper(SwsContext *c, const uint8_t *src[],
                                int srcStride[], int srcSliceY, int srcSliceH,
                                uint8_t *dstParam[], int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;

    yuv422ptoyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
                  srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int yuv422pToUyvyWrapper(SwsContext *c, const uint8_t *src[],
                                int srcStride[], int srcSliceY, int srcSliceH,
                                uint8_t *dstParam[], int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;

    yuv422ptouyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
                  srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int yuyvToYuv420Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY / 2;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY / 2;

    yuyvtoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);

    if (dstParam[3])
        fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);

    return srcSliceH;
}

static int yuyvToYuv422Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY;

    yuyvtoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);

    return srcSliceH;
}

static int uyvyToYuv420Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY / 2;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY / 2;

    uyvytoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);

    if (dstParam[3])
        fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);

    return srcSliceH;
}

static int uyvyToYuv422Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY;

    uyvytoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);

    return srcSliceH;
}

static void gray8aToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels,
                             const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++)
        ((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i << 1]] |
                                (src[(i << 1) + 1] << 24);
}

static void gray8aToPacked32_1(const uint8_t *src, uint8_t *dst, int num_pixels,
                               const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++)
        ((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i << 1]] |
                                src[(i << 1) + 1];
}

static void gray8aToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels,
                             const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++) {
        //FIXME slow?
        dst[0] = palette[src[i << 1] * 4 + 0];
        dst[1] = palette[src[i << 1] * 4 + 1];
        dst[2] = palette[src[i << 1] * 4 + 2];
        dst += 3;
    }
}
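
/* Byte-swap a packed format with 16 bits per component (e.g. RGB48, GRAY16):
 * every 16-bit word of each row is swapped while copying from src to dst. */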
static int packed_16bpc_bswap(SwsContext *c, const uint8_t *src[],
                              int srcStride[], int srcSliceY, int srcSliceH,
                              uint8_t *dst[], int dstStride[])
{
    int i, j;
    int srcstr = srcStride[0] >> 1;
    int dststr = dstStride[0] >> 1;
    uint16_t *dstPtr = (uint16_t *) dst[0];
    const uint16_t *srcPtr = (const uint16_t *) src[0];
    int min_stride = FFMIN(srcstr, dststr);

    for (i = 0; i < srcSliceH; i++) {
        for (j = 0; j < min_stride; j++) {
            dstPtr[j] = av_bswap16(srcPtr[j]);
        }
        srcPtr += srcstr;
        dstPtr += dststr;
    }

    return srcSliceH;
}
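
/* Expand PAL8/GRAY8A and other paletted formats to packed 24/32-bit RGB
 * using the palette prepared in c->pal_rgb by sws_scale(). */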
static int palToRgbWrapper(SwsContext *c, const uint8_t *src[], int srcStride[],
                           int srcSliceY, int srcSliceH, uint8_t *dst[],
                           int dstStride[])
{
    const enum PixelFormat srcFormat = c->srcFormat;
    const enum PixelFormat dstFormat = c->dstFormat;
    void (*conv)(const uint8_t *src, uint8_t *dst, int num_pixels,
                 const uint8_t *palette) = NULL;
    int i;
    uint8_t *dstPtr = dst[0] + dstStride[0] * srcSliceY;
    const uint8_t *srcPtr = src[0];

    if (srcFormat == PIX_FMT_GRAY8A) {
        switch (dstFormat) {
        case PIX_FMT_RGB32  : conv = gray8aToPacked32;   break;
        case PIX_FMT_BGR32  : conv = gray8aToPacked32;   break;
        case PIX_FMT_BGR32_1: conv = gray8aToPacked32_1; break;
        case PIX_FMT_RGB32_1: conv = gray8aToPacked32_1; break;
        case PIX_FMT_RGB24  : conv = gray8aToPacked24;   break;
        case PIX_FMT_BGR24  : conv = gray8aToPacked24;   break;
        }
    } else if (usePal(srcFormat)) {
        switch (dstFormat) {
        case PIX_FMT_RGB32  : conv = sws_convertPalette8ToPacked32; break;
        case PIX_FMT_BGR32  : conv = sws_convertPalette8ToPacked32; break;
        case PIX_FMT_BGR32_1: conv = sws_convertPalette8ToPacked32; break;
        case PIX_FMT_RGB32_1: conv = sws_convertPalette8ToPacked32; break;
        case PIX_FMT_RGB24  : conv = sws_convertPalette8ToPacked24; break;
        case PIX_FMT_BGR24  : conv = sws_convertPalette8ToPacked24; break;
        }
    }

    if (!conv)
        av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
               av_get_pix_fmt_name(srcFormat), av_get_pix_fmt_name(dstFormat));
    else {
        for (i = 0; i < srcSliceH; i++) {
            conv(srcPtr, dstPtr, c->srcW, (uint8_t *) c->pal_rgb);
            srcPtr += srcStride[0];
            dstPtr += dstStride[0];
        }
    }

    return srcSliceH;
}

static void gbr24ptopacked24(const uint8_t *src[], int srcStride[],
                             uint8_t *dst, int dstStride, int srcSliceH,
                             int width)
{
    int x, h, i;
    for (h = 0; h < srcSliceH; h++) {
        uint8_t *dest = dst + dstStride * h;
        for (x = 0; x < width; x++) {
            *dest++ = src[0][x];
            *dest++ = src[1][x];
            *dest++ = src[2][x];
        }

        for (i = 0; i < 3; i++)
            src[i] += srcStride[i];
    }
}

static void gbr24ptopacked32(const uint8_t *src[], int srcStride[],
                             uint8_t *dst, int dstStride, int srcSliceH,
                             int alpha_first, int width)
{
    int x, h, i;
    for (h = 0; h < srcSliceH; h++) {
        uint8_t *dest = dst + dstStride * h;

        if (alpha_first) {
            for (x = 0; x < width; x++) {
                *dest++ = 0xff;
                *dest++ = src[0][x];
                *dest++ = src[1][x];
                *dest++ = src[2][x];
            }
        } else {
            for (x = 0; x < width; x++) {
                *dest++ = src[0][x];
                *dest++ = src[1][x];
                *dest++ = src[2][x];
                *dest++ = 0xff;
            }
        }

        for (i = 0; i < 3; i++)
            src[i] += srcStride[i];
    }
}

static int planarRgbToRgbWrapper(SwsContext *c, const uint8_t *src[],
                                 int srcStride[], int srcSliceY, int srcSliceH,
                                 uint8_t *dst[], int dstStride[])
{
    int alpha_first = 0;
    if (c->srcFormat != PIX_FMT_GBRP) {
        av_log(c, AV_LOG_ERROR, "unsupported planar RGB conversion %s -> %s\n",
               av_get_pix_fmt_name(c->srcFormat),
               av_get_pix_fmt_name(c->dstFormat));
        return srcSliceH;
    }

    switch (c->dstFormat) {
    case PIX_FMT_BGR24:
        gbr24ptopacked24((const uint8_t *[]) { src[1], src[0], src[2] },
                         (int []) { srcStride[1], srcStride[0], srcStride[2] },
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, c->srcW);
        break;

    case PIX_FMT_RGB24:
        gbr24ptopacked24((const uint8_t *[]) { src[2], src[0], src[1] },
                         (int []) { srcStride[2], srcStride[0], srcStride[1] },
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, c->srcW);
        break;

    case PIX_FMT_ARGB:
        alpha_first = 1;
    case PIX_FMT_RGBA:
        gbr24ptopacked32((const uint8_t *[]) { src[2], src[0], src[1] },
                         (int []) { srcStride[2], srcStride[0], srcStride[1] },
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, alpha_first, c->srcW);
        break;

    case PIX_FMT_ABGR:
        alpha_first = 1;
    case PIX_FMT_BGRA:
        gbr24ptopacked32((const uint8_t *[]) { src[1], src[0], src[2] },
                         (int []) { srcStride[1], srcStride[0], srcStride[2] },
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, alpha_first, c->srcW);
        break;

    default:
        av_log(c, AV_LOG_ERROR,
               "unsupported planar RGB conversion %s -> %s\n",
               av_get_pix_fmt_name(c->srcFormat),
               av_get_pix_fmt_name(c->dstFormat));
    }

    return srcSliceH;
}

#define isRGBA32(x) (            \
           (x) == PIX_FMT_ARGB   \
        || (x) == PIX_FMT_RGBA   \
        || (x) == PIX_FMT_BGRA   \
        || (x) == PIX_FMT_ABGR   \
        )

#define isRGBA64(x) (                \
           (x) == PIX_FMT_RGBA64LE   \
        || (x) == PIX_FMT_RGBA64BE   \
        || (x) == PIX_FMT_BGRA64LE   \
        || (x) == PIX_FMT_BGRA64BE   \
        )

#define isRGB48(x) (                \
           (x) == PIX_FMT_RGB48LE   \
        || (x) == PIX_FMT_RGB48BE   \
        || (x) == PIX_FMT_BGR48LE   \
        || (x) == PIX_FMT_BGR48BE   \
        )

/* {RGB,BGR}{15,16,24,32,32_1} -> {RGB,BGR}{15,16,24,32} */
typedef void (* rgbConvFn) (const uint8_t *, uint8_t *, int);
static rgbConvFn findRgbConvFn(SwsContext *c)
{
    const enum PixelFormat srcFormat = c->srcFormat;
    const enum PixelFormat dstFormat = c->dstFormat;
    const int srcId = c->srcFormatBpp;
    const int dstId = c->dstFormatBpp;
    rgbConvFn conv = NULL;

#define IS_NOT_NE(bpp, fmt) \
    (((bpp + 7) >> 3) == 2 && \
     (!(av_pix_fmt_descriptors[fmt].flags & PIX_FMT_BE) != !HAVE_BIGENDIAN))

#define CONV_IS(src, dst) (srcFormat == PIX_FMT_##src && dstFormat == PIX_FMT_##dst)

    if (isRGBA32(srcFormat) && isRGBA32(dstFormat)) {
        if (     CONV_IS(ABGR, RGBA)
              || CONV_IS(ARGB, BGRA)
              || CONV_IS(BGRA, ARGB)
              || CONV_IS(RGBA, ABGR)) conv = shuffle_bytes_3210;
        else if (CONV_IS(ABGR, ARGB)
              || CONV_IS(ARGB, ABGR)) conv = shuffle_bytes_0321;
        else if (CONV_IS(ABGR, BGRA)
              || CONV_IS(ARGB, RGBA)) conv = shuffle_bytes_1230;
        else if (CONV_IS(BGRA, RGBA)
              || CONV_IS(RGBA, BGRA)) conv = shuffle_bytes_2103;
        else if (CONV_IS(BGRA, ABGR)
              || CONV_IS(RGBA, ARGB)) conv = shuffle_bytes_3012;
    } else if (isRGB48(srcFormat) && isRGB48(dstFormat)) {
        if      (CONV_IS(RGB48LE, BGR48LE)
              || CONV_IS(BGR48LE, RGB48LE)
              || CONV_IS(RGB48BE, BGR48BE)
              || CONV_IS(BGR48BE, RGB48BE)) conv = rgb48tobgr48_nobswap;
        else if (CONV_IS(RGB48LE, BGR48BE)
              || CONV_IS(BGR48LE, RGB48BE)
              || CONV_IS(RGB48BE, BGR48LE)
              || CONV_IS(BGR48BE, RGB48LE)) conv = rgb48tobgr48_bswap;
    } else if (isRGBA64(srcFormat) && isRGB48(dstFormat)) {
        if      (CONV_IS(RGBA64LE, BGR48LE)
              || CONV_IS(BGRA64LE, RGB48LE)
              || CONV_IS(RGBA64BE, BGR48BE)
              || CONV_IS(BGRA64BE, RGB48BE)) conv = rgb64tobgr48_nobswap;
        else if (CONV_IS(RGBA64LE, BGR48BE)
              || CONV_IS(BGRA64LE, RGB48BE)
              || CONV_IS(RGBA64BE, BGR48LE)
              || CONV_IS(BGRA64BE, RGB48LE)) conv = rgb64tobgr48_bswap;
        else if (CONV_IS(RGBA64LE, RGB48LE)
              || CONV_IS(BGRA64LE, BGR48LE)
              || CONV_IS(RGBA64BE, RGB48BE)
              || CONV_IS(BGRA64BE, BGR48BE)) conv = rgb64to48_nobswap;
        else if (CONV_IS(RGBA64LE, RGB48BE)
              || CONV_IS(BGRA64LE, BGR48BE)
              || CONV_IS(RGBA64BE, RGB48LE)
              || CONV_IS(BGRA64BE, BGR48LE)) conv = rgb64to48_bswap;
    } else
    /* BGR -> BGR */
    if ((isBGRinInt(srcFormat) && isBGRinInt(dstFormat)) ||
        (isRGBinInt(srcFormat) && isRGBinInt(dstFormat))) {
        switch (srcId | (dstId << 16)) {
        case 0x000F000C: conv = rgb12to15; break;
        case 0x000F0010: conv = rgb16to15; break;
        case 0x000F0018: conv = rgb24to15; break;
        case 0x000F0020: conv = rgb32to15; break;
        case 0x0010000F: conv = rgb15to16; break;
        case 0x00100018: conv = rgb24to16; break;
        case 0x00100020: conv = rgb32to16; break;
        case 0x0018000F: conv = rgb15to24; break;
        case 0x00180010: conv = rgb16to24; break;
        case 0x00180020: conv = rgb32to24; break;
        case 0x0020000F: conv = rgb15to32; break;
        case 0x00200010: conv = rgb16to32; break;
        case 0x00200018: conv = rgb24to32; break;
        }
    } else if ((isBGRinInt(srcFormat) && isRGBinInt(dstFormat)) ||
               (isRGBinInt(srcFormat) && isBGRinInt(dstFormat))) {
        switch (srcId | (dstId << 16)) {
        case 0x000C000C: conv = rgb12tobgr12; break;
        case 0x000F000F: conv = rgb15tobgr15; break;
        case 0x000F0010: conv = rgb16tobgr15; break;
        case 0x000F0018: conv = rgb24tobgr15; break;
        case 0x000F0020: conv = rgb32tobgr15; break;
        case 0x0010000F: conv = rgb15tobgr16; break;
        case 0x00100010: conv = rgb16tobgr16; break;
        case 0x00100018: conv = rgb24tobgr16; break;
        case 0x00100020: conv = rgb32tobgr16; break;
        case 0x0018000F: conv = rgb15tobgr24; break;
        case 0x00180010: conv = rgb16tobgr24; break;
        case 0x00180018: conv = rgb24tobgr24; break;
        case 0x00180020: conv = rgb32tobgr24; break;
        case 0x0020000F: conv = rgb15tobgr32; break;
        case 0x00200010: conv = rgb16tobgr32; break;
        case 0x00200018: conv = rgb24tobgr32; break;
        }
    }

    if ((dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_BGR32_1) &&
        !isRGBA32(srcFormat) && ALT32_CORR < 0)
        return NULL;

    return conv;
}

/* {RGB,BGR}{15,16,24,32,32_1} -> {RGB,BGR}{15,16,24,32} */
static int rgbToRgbWrapper(SwsContext *c, const uint8_t *src[], int srcStride[],
                           int srcSliceY, int srcSliceH, uint8_t *dst[],
                           int dstStride[])
{
    const enum PixelFormat srcFormat = c->srcFormat;
    const enum PixelFormat dstFormat = c->dstFormat;
    const int srcBpp = (c->srcFormatBpp + 7) >> 3;
    const int dstBpp = (c->dstFormatBpp + 7) >> 3;
    rgbConvFn conv = findRgbConvFn(c);

    if (!conv) {
        av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
               av_get_pix_fmt_name(srcFormat), av_get_pix_fmt_name(dstFormat));
    } else {
        const uint8_t *srcPtr = src[0];
        uint8_t *dstPtr = dst[0];
        int src_bswap = IS_NOT_NE(c->srcFormatBpp, srcFormat);
        int dst_bswap = IS_NOT_NE(c->dstFormatBpp, dstFormat);

        if ((srcFormat == PIX_FMT_RGB32_1 || srcFormat == PIX_FMT_BGR32_1) &&
            !isRGBA32(dstFormat))
            srcPtr += ALT32_CORR;

        if ((dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_BGR32_1) &&
            !isRGBA32(srcFormat))
            dstPtr += ALT32_CORR;

        if (dstStride[0] * srcBpp == srcStride[0] * dstBpp && srcStride[0] > 0 &&
            !(srcStride[0] % srcBpp) && !dst_bswap && !src_bswap)
            conv(srcPtr, dstPtr + dstStride[0] * srcSliceY,
                 srcSliceH * srcStride[0]);
        else {
            int i, j;
            dstPtr += dstStride[0] * srcSliceY;

            for (i = 0; i < srcSliceH; i++) {
                if (src_bswap) {
                    for (j = 0; j < c->srcW; j++)
                        ((uint16_t *) c->formatConvBuffer)[j] =
                            av_bswap16(((uint16_t *) srcPtr)[j]);
                    conv(c->formatConvBuffer, dstPtr, c->srcW * srcBpp);
                } else
                    conv(srcPtr, dstPtr, c->srcW * srcBpp);
                if (dst_bswap)
                    for (j = 0; j < c->srcW; j++)
                        ((uint16_t *) dstPtr)[j] = av_bswap16(((uint16_t *) dstPtr)[j]);
                srcPtr += srcStride[0];
                dstPtr += dstStride[0];
            }
        }
    }
    return srcSliceH;
}

static int bgr24ToYv12Wrapper(SwsContext *c, const uint8_t *src[],
                              int srcStride[], int srcSliceY, int srcSliceH,
                              uint8_t *dst[], int dstStride[])
{
    rgb24toyv12(
        src[0],
        dst[0] +  srcSliceY       * dstStride[0],
        dst[1] + (srcSliceY >> 1) * dstStride[1],
        dst[2] + (srcSliceY >> 1) * dstStride[2],
        c->srcW, srcSliceH,
        dstStride[0], dstStride[1], srcStride[0]);
    if (dst[3])
        fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
    return srcSliceH;
}

static int yvu9ToYv12Wrapper(SwsContext *c, const uint8_t *src[],
                             int srcStride[], int srcSliceY, int srcSliceH,
                             uint8_t *dst[], int dstStride[])
{
    copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->srcW,
              dst[0], dstStride[0]);

    planar2x(src[1], dst[1] + dstStride[1] * (srcSliceY >> 1), c->chrSrcW,
             srcSliceH >> 2, srcStride[1], dstStride[1]);
    planar2x(src[2], dst[2] + dstStride[2] * (srcSliceY >> 1), c->chrSrcW,
             srcSliceH >> 2, srcStride[2], dstStride[2]);
    if (dst[3])
        fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
    return srcSliceH;
}

/* unscaled copy like stuff (assumes nearly identical formats) */
static int packedCopyWrapper(SwsContext *c, const uint8_t *src[],
                             int srcStride[], int srcSliceY, int srcSliceH,
                             uint8_t *dst[], int dstStride[])
{
    if (dstStride[0] == srcStride[0] && srcStride[0] > 0)
        memcpy(dst[0] + dstStride[0] * srcSliceY, src[0], srcSliceH * dstStride[0]);
    else {
        int i;
        const uint8_t *srcPtr = src[0];
        uint8_t *dstPtr = dst[0] + dstStride[0] * srcSliceY;
        int length = 0;

        /* universal length finder */
        while (length + c->srcW <= FFABS(dstStride[0]) &&
               length + c->srcW <= FFABS(srcStride[0]))
            length += c->srcW;
        av_assert1(length != 0);

        for (i = 0; i < srcSliceH; i++) {
            memcpy(dstPtr, srcPtr, length);
            srcPtr += srcStride[0];
            dstPtr += dstStride[0];
        }
    }
    return srcSliceH;
}
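
/* Copy one plane while reducing its bit depth: each source sample is offset
 * by the 8x8 ordered dither matrix for the source depth, multiplied by
 * dither_scale[][] and shifted down to the destination depth. bswap/dbswap
 * are hooks for byte-swapping the input/output samples. */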
#define DITHER_COPY(dst, dstStride, src, srcStride, bswap, dbswap)\
    uint16_t scale= dither_scale[dst_depth-1][src_depth-1];\
    int shift= src_depth-dst_depth + dither_scale[src_depth-2][dst_depth-1];\
    for (i = 0; i < height; i++) {\
        const uint8_t *dither= dithers[src_depth-9][i&7];\
        for (j = 0; j < length-7; j+=8){\
            dst[j+0] = dbswap((bswap(src[j+0]) + dither[0])*scale>>shift);\
            dst[j+1] = dbswap((bswap(src[j+1]) + dither[1])*scale>>shift);\
            dst[j+2] = dbswap((bswap(src[j+2]) + dither[2])*scale>>shift);\
            dst[j+3] = dbswap((bswap(src[j+3]) + dither[3])*scale>>shift);\
            dst[j+4] = dbswap((bswap(src[j+4]) + dither[4])*scale>>shift);\
            dst[j+5] = dbswap((bswap(src[j+5]) + dither[5])*scale>>shift);\
            dst[j+6] = dbswap((bswap(src[j+6]) + dither[6])*scale>>shift);\
            dst[j+7] = dbswap((bswap(src[j+7]) + dither[7])*scale>>shift);\
        }\
        for (; j < length; j++)\
            dst[j] = dbswap((bswap(src[j]) + dither[j&7])*scale>>shift);\
        dst += dstStride;\
        src += srcStride;\
    }
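
/* Generic planar copy between formats with the same chroma subsampling:
 * handles bit-depth changes (dithering when reducing depth), endianness
 * swaps, and plain memcpy when the layouts already match; alpha or chroma
 * planes missing from the source are synthesized with fillPlane()/fillPlane16(). */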
static int planarCopyWrapper(SwsContext *c, const uint8_t *src[],
                             int srcStride[], int srcSliceY, int srcSliceH,
                             uint8_t *dst[], int dstStride[])
{
    int plane, i, j;
    for (plane = 0; plane < 4; plane++) {
        int length = (plane == 0 || plane == 3) ? c->srcW  : -((-c->srcW  ) >> c->chrDstHSubSample);
        int y =      (plane == 0 || plane == 3) ? srcSliceY: -((-srcSliceY) >> c->chrDstVSubSample);
        int height = (plane == 0 || plane == 3) ? srcSliceH: -((-srcSliceH) >> c->chrDstVSubSample);
        const uint8_t *srcPtr = src[plane];
        uint8_t *dstPtr = dst[plane] + dstStride[plane] * y;
        int shiftonly = plane == 1 || plane == 2 || (!c->srcRange && plane == 0);

        if (!dst[plane])
            continue;
        // ignore palette for GRAY8
        if (plane == 1 && !dst[2]) continue;
        if (!src[plane] || (plane == 1 && !src[2])) {
            if (is16BPS(c->dstFormat) || isNBPS(c->dstFormat)) {
                fillPlane16(dst[plane], dstStride[plane], length, height, y,
                            plane == 3,
                            av_pix_fmt_descriptors[c->dstFormat].comp[plane].depth_minus1);
            } else {
                fillPlane(dst[plane], dstStride[plane], length, height, y,
                          (plane == 3) ? 255 : 128);
            }
        } else {
            if (isNBPS(c->srcFormat) || isNBPS(c->dstFormat) ||
                (is16BPS(c->srcFormat) != is16BPS(c->dstFormat))) {
                const int src_depth = av_pix_fmt_descriptors[c->srcFormat].comp[plane].depth_minus1 + 1;
                const int dst_depth = av_pix_fmt_descriptors[c->dstFormat].comp[plane].depth_minus1 + 1;
                const uint16_t *srcPtr2 = (const uint16_t *) srcPtr;
                uint16_t *dstPtr2 = (uint16_t *) dstPtr;

                if (dst_depth == 8) {
                    if (isBE(c->srcFormat) == HAVE_BIGENDIAN) {
                        DITHER_COPY(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, , )
                    } else {
                        DITHER_COPY(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, av_bswap16, )
                    }
                } else if (src_depth == 8) {
                    for (i = 0; i < height; i++) {
#define COPY816(w)\
                        if (shiftonly) {\
                            for (j = 0; j < length; j++)\
                                w(&dstPtr2[j], srcPtr[j]<<(dst_depth-8));\
                        } else {\
                            for (j = 0; j < length; j++)\
                                w(&dstPtr2[j], (srcPtr[j]<<(dst_depth-8)) |\
                                               (srcPtr[j]>>(2*8-dst_depth)));\
                        }
                        if (isBE(c->dstFormat)) {
                            COPY816(AV_WB16)
                        } else {
                            COPY816(AV_WL16)
                        }
                        dstPtr2 += dstStride[plane]/2;
                        srcPtr  += srcStride[plane];
                    }
                } else if (src_depth <= dst_depth) {
                    int orig_length = length;
                    for (i = 0; i < height; i++) {
                        if (isBE(c->srcFormat) == HAVE_BIGENDIAN &&
                            isBE(c->dstFormat) == HAVE_BIGENDIAN &&
                            shiftonly) {
                            unsigned shift = dst_depth - src_depth;
                            length = orig_length;
#if HAVE_FAST_64BIT
#define FAST_COPY_UP(shift) \
                            for (j = 0; j < length - 3; j += 4) { \
                                uint64_t v = AV_RN64A(srcPtr2 + j); \
                                AV_WN64A(dstPtr2 + j, v << shift); \
                            } \
                            length &= 3;
#else
#define FAST_COPY_UP(shift) \
                            for (j = 0; j < length - 1; j += 2) { \
                                uint32_t v = AV_RN32A(srcPtr2 + j); \
                                AV_WN32A(dstPtr2 + j, v << shift); \
                            } \
                            length &= 1;
#endif
                            switch (shift)
                            {
                            case 6: FAST_COPY_UP(6); break;
                            case 7: FAST_COPY_UP(7); break;
                            }
                        }
#define COPY_UP(r,w) \
    if(shiftonly){\
        for (j = 0; j < length; j++){ \
            unsigned int v= r(&srcPtr2[j]);\
            w(&dstPtr2[j], v<<(dst_depth-src_depth));\
        }\
    }else{\
        for (j = 0; j < length; j++){ \
            unsigned int v= r(&srcPtr2[j]);\
            w(&dstPtr2[j], (v<<(dst_depth-src_depth)) | \
                           (v>>(2*src_depth-dst_depth)));\
        }\
    }
                        if (isBE(c->srcFormat)) {
                            if (isBE(c->dstFormat)) {
                                COPY_UP(AV_RB16, AV_WB16)
                            } else {
                                COPY_UP(AV_RB16, AV_WL16)
                            }
                        } else {
                            if (isBE(c->dstFormat)) {
                                COPY_UP(AV_RL16, AV_WB16)
                            } else {
                                COPY_UP(AV_RL16, AV_WL16)
                            }
                        }
                        dstPtr2 += dstStride[plane]/2;
                        srcPtr2 += srcStride[plane]/2;
                    }
                } else {
                    if (isBE(c->srcFormat) == HAVE_BIGENDIAN) {
                        if (isBE(c->dstFormat) == HAVE_BIGENDIAN) {
                            DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , )
                        } else {
                            DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , av_bswap16)
                        }
                    } else {
                        if (isBE(c->dstFormat) == HAVE_BIGENDIAN) {
                            DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, )
                        } else {
                            DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, av_bswap16)
                        }
                    }
                }
            } else if (is16BPS(c->srcFormat) && is16BPS(c->dstFormat) &&
                       isBE(c->srcFormat) != isBE(c->dstFormat)) {
                for (i = 0; i < height; i++) {
                    for (j = 0; j < length; j++)
                        ((uint16_t *) dstPtr)[j] = av_bswap16(((const uint16_t *) srcPtr)[j]);
                    srcPtr += srcStride[plane];
                    dstPtr += dstStride[plane];
                }
            } else if (dstStride[plane] == srcStride[plane] &&
                       srcStride[plane] > 0 && srcStride[plane] == length) {
                memcpy(dst[plane] + dstStride[plane] * y, src[plane],
                       height * dstStride[plane]);
            } else {
                if (is16BPS(c->srcFormat) && is16BPS(c->dstFormat))
                    length *= 2;
                else if (!av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1)
                    length >>= 3; // monowhite/black
                for (i = 0; i < height; i++) {
                    memcpy(dstPtr, srcPtr, length);
                    srcPtr += srcStride[plane];
                    dstPtr += dstStride[plane];
                }
            }
        }
    }
    return srcSliceH;
}

#define IS_DIFFERENT_ENDIANESS(src_fmt, dst_fmt, pix_fmt)          \
    ((src_fmt == pix_fmt ## BE && dst_fmt == pix_fmt ## LE) ||     \
     (src_fmt == pix_fmt ## LE && dst_fmt == pix_fmt ## BE))
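
/* Select a special-cased unscaled converter (c->swScale) for source/destination
 * format pairs that can be handled without the full scaler pipeline. */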
void ff_get_unscaled_swscale(SwsContext *c)
{
    const enum PixelFormat srcFormat = c->srcFormat;
    const enum PixelFormat dstFormat = c->dstFormat;
    const int flags = c->flags;
    const int dstH = c->dstH;
    int needsDither;

    needsDither = isAnyRGB(dstFormat) &&
        c->dstFormatBpp < 24 &&
        (c->dstFormatBpp < c->srcFormatBpp || (!isAnyRGB(srcFormat)));

    /* yv12_to_nv12 */
    if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) &&
        (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21)) {
        c->swScale = planarToNv12Wrapper;
    }
    /* yuv2bgr */
    if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUV422P ||
         srcFormat == PIX_FMT_YUVA420P) && isAnyRGB(dstFormat) &&
        !(flags & SWS_ACCURATE_RND) && !(dstH & 1)) {
        c->swScale = ff_yuv2rgb_get_func_ptr(c);
    }

    if (srcFormat == PIX_FMT_YUV410P &&
        (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P) &&
        !(flags & SWS_BITEXACT)) {
        c->swScale = yvu9ToYv12Wrapper;
    }

    /* bgr24toYV12 */
    if (srcFormat == PIX_FMT_BGR24 &&
        (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P) &&
        !(flags & SWS_ACCURATE_RND))
        c->swScale = bgr24ToYv12Wrapper;

    /* RGB/BGR -> RGB/BGR (no dither needed forms) */
    if (isAnyRGB(srcFormat) && isAnyRGB(dstFormat) && findRgbConvFn(c)
        && (!needsDither || (c->flags & (SWS_FAST_BILINEAR | SWS_POINT))))
        c->swScale = rgbToRgbWrapper;

#define isByteRGB(f) (\
    f == PIX_FMT_RGB32   ||\
    f == PIX_FMT_RGB32_1 ||\
    f == PIX_FMT_RGB24   ||\
    f == PIX_FMT_BGR32   ||\
    f == PIX_FMT_BGR32_1 ||\
    f == PIX_FMT_BGR24)

    if (isAnyRGB(srcFormat) && isPlanar(srcFormat) && isByteRGB(dstFormat))
        c->swScale = planarRgbToRgbWrapper;

    /* bswap 16 bits per pixel/component packed formats */
    if (IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_BGR444) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_BGR48)  ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_BGRA64) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_BGR555) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_BGR565) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_GRAY16) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_RGB444) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_RGB48)  ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_RGBA64) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_RGB555) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, PIX_FMT_RGB565))
        c->swScale = packed_16bpc_bswap;

    if (usePal(srcFormat) && isByteRGB(dstFormat))
        c->swScale = palToRgbWrapper;

    if (srcFormat == PIX_FMT_YUV422P) {
        if (dstFormat == PIX_FMT_YUYV422)
            c->swScale = yuv422pToYuy2Wrapper;
        else if (dstFormat == PIX_FMT_UYVY422)
            c->swScale = yuv422pToUyvyWrapper;
    }

    /* LQ converters if -sws 0 or -sws 4 */
    if (c->flags & (SWS_FAST_BILINEAR | SWS_POINT)) {
        /* yv12_to_yuy2 */
        if (srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) {
            if (dstFormat == PIX_FMT_YUYV422)
                c->swScale = planarToYuy2Wrapper;
            else if (dstFormat == PIX_FMT_UYVY422)
                c->swScale = planarToUyvyWrapper;
        }
    }
    if (srcFormat == PIX_FMT_YUYV422 &&
        (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P))
        c->swScale = yuyvToYuv420Wrapper;
    if (srcFormat == PIX_FMT_UYVY422 &&
        (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P))
        c->swScale = uyvyToYuv420Wrapper;
    if (srcFormat == PIX_FMT_YUYV422 && dstFormat == PIX_FMT_YUV422P)
        c->swScale = yuyvToYuv422Wrapper;
    if (srcFormat == PIX_FMT_UYVY422 && dstFormat == PIX_FMT_YUV422P)
        c->swScale = uyvyToYuv422Wrapper;

#define isPlanarGray(x) (isGray(x) && (x) != PIX_FMT_GRAY8A)
    /* simple copy */
    if ( srcFormat == dstFormat ||
        (srcFormat == PIX_FMT_YUVA420P && dstFormat == PIX_FMT_YUV420P) ||
        (srcFormat == PIX_FMT_YUV420P && dstFormat == PIX_FMT_YUVA420P) ||
        (isPlanarYUV(srcFormat) && isPlanarGray(dstFormat)) ||
        (isPlanarYUV(dstFormat) && isPlanarGray(srcFormat)) ||
        (isPlanarGray(dstFormat) && isPlanarGray(srcFormat)) ||
        (isPlanarYUV(srcFormat) && isPlanarYUV(dstFormat) &&
         c->chrDstHSubSample == c->chrSrcHSubSample &&
         c->chrDstVSubSample == c->chrSrcVSubSample &&
         dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21 &&
         srcFormat != PIX_FMT_NV12 && srcFormat != PIX_FMT_NV21))
    {
        if (isPacked(c->srcFormat))
            c->swScale = packedCopyWrapper;
        else /* Planar YUV or gray */
            c->swScale = planarCopyWrapper;
    }

    if (ARCH_BFIN)
        ff_bfin_get_unscaled_swscale(c);
    if (HAVE_ALTIVEC)
        ff_swscale_get_unscaled_altivec(c);
}

static void reset_ptr(const uint8_t *src[], int format)
{
    if (!isALPHA(format))
        src[3] = NULL;
    if (!isPlanar(format)) {
        src[3] = src[2] = NULL;

        if (!usePal(format))
            src[1] = NULL;
    }
}

static int check_image_pointers(const uint8_t * const data[4], enum PixelFormat pix_fmt,
                                const int linesizes[4])
{
    const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt];
    int i;

    for (i = 0; i < 4; i++) {
        int plane = desc->comp[i].plane;
        if (!data[plane] || !linesizes[plane])
            return 0;
    }

    return 1;
}

/**
 * swscale wrapper, so we don't need to export the SwsContext.
 * Assumes planar YUV to be in YUV order instead of YVU.
 */
int attribute_align_arg sws_scale(struct SwsContext *c,
                                  const uint8_t * const srcSlice[],
                                  const int srcStride[], int srcSliceY,
                                  int srcSliceH, uint8_t *const dst[],
                                  const int dstStride[])
{
    int i, ret;
    const uint8_t *src2[4] = { srcSlice[0], srcSlice[1], srcSlice[2], srcSlice[3] };
    uint8_t *dst2[4] = { dst[0], dst[1], dst[2], dst[3] };
    uint8_t *rgb0_tmp = NULL;

    // do not mess up sliceDir if we have a "trailing" 0-size slice
    if (srcSliceH == 0)
        return 0;

    if (!check_image_pointers(srcSlice, c->srcFormat, srcStride)) {
        av_log(c, AV_LOG_ERROR, "bad src image pointers\n");
        return 0;
    }
    if (!check_image_pointers((const uint8_t* const*)dst, c->dstFormat, dstStride)) {
        av_log(c, AV_LOG_ERROR, "bad dst image pointers\n");
        return 0;
    }

    if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
        av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
        return 0;
    }
    if (c->sliceDir == 0) {
        if (srcSliceY == 0) c->sliceDir = 1; else c->sliceDir = -1;
    }

    if (usePal(c->srcFormat)) {
        for (i = 0; i < 256; i++) {
            int p, r, g, b, y, u, v, a = 0xff;
            if (c->srcFormat == PIX_FMT_PAL8) {
                p = ((const uint32_t *)(srcSlice[1]))[i];
                a = (p >> 24) & 0xFF;
                r = (p >> 16) & 0xFF;
                g = (p >>  8) & 0xFF;
                b =  p        & 0xFF;
            } else if (c->srcFormat == PIX_FMT_RGB8) {
                r = ( i >> 5     ) * 36;
                g = ((i >> 2) & 7) * 36;
                b = ( i       & 3) * 85;
            } else if (c->srcFormat == PIX_FMT_BGR8) {
                b = ( i >> 6     ) * 85;
                g = ((i >> 3) & 7) * 36;
                r = ( i       & 7) * 36;
            } else if (c->srcFormat == PIX_FMT_RGB4_BYTE) {
                r = ( i >> 3     ) * 255;
                g = ((i >> 1) & 3) * 85;
                b = ( i       & 1) * 255;
            } else if (c->srcFormat == PIX_FMT_GRAY8 || c->srcFormat == PIX_FMT_GRAY8A) {
                r = g = b = i;
            } else {
                av_assert1(c->srcFormat == PIX_FMT_BGR4_BYTE);
                b = ( i >> 3     ) * 255;
                g = ((i >> 1) & 3) * 85;
                r = ( i       & 1) * 255;
            }
            y = av_clip_uint8((RY * r + GY * g + BY * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
            u = av_clip_uint8((RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
            v = av_clip_uint8((RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
            c->pal_yuv[i] = y + (u << 8) + (v << 16) + (a << 24);

            switch (c->dstFormat) {
            case PIX_FMT_BGR32:
#if !HAVE_BIGENDIAN
            case PIX_FMT_RGB24:
#endif
                c->pal_rgb[i] = r + (g << 8) + (b << 16) + (a << 24);
                break;
            case PIX_FMT_BGR32_1:
#if HAVE_BIGENDIAN
            case PIX_FMT_BGR24:
#endif
                c->pal_rgb[i] = a + (r << 8) + (g << 16) + (b << 24);
                break;
            case PIX_FMT_RGB32_1:
#if HAVE_BIGENDIAN
            case PIX_FMT_RGB24:
#endif
                c->pal_rgb[i] = a + (b << 8) + (g << 16) + (r << 24);
                break;
            case PIX_FMT_RGB32:
#if !HAVE_BIGENDIAN
            case PIX_FMT_BGR24:
#endif
            default:
                c->pal_rgb[i] = b + (g << 8) + (r << 16) + (a << 24);
            }
        }
    }

    if (c->src0Alpha && !c->dst0Alpha && isALPHA(c->dstFormat)) {
        uint8_t *base;
        int x, y;
        rgb0_tmp = av_malloc(FFABS(srcStride[0]) * srcSliceH + 32);
        base = srcStride[0] < 0 ? rgb0_tmp - srcStride[0] * (srcSliceH-1) : rgb0_tmp;
        for (y = 0; y < srcSliceH; y++) {
            memcpy(base + srcStride[0]*y, src2[0] + srcStride[0]*y, 4*c->srcW);
            for (x = c->src0Alpha-1; x < 4*c->srcW; x += 4) {
                base[srcStride[0]*y + x] = 0xFF;
            }
        }
        src2[0] = base;
    }

    // copy strides, so they can safely be modified
    if (c->sliceDir == 1) {
        // slices go from top to bottom
        int srcStride2[4] = { srcStride[0], srcStride[1], srcStride[2],
                              srcStride[3] };
        int dstStride2[4] = { dstStride[0], dstStride[1], dstStride[2],
                              dstStride[3] };

        reset_ptr(src2, c->srcFormat);
        reset_ptr((void*)dst2, c->dstFormat);

        /* reset slice direction at end of frame */
        if (srcSliceY + srcSliceH == c->srcH)
            c->sliceDir = 0;

        ret = c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst2,
                         dstStride2);
    } else {
        // slices go from bottom to top => we flip the image internally
        int srcStride2[4] = { -srcStride[0], -srcStride[1], -srcStride[2],
                              -srcStride[3] };
        int dstStride2[4] = { -dstStride[0], -dstStride[1], -dstStride[2],
                              -dstStride[3] };

        src2[0] += (srcSliceH - 1) * srcStride[0];
        if (!usePal(c->srcFormat))
            src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
        src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
        src2[3] += (srcSliceH - 1) * srcStride[3];

        dst2[0] += ( c->dstH                         - 1) * dstStride[0];
        dst2[1] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[1];
        dst2[2] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[2];
        dst2[3] += ( c->dstH                         - 1) * dstStride[3];

        reset_ptr(src2, c->srcFormat);
        reset_ptr((void*)dst2, c->dstFormat);

        /* reset slice direction at end of frame */
        if (!srcSliceY)
            c->sliceDir = 0;

        ret = c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH,
                         srcSliceH, dst2, dstStride2);
    }

    av_free(rgb0_tmp);
    return ret;
}
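
/*
 * Minimal usage sketch for the wrapper above (illustrative only; the buffers
 * behind src/srcStride and dst/dstStride are assumed to be allocated by the
 * caller, and the width/height/format/flag values are arbitrary examples):
 *
 *     struct SwsContext *ctx = sws_getContext(w, h, PIX_FMT_YUV420P,
 *                                             w, h, PIX_FMT_RGB24,
 *                                             SWS_BILINEAR, NULL, NULL, NULL);
 *     if (ctx) {
 *         // The whole frame is passed as a single slice starting at line 0;
 *         // sws_scale() may also be called once per slice, top to bottom or
 *         // bottom to top, as handled by the sliceDir logic above.
 *         sws_scale(ctx, src, srcStride, 0, h, dst, dstStride);
 *         sws_freeContext(ctx);
 *     }
 */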

/* Convert the 8-bit paletted image to the same packed 32-bit format as the palette */
void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst,
                                   int num_pixels, const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++)
        ((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i]];
}

/* Palette format: ABCD -> dst format: ABC */
void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst,
                                   int num_pixels, const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++) {
        //FIXME slow?
        dst[0] = palette[src[i] * 4 + 0];
        dst[1] = palette[src[i] * 4 + 1];
        dst[2] = palette[src[i] * 4 + 2];
        dst += 3;
    }
}