input.c 65 KB

  1. /*
  2. * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <math.h>
  21. #include <stdint.h>
  22. #include <stdio.h>
  23. #include <string.h>
  24. #include "libavutil/avutil.h"
  25. #include "libavutil/bswap.h"
  26. #include "libavutil/cpu.h"
  27. #include "libavutil/intreadwrite.h"
  28. #include "libavutil/mathematics.h"
  29. #include "libavutil/pixdesc.h"
  30. #include "libavutil/avassert.h"
  31. #include "config.h"
  32. #include "rgb2rgb.h"
  33. #include "swscale.h"
  34. #include "swscale_internal.h"
/* Read one 16-bit component, honouring the pixel format's endianness. */
#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))
/* For BGR-ordered 48/64-bit formats the first and third components are
 * swapped relative to RGB order.  These macros map the generically named
 * locals r_b / b_r (used verbatim inside the templates below) onto the
 * correct channel for the given 'origin'.  Do not rename those locals. */
#define r ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? b_r : r_b)
#define b ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? r_b : b_r)
/*
 * Convert one row of packed 16-bit RGBA/BGRA (4 words per pixel) to 16-bit Y.
 * Channel order is resolved by the r/b macros above via 'origin', byte order
 * by input_pixel().  The locals r_b/g/b_r must keep these exact names.
 */
static av_always_inline void
rgb64ToY_c_template(uint16_t *dst, const uint16_t *src, int width,
enum AVPixelFormat origin, int32_t *rgb2yuv)
{
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        unsigned int r_b = input_pixel(&src[i*4+0]);
        unsigned int g = input_pixel(&src[i*4+1]);
        unsigned int b_r = input_pixel(&src[i*4+2]);
        /* weighted sum plus combined rounding/level-offset constant */
        dst[i] = (ry*r + gy*g + by*b + (0x2001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}
/*
 * Convert one row of packed 16-bit RGBA/BGRA to 16-bit U and V planes.
 * src1/src2 must alias (asserted): packed input has a single source row.
 * The locals r_b/g/b_r must keep these exact names for the r/b macros.
 */
static av_always_inline void
rgb64ToUV_c_template(uint16_t *dstU, uint16_t *dstV,
const uint16_t *src1, const uint16_t *src2,
int width, enum AVPixelFormat origin, int32_t *rgb2yuv)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1==src2);
    for (i = 0; i < width; i++) {
        int r_b = input_pixel(&src1[i*4+0]);
        int g = input_pixel(&src1[i*4+1]);
        int b_r = input_pixel(&src1[i*4+2]);
        /* 0x10001<<(SHIFT-1) combines rounding with the chroma mid-point */
        dstU[i] = (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}
/*
 * As rgb64ToUV_c_template, but horizontally 2:1 subsampled: each output
 * chroma sample averages two adjacent input pixels (8 words apart).
 * The locals r_b/g/b_r must keep these exact names for the r/b macros.
 */
static av_always_inline void
rgb64ToUV_half_c_template(uint16_t *dstU, uint16_t *dstV,
const uint16_t *src1, const uint16_t *src2,
int width, enum AVPixelFormat origin, int32_t *rgb2yuv)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1==src2);
    for (i = 0; i < width; i++) {
        /* rounded average of the two neighbouring pixels' components */
        unsigned r_b = (input_pixel(&src1[8 * i + 0]) + input_pixel(&src1[8 * i + 4]) + 1) >> 1;
        unsigned g = (input_pixel(&src1[8 * i + 1]) + input_pixel(&src1[8 * i + 5]) + 1) >> 1;
        unsigned b_r = (input_pixel(&src1[8 * i + 2]) + input_pixel(&src1[8 * i + 6]) + 1) >> 1;
        dstU[i]= (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
        dstV[i]= (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}
/*
 * Instantiate the ToY / ToUV / ToUV_half entry points for one 16-bit
 * RGBA/BGRA layout: each wrapper casts the opaque byte pointers to
 * uint16_t and forwards to the rgb64*_c_template helpers above.
 */
#define rgb64funcs(pattern, BE_LE, origin) \
static void pattern ## 64 ## BE_LE ## ToY_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused0, const uint8_t *unused1,\
                                    int width, uint32_t *rgb2yuv) \
{ \
    const uint16_t *src = (const uint16_t *) _src; \
    uint16_t *dst = (uint16_t *) _dst; \
    rgb64ToY_c_template(dst, src, width, origin, rgb2yuv); \
} \
\
static void pattern ## 64 ## BE_LE ## ToUV_c(uint8_t *_dstU, uint8_t *_dstV, \
                                    const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
                                    int width, uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *) _src1, \
                   *src2 = (const uint16_t *) _src2; \
    uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
    rgb64ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
} \
\
static void pattern ## 64 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, \
                                    const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
                                    int width, uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *) _src1, \
                   *src2 = (const uint16_t *) _src2; \
    uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
    rgb64ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
}

rgb64funcs(rgb, LE, AV_PIX_FMT_RGBA64LE)
rgb64funcs(rgb, BE, AV_PIX_FMT_RGBA64BE)
rgb64funcs(bgr, LE, AV_PIX_FMT_BGRA64LE)
rgb64funcs(bgr, BE, AV_PIX_FMT_BGRA64BE)
/*
 * Convert one row of packed 16-bit RGB/BGR (3 words per pixel, no alpha)
 * to 16-bit Y.  Channel order comes from the r/b macros, byte order from
 * input_pixel().  The locals r_b/g/b_r must keep these exact names.
 */
static av_always_inline void rgb48ToY_c_template(uint16_t *dst,
                                                 const uint16_t *src, int width,
                                                 enum AVPixelFormat origin,
                                                 int32_t *rgb2yuv)
{
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        unsigned int r_b = input_pixel(&src[i * 3 + 0]);
        unsigned int g = input_pixel(&src[i * 3 + 1]);
        unsigned int b_r = input_pixel(&src[i * 3 + 2]);
        /* weighted sum plus combined rounding/level-offset constant */
        dst[i] = (ry*r + gy*g + by*b + (0x2001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}
/*
 * Convert one row of packed 16-bit RGB/BGR to 16-bit U and V planes.
 * src1/src2 must alias (asserted).  The locals r_b/g/b_r must keep these
 * exact names for the r/b macros above.
 */
static av_always_inline void rgb48ToUV_c_template(uint16_t *dstU,
                                                  uint16_t *dstV,
                                                  const uint16_t *src1,
                                                  const uint16_t *src2,
                                                  int width,
                                                  enum AVPixelFormat origin,
                                                  int32_t *rgb2yuv)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        unsigned r_b = input_pixel(&src1[i * 3 + 0]);
        unsigned g = input_pixel(&src1[i * 3 + 1]);
        unsigned b_r = input_pixel(&src1[i * 3 + 2]);
        /* 0x10001<<(SHIFT-1) combines rounding with the chroma mid-point */
        dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}
/*
 * As rgb48ToUV_c_template, but horizontally 2:1 subsampled: each output
 * chroma sample is the rounded average of two adjacent pixels (6 words
 * apart).  The locals r_b/g/b_r must keep these exact names.
 */
static av_always_inline void rgb48ToUV_half_c_template(uint16_t *dstU,
                                                       uint16_t *dstV,
                                                       const uint16_t *src1,
                                                       const uint16_t *src2,
                                                       int width,
                                                       enum AVPixelFormat origin,
                                                       int32_t *rgb2yuv)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        unsigned r_b = (input_pixel(&src1[6 * i + 0]) +
                        input_pixel(&src1[6 * i + 3]) + 1) >> 1;
        unsigned g = (input_pixel(&src1[6 * i + 1]) +
                      input_pixel(&src1[6 * i + 4]) + 1) >> 1;
        unsigned b_r = (input_pixel(&src1[6 * i + 2]) +
                        input_pixel(&src1[6 * i + 5]) + 1) >> 1;
        dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

/* The r/b channel-swap macros and 16-bit input_pixel() are only meant for
 * the 48/64-bit templates above; retire them before the 16/32-bit code. */
#undef r
#undef b
#undef input_pixel
/*
 * Instantiate the ToY / ToUV / ToUV_half entry points for one 16-bit
 * packed RGB/BGR layout: each wrapper casts the opaque byte pointers to
 * uint16_t and forwards to the rgb48*_c_template helpers above.
 */
#define rgb48funcs(pattern, BE_LE, origin) \
static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst, \
                                            const uint8_t *_src, \
                                            const uint8_t *unused0, const uint8_t *unused1,\
                                            int width, \
                                            uint32_t *rgb2yuv) \
{ \
    const uint16_t *src = (const uint16_t *)_src; \
    uint16_t *dst = (uint16_t *)_dst; \
    rgb48ToY_c_template(dst, src, width, origin, rgb2yuv); \
} \
\
static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU, \
                                             uint8_t *_dstV, \
                                             const uint8_t *unused0, \
                                             const uint8_t *_src1, \
                                             const uint8_t *_src2, \
                                             int width, \
                                             uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *)_src1, \
                   *src2 = (const uint16_t *)_src2; \
    uint16_t *dstU = (uint16_t *)_dstU, \
             *dstV = (uint16_t *)_dstV; \
    rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
} \
\
static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, \
                                                  uint8_t *_dstV, \
                                                  const uint8_t *unused0, \
                                                  const uint8_t *_src1, \
                                                  const uint8_t *_src2, \
                                                  int width, \
                                                  uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *)_src1, \
                   *src2 = (const uint16_t *)_src2; \
    uint16_t *dstU = (uint16_t *)_dstU, \
             *dstV = (uint16_t *)_dstV; \
    rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
}

rgb48funcs(rgb, LE, AV_PIX_FMT_RGB48LE)
rgb48funcs(rgb, BE, AV_PIX_FMT_RGB48BE)
rgb48funcs(bgr, LE, AV_PIX_FMT_BGR48LE)
rgb48funcs(bgr, BE, AV_PIX_FMT_BGR48BE)
/* Fetch pixel i for the 16/32-bit formats: 8-bit-per-component formats
 * (RGBA/BGRA/ARGB/ABGR) are read as one native-order 32-bit word; all
 * 16-bit packed formats are read as a 16-bit word honouring endianness.
 * Expands against locals named 'src' and 'origin'. */
#define input_pixel(i) ((origin == AV_PIX_FMT_RGBA || \
                         origin == AV_PIX_FMT_BGRA || \
                         origin == AV_PIX_FMT_ARGB || \
                         origin == AV_PIX_FMT_ABGR) \
                        ? AV_RN32A(&src[(i) * 4]) \
                        : (isBE(origin) ? AV_RB16(&src[(i) * 2]) \
                                        : AV_RL16(&src[(i) * 2])))
/*
 * Convert one row of a packed 16/32-bit RGB format to 15-bit Y.
 * shp shifts the whole pixel first (for the *_1 32-bit variants); each
 * component is then isolated with mask*/shift and the coefficients are
 * pre-scaled by rsh/gsh/bsh so all layouts meet at the same scale S.
 */
static av_always_inline void rgb16_32ToY_c_template(int16_t *dst,
                                                    const uint8_t *src,
                                                    int width,
                                                    enum AVPixelFormat origin,
                                                    int shr, int shg,
                                                    int shb, int shp,
                                                    int maskr, int maskg,
                                                    int maskb, int rsh,
                                                    int gsh, int bsh, int S,
                                                    int32_t *rgb2yuv)
{
    const int ry = rgb2yuv[RY_IDX]<<rsh, gy = rgb2yuv[GY_IDX]<<gsh, by = rgb2yuv[BY_IDX]<<bsh;
    /* rounding plus Y level offset, expressed at scale S */
    const unsigned rnd = (32<<((S)-1)) + (1<<(S-7));
    int i;
    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b = (px & maskb) >> shb;
        int g = (px & maskg) >> shg;
        int r = (px & maskr) >> shr;
        dst[i] = (ry * r + gy * g + by * b + rnd) >> ((S)-6);
    }
}
/*
 * Convert one row of a packed 16/32-bit RGB format to 15-bit U and V.
 * Same component-extraction scheme as rgb16_32ToY_c_template; the rnd
 * constant folds rounding together with the chroma mid-point offset.
 */
static av_always_inline void rgb16_32ToUV_c_template(int16_t *dstU,
                                                     int16_t *dstV,
                                                     const uint8_t *src,
                                                     int width,
                                                     enum AVPixelFormat origin,
                                                     int shr, int shg,
                                                     int shb, int shp,
                                                     int maskr, int maskg,
                                                     int maskb, int rsh,
                                                     int gsh, int bsh, int S,
                                                     int32_t *rgb2yuv)
{
    const int ru = rgb2yuv[RU_IDX] * (1 << rsh), gu = rgb2yuv[GU_IDX] * (1 << gsh), bu = rgb2yuv[BU_IDX] * (1 << bsh),
              rv = rgb2yuv[RV_IDX] * (1 << rsh), gv = rgb2yuv[GV_IDX] * (1 << gsh), bv = rgb2yuv[BV_IDX] * (1 << bsh);
    const unsigned rnd = (256u<<((S)-1)) + (1<<(S-7));
    int i;
    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b = (px & maskb) >> shb;
        int g = (px & maskg) >> shg;
        int r = (px & maskr) >> shr;
        dstU[i] = (ru * r + gu * g + bu * b + rnd) >> ((S)-6);
        dstV[i] = (rv * r + gv * g + bv * b + rnd) >> ((S)-6);
    }
}
/*
 * Horizontally 2:1 subsampled variant: two adjacent pixels are summed
 * before the colour transform.  To extract components from the *sum*,
 * each mask is widened by one bit (mask |= mask << 1); green is isolated
 * first via maskgx = ~(maskr|maskb) so the red/blue sum (rb) can be
 * recovered by subtraction.  The extra +1 in the final shift divides the
 * two-pixel sum back down.
 */
static av_always_inline void rgb16_32ToUV_half_c_template(int16_t *dstU,
                                                          int16_t *dstV,
                                                          const uint8_t *src,
                                                          int width,
                                                          enum AVPixelFormat origin,
                                                          int shr, int shg,
                                                          int shb, int shp,
                                                          int maskr, int maskg,
                                                          int maskb, int rsh,
                                                          int gsh, int bsh, int S,
                                                          int32_t *rgb2yuv)
{
    const int ru = rgb2yuv[RU_IDX] * (1 << rsh), gu = rgb2yuv[GU_IDX] * (1 << gsh), bu = rgb2yuv[BU_IDX] * (1 << bsh),
              rv = rgb2yuv[RV_IDX] * (1 << rsh), gv = rgb2yuv[GV_IDX] * (1 << gsh), bv = rgb2yuv[BV_IDX] * (1 << bsh),
              maskgx = ~(maskr | maskb);
    const unsigned rnd = (256U<<(S)) + (1<<(S-6));
    int i;
    maskr |= maskr << 1;
    maskb |= maskb << 1;
    maskg |= maskg << 1;
    for (i = 0; i < width; i++) {
        unsigned px0 = input_pixel(2 * i + 0) >> shp;
        unsigned px1 = input_pixel(2 * i + 1) >> shp;
        int b, r, g = (px0 & maskgx) + (px1 & maskgx);
        int rb = px0 + px1 - g;
        b = (rb & maskb) >> shb;
        /* 565 (and pre-shifted) layouts: green's sum can carry into the
         * red field, so shift without re-masking; 555/444 re-mask. */
        if (shp ||
            origin == AV_PIX_FMT_BGR565LE || origin == AV_PIX_FMT_BGR565BE ||
            origin == AV_PIX_FMT_RGB565LE || origin == AV_PIX_FMT_RGB565BE) {
            g >>= shg;
        } else {
            g = (g & maskg) >> shg;
        }
        r = (rb & maskr) >> shr;
        dstU[i] = (ru * r + gu * g + bu * b + (unsigned)rnd) >> ((S)-6+1);
        dstV[i] = (rv * r + gv * g + bv * b + (unsigned)rnd) >> ((S)-6+1);
    }
}
#undef input_pixel

/*
 * Instantiate ToY / ToUV / ToUV_half entry points for one packed
 * 16/32-bit RGB layout.  The mask/shift parameters describe where each
 * component sits in the pixel; S is the common output scale.
 */
#define rgb16_32_wrapper(fmt, name, shr, shg, shb, shp, maskr, \
                         maskg, maskb, rsh, gsh, bsh, S) \
static void name ## ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, \
                          int width, uint32_t *tab) \
{ \
    rgb16_32ToY_c_template((int16_t*)dst, src, width, fmt, shr, shg, shb, shp, \
                           maskr, maskg, maskb, rsh, gsh, bsh, S, tab); \
} \
\
static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \
                           const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \
                           int width, uint32_t *tab) \
{ \
    rgb16_32ToUV_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
                            shr, shg, shb, shp, \
                            maskr, maskg, maskb, rsh, gsh, bsh, S, tab);\
} \
\
static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV, \
                                const uint8_t *unused0, const uint8_t *src, \
                                const uint8_t *dummy, \
                                int width, uint32_t *tab) \
{ \
    rgb16_32ToUV_half_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
                                 shr, shg, shb, shp, \
                                 maskr, maskg, maskb, \
                                 rsh, gsh, bsh, S, tab); \
}

rgb16_32_wrapper(AV_PIX_FMT_BGR32,    bgr32,  16, 0,  0, 0, 0xFF0000, 0xFF00,   0x00FF,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR32_1,  bgr321, 16, 0,  0, 8, 0xFF0000, 0xFF00,   0x00FF,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32,    rgb32,   0, 0, 16, 0,   0x00FF, 0xFF00, 0xFF0000,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32_1,  rgb321,  0, 0, 16, 8,   0x00FF, 0xFF00, 0xFF0000,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR565LE, bgr16le, 0, 0,  0, 0,   0x001F, 0x07E0,   0xF800, 11, 5,  0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555LE, bgr15le, 0, 0,  0, 0,   0x001F, 0x03E0,   0x7C00, 10, 5,  0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444LE, bgr12le, 0, 0,  0, 0,   0x000F, 0x00F0,   0x0F00,  8, 4,  0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565LE, rgb16le, 0, 0,  0, 0,   0xF800, 0x07E0,   0x001F,  0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555LE, rgb15le, 0, 0,  0, 0,   0x7C00, 0x03E0,   0x001F,  0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444LE, rgb12le, 0, 0,  0, 0,   0x0F00, 0x00F0,   0x000F,  0, 4,  8, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_BGR565BE, bgr16be, 0, 0,  0, 0,   0x001F, 0x07E0,   0xF800, 11, 5,  0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555BE, bgr15be, 0, 0,  0, 0,   0x001F, 0x03E0,   0x7C00, 10, 5,  0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444BE, bgr12be, 0, 0,  0, 0,   0x000F, 0x00F0,   0x0F00,  8, 4,  0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565BE, rgb16be, 0, 0,  0, 0,   0xF800, 0x07E0,   0x001F,  0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555BE, rgb15be, 0, 0,  0, 0,   0x7C00, 0x03E0,   0x001F,  0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444BE, rgb12be, 0, 0,  0, 0,   0x0F00, 0x00F0,   0x000F,  0, 4,  8, RGB2YUV_SHIFT + 4)
  359. static void gbr24pToUV_half_c(uint8_t *_dstU, uint8_t *_dstV,
  360. const uint8_t *gsrc, const uint8_t *bsrc, const uint8_t *rsrc,
  361. int width, uint32_t *rgb2yuv)
  362. {
  363. uint16_t *dstU = (uint16_t *)_dstU;
  364. uint16_t *dstV = (uint16_t *)_dstV;
  365. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  366. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  367. int i;
  368. for (i = 0; i < width; i++) {
  369. unsigned int g = gsrc[2*i] + gsrc[2*i+1];
  370. unsigned int b = bsrc[2*i] + bsrc[2*i+1];
  371. unsigned int r = rsrc[2*i] + rsrc[2*i+1];
  372. dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
  373. dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
  374. }
  375. }
  376. static void rgba64leToA_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
  377. const uint8_t *unused2, int width, uint32_t *unused)
  378. {
  379. int16_t *dst = (int16_t *)_dst;
  380. const uint16_t *src = (const uint16_t *)_src;
  381. int i;
  382. for (i = 0; i < width; i++)
  383. dst[i] = AV_RL16(src + 4 * i + 3);
  384. }
  385. static void rgba64beToA_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
  386. const uint8_t *unused2, int width, uint32_t *unused)
  387. {
  388. int16_t *dst = (int16_t *)_dst;
  389. const uint16_t *src = (const uint16_t *)_src;
  390. int i;
  391. for (i = 0; i < width; i++)
  392. dst[i] = AV_RB16(src + 4 * i + 3);
  393. }
  394. static void abgrToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
  395. {
  396. int16_t *dst = (int16_t *)_dst;
  397. int i;
  398. for (i=0; i<width; i++) {
  399. dst[i]= src[4*i]<<6 | src[4*i]>>2;
  400. }
  401. }
  402. static void rgbaToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
  403. {
  404. int16_t *dst = (int16_t *)_dst;
  405. int i;
  406. for (i=0; i<width; i++) {
  407. dst[i]= src[4*i+3]<<6 | src[4*i+3]>>2;
  408. }
  409. }
  410. static void palToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
  411. {
  412. int16_t *dst = (int16_t *)_dst;
  413. int i;
  414. for (i=0; i<width; i++) {
  415. int d= src[i];
  416. dst[i]= (pal[d] >> 24)<<6 | pal[d]>>26;
  417. }
  418. }
  419. static void palToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
  420. {
  421. int16_t *dst = (int16_t *)_dst;
  422. int i;
  423. for (i = 0; i < width; i++) {
  424. int d = src[i];
  425. dst[i] = (pal[d] & 0xFF)<<6;
  426. }
  427. }
  428. static void palToUV_c(uint8_t *_dstU, uint8_t *_dstV,
  429. const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
  430. int width, uint32_t *pal)
  431. {
  432. uint16_t *dstU = (uint16_t *)_dstU;
  433. int16_t *dstV = (int16_t *)_dstV;
  434. int i;
  435. av_assert1(src1 == src2);
  436. for (i = 0; i < width; i++) {
  437. int p = pal[src1[i]];
  438. dstU[i] = (uint8_t)(p>> 8)<<6;
  439. dstV[i] = (uint8_t)(p>>16)<<6;
  440. }
  441. }
  442. static void monowhite2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
  443. {
  444. int16_t *dst = (int16_t *)_dst;
  445. int i, j;
  446. width = (width + 7) >> 3;
  447. for (i = 0; i < width; i++) {
  448. int d = ~src[i];
  449. for (j = 0; j < 8; j++)
  450. dst[8*i+j]= ((d>>(7-j))&1) * 16383;
  451. }
  452. if(width&7){
  453. int d= ~src[i];
  454. for (j = 0; j < (width&7); j++)
  455. dst[8*i+j]= ((d>>(7-j))&1) * 16383;
  456. }
  457. }
  458. static void monoblack2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
  459. {
  460. int16_t *dst = (int16_t *)_dst;
  461. int i, j;
  462. width = (width + 7) >> 3;
  463. for (i = 0; i < width; i++) {
  464. int d = src[i];
  465. for (j = 0; j < 8; j++)
  466. dst[8*i+j]= ((d>>(7-j))&1) * 16383;
  467. }
  468. if(width&7){
  469. int d = src[i];
  470. for (j = 0; j < (width&7); j++)
  471. dst[8*i+j] = ((d>>(7-j))&1) * 16383;
  472. }
  473. }
  474. static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  475. uint32_t *unused)
  476. {
  477. int i;
  478. for (i = 0; i < width; i++)
  479. dst[i] = src[2 * i];
  480. }
  481. static void yuy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
  482. const uint8_t *src2, int width, uint32_t *unused)
  483. {
  484. int i;
  485. for (i = 0; i < width; i++) {
  486. dstU[i] = src1[4 * i + 1];
  487. dstV[i] = src1[4 * i + 3];
  488. }
  489. av_assert1(src1 == src2);
  490. }
  491. static void yvy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
  492. const uint8_t *src2, int width, uint32_t *unused)
  493. {
  494. int i;
  495. for (i = 0; i < width; i++) {
  496. dstV[i] = src1[4 * i + 1];
  497. dstU[i] = src1[4 * i + 3];
  498. }
  499. av_assert1(src1 == src2);
  500. }
  501. static void y210le_UV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src,
  502. const uint8_t *unused1, int width, uint32_t *unused2)
  503. {
  504. int i;
  505. for (i = 0; i < width; i++) {
  506. AV_WN16(dstU + i * 2, AV_RL16(src + i * 8 + 2) >> 6);
  507. AV_WN16(dstV + i * 2, AV_RL16(src + i * 8 + 6) >> 6);
  508. }
  509. }
  510. static void y210le_Y_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0,
  511. const uint8_t *unused1, int width, uint32_t *unused2)
  512. {
  513. int i;
  514. for (i = 0; i < width; i++)
  515. AV_WN16(dst + i * 2, AV_RL16(src + i * 4) >> 6);
  516. }
  517. static void bswap16Y_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1, const uint8_t *unused2, int width,
  518. uint32_t *unused)
  519. {
  520. int i;
  521. const uint16_t *src = (const uint16_t *)_src;
  522. uint16_t *dst = (uint16_t *)_dst;
  523. for (i = 0; i < width; i++)
  524. dst[i] = av_bswap16(src[i]);
  525. }
  526. static void bswap16UV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *_src1,
  527. const uint8_t *_src2, int width, uint32_t *unused)
  528. {
  529. int i;
  530. const uint16_t *src1 = (const uint16_t *)_src1,
  531. *src2 = (const uint16_t *)_src2;
  532. uint16_t *dstU = (uint16_t *)_dstU, *dstV = (uint16_t *)_dstV;
  533. for (i = 0; i < width; i++) {
  534. dstU[i] = av_bswap16(src1[i]);
  535. dstV[i] = av_bswap16(src2[i]);
  536. }
  537. }
  538. static void read_ya16le_gray_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  539. uint32_t *unused)
  540. {
  541. int i;
  542. for (i = 0; i < width; i++)
  543. AV_WN16(dst + i * 2, AV_RL16(src + i * 4));
  544. }
  545. static void read_ya16le_alpha_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  546. uint32_t *unused)
  547. {
  548. int i;
  549. for (i = 0; i < width; i++)
  550. AV_WN16(dst + i * 2, AV_RL16(src + i * 4 + 2));
  551. }
  552. static void read_ya16be_gray_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  553. uint32_t *unused)
  554. {
  555. int i;
  556. for (i = 0; i < width; i++)
  557. AV_WN16(dst + i * 2, AV_RB16(src + i * 4));
  558. }
  559. static void read_ya16be_alpha_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  560. uint32_t *unused)
  561. {
  562. int i;
  563. for (i = 0; i < width; i++)
  564. AV_WN16(dst + i * 2, AV_RB16(src + i * 4 + 2));
  565. }
  566. static void read_ayuv64le_Y_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
  567. uint32_t *unused2)
  568. {
  569. int i;
  570. for (i = 0; i < width; i++)
  571. AV_WN16(dst + i * 2, AV_RL16(src + i * 8 + 2));
  572. }
  573. static void read_ayuv64le_UV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src,
  574. const uint8_t *unused1, int width, uint32_t *unused2)
  575. {
  576. int i;
  577. for (i = 0; i < width; i++) {
  578. AV_WN16(dstU + i * 2, AV_RL16(src + i * 8 + 4));
  579. AV_WN16(dstV + i * 2, AV_RL16(src + i * 8 + 6));
  580. }
  581. }
  582. static void read_ayuv64le_A_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
  583. uint32_t *unused2)
  584. {
  585. int i;
  586. for (i = 0; i < width; i++)
  587. AV_WN16(dst + i * 2, AV_RL16(src + i * 8));
  588. }
/* This is almost identical to the previous, and exists only because
 * yuy2ToY/UV(dst, src + 1, ...) would have 100% unaligned accesses. */
  591. static void uyvyToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  592. uint32_t *unused)
  593. {
  594. int i;
  595. for (i = 0; i < width; i++)
  596. dst[i] = src[2 * i + 1];
  597. }
  598. static void uyvyToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
  599. const uint8_t *src2, int width, uint32_t *unused)
  600. {
  601. int i;
  602. for (i = 0; i < width; i++) {
  603. dstU[i] = src1[4 * i + 0];
  604. dstV[i] = src1[4 * i + 2];
  605. }
  606. av_assert1(src1 == src2);
  607. }
  608. static av_always_inline void nvXXtoUV_c(uint8_t *dst1, uint8_t *dst2,
  609. const uint8_t *src, int width)
  610. {
  611. int i;
  612. for (i = 0; i < width; i++) {
  613. dst1[i] = src[2 * i + 0];
  614. dst2[i] = src[2 * i + 1];
  615. }
  616. }
/* NV12: interleaved chroma bytes are ordered U,V — pass planes through. */
static void nv12ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstU, dstV, src1, width);
}
/* NV21/NV42 input: interleaved chroma is stored as V,U pairs, so the
 * destinations are swapped relative to nv12ToUV_c. */
static void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstV, dstU, src1, width);
}
  629. static void p010LEToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1,
  630. const uint8_t *unused2, int width, uint32_t *unused)
  631. {
  632. int i;
  633. for (i = 0; i < width; i++) {
  634. AV_WN16(dst + i * 2, AV_RL16(src + i * 2) >> 6);
  635. }
  636. }
  637. static void p010BEToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1,
  638. const uint8_t *unused2, int width, uint32_t *unused)
  639. {
  640. int i;
  641. for (i = 0; i < width; i++) {
  642. AV_WN16(dst + i * 2, AV_RB16(src + i * 2) >> 6);
  643. }
  644. }
  645. static void p010LEToUV_c(uint8_t *dstU, uint8_t *dstV,
  646. const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
  647. int width, uint32_t *unused)
  648. {
  649. int i;
  650. for (i = 0; i < width; i++) {
  651. AV_WN16(dstU + i * 2, AV_RL16(src1 + i * 4 + 0) >> 6);
  652. AV_WN16(dstV + i * 2, AV_RL16(src1 + i * 4 + 2) >> 6);
  653. }
  654. }
  655. static void p010BEToUV_c(uint8_t *dstU, uint8_t *dstV,
  656. const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
  657. int width, uint32_t *unused)
  658. {
  659. int i;
  660. for (i = 0; i < width; i++) {
  661. AV_WN16(dstU + i * 2, AV_RB16(src1 + i * 4 + 0) >> 6);
  662. AV_WN16(dstV + i * 2, AV_RB16(src1 + i * 4 + 2) >> 6);
  663. }
  664. }
  665. static void p016LEToUV_c(uint8_t *dstU, uint8_t *dstV,
  666. const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
  667. int width, uint32_t *unused)
  668. {
  669. int i;
  670. for (i = 0; i < width; i++) {
  671. AV_WN16(dstU + i * 2, AV_RL16(src1 + i * 4 + 0));
  672. AV_WN16(dstV + i * 2, AV_RL16(src1 + i * 4 + 2));
  673. }
  674. }
  675. static void p016BEToUV_c(uint8_t *dstU, uint8_t *dstV,
  676. const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
  677. int width, uint32_t *unused)
  678. {
  679. int i;
  680. for (i = 0; i < width; i++) {
  681. AV_WN16(dstU + i * 2, AV_RB16(src1 + i * 4 + 0));
  682. AV_WN16(dstV + i * 2, AV_RB16(src1 + i * 4 + 2));
  683. }
  684. }
/* Read one 16-bit pixel using the byte order of the caller's 'origin'
 * format (both 'origin' and 'isBE' must be in scope at the use site). */
#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))
  686. static void bgr24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2,
  687. int width, uint32_t *rgb2yuv)
  688. {
  689. int16_t *dst = (int16_t *)_dst;
  690. int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
  691. int i;
  692. for (i = 0; i < width; i++) {
  693. int b = src[i * 3 + 0];
  694. int g = src[i * 3 + 1];
  695. int r = src[i * 3 + 2];
  696. dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
  697. }
  698. }
  699. static void bgr24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
  700. const uint8_t *src2, int width, uint32_t *rgb2yuv)
  701. {
  702. int16_t *dstU = (int16_t *)_dstU;
  703. int16_t *dstV = (int16_t *)_dstV;
  704. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  705. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  706. int i;
  707. for (i = 0; i < width; i++) {
  708. int b = src1[3 * i + 0];
  709. int g = src1[3 * i + 1];
  710. int r = src1[3 * i + 2];
  711. dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
  712. dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
  713. }
  714. av_assert1(src1 == src2);
  715. }
  716. static void bgr24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
  717. const uint8_t *src2, int width, uint32_t *rgb2yuv)
  718. {
  719. int16_t *dstU = (int16_t *)_dstU;
  720. int16_t *dstV = (int16_t *)_dstV;
  721. int i;
  722. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  723. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  724. for (i = 0; i < width; i++) {
  725. int b = src1[6 * i + 0] + src1[6 * i + 3];
  726. int g = src1[6 * i + 1] + src1[6 * i + 4];
  727. int r = src1[6 * i + 2] + src1[6 * i + 5];
  728. dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
  729. dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
  730. }
  731. av_assert1(src1 == src2);
  732. }
  733. static void rgb24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  734. uint32_t *rgb2yuv)
  735. {
  736. int16_t *dst = (int16_t *)_dst;
  737. int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
  738. int i;
  739. for (i = 0; i < width; i++) {
  740. int r = src[i * 3 + 0];
  741. int g = src[i * 3 + 1];
  742. int b = src[i * 3 + 2];
  743. dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
  744. }
  745. }
  746. static void rgb24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
  747. const uint8_t *src2, int width, uint32_t *rgb2yuv)
  748. {
  749. int16_t *dstU = (int16_t *)_dstU;
  750. int16_t *dstV = (int16_t *)_dstV;
  751. int i;
  752. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  753. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  754. av_assert1(src1 == src2);
  755. for (i = 0; i < width; i++) {
  756. int r = src1[3 * i + 0];
  757. int g = src1[3 * i + 1];
  758. int b = src1[3 * i + 2];
  759. dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
  760. dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
  761. }
  762. }
  763. static void rgb24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
  764. const uint8_t *src2, int width, uint32_t *rgb2yuv)
  765. {
  766. int16_t *dstU = (int16_t *)_dstU;
  767. int16_t *dstV = (int16_t *)_dstV;
  768. int i;
  769. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  770. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  771. av_assert1(src1 == src2);
  772. for (i = 0; i < width; i++) {
  773. int r = src1[6 * i + 0] + src1[6 * i + 3];
  774. int g = src1[6 * i + 1] + src1[6 * i + 4];
  775. int b = src1[6 * i + 2] + src1[6 * i + 5];
  776. dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
  777. dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
  778. }
  779. }
  780. static void planar_rgb_to_y(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *rgb2yuv)
  781. {
  782. uint16_t *dst = (uint16_t *)_dst;
  783. int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
  784. int i;
  785. for (i = 0; i < width; i++) {
  786. int g = src[0][i];
  787. int b = src[1][i];
  788. int r = src[2][i];
  789. dst[i] = (ry*r + gy*g + by*b + (0x801<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
  790. }
  791. }
  792. static void planar_rgb_to_a(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *unused)
  793. {
  794. uint16_t *dst = (uint16_t *)_dst;
  795. int i;
  796. for (i = 0; i < width; i++)
  797. dst[i] = src[3][i] << 6;
  798. }
  799. static void planar_rgb_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *src[4], int width, int32_t *rgb2yuv)
  800. {
  801. uint16_t *dstU = (uint16_t *)_dstU;
  802. uint16_t *dstV = (uint16_t *)_dstV;
  803. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  804. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  805. int i;
  806. for (i = 0; i < width; i++) {
  807. int g = src[0][i];
  808. int b = src[1][i];
  809. int r = src[2][i];
  810. dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
  811. dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
  812. }
  813. }
  814. #define rdpx(src) \
  815. is_be ? AV_RB16(src) : AV_RL16(src)
  816. static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_src[4],
  817. int width, int bpc, int is_be, int32_t *rgb2yuv)
  818. {
  819. int i;
  820. const uint16_t **src = (const uint16_t **)_src;
  821. uint16_t *dst = (uint16_t *)_dst;
  822. int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
  823. int shift = bpc < 16 ? bpc : 14;
  824. for (i = 0; i < width; i++) {
  825. int g = rdpx(src[0] + i);
  826. int b = rdpx(src[1] + i);
  827. int r = rdpx(src[2] + i);
  828. dst[i] = ((ry*r + gy*g + by*b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14));
  829. }
  830. }
  831. static av_always_inline void planar_rgb16_to_a(uint8_t *_dst, const uint8_t *_src[4],
  832. int width, int bpc, int is_be, int32_t *rgb2yuv)
  833. {
  834. int i;
  835. const uint16_t **src = (const uint16_t **)_src;
  836. uint16_t *dst = (uint16_t *)_dst;
  837. int shift = bpc < 16 ? bpc : 14;
  838. for (i = 0; i < width; i++) {
  839. dst[i] = rdpx(src[3] + i) << (14 - shift);
  840. }
  841. }
  842. static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV,
  843. const uint8_t *_src[4], int width,
  844. int bpc, int is_be, int32_t *rgb2yuv)
  845. {
  846. int i;
  847. const uint16_t **src = (const uint16_t **)_src;
  848. uint16_t *dstU = (uint16_t *)_dstU;
  849. uint16_t *dstV = (uint16_t *)_dstV;
  850. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  851. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  852. int shift = bpc < 16 ? bpc : 14;
  853. for (i = 0; i < width; i++) {
  854. int g = rdpx(src[0] + i);
  855. int b = rdpx(src[1] + i);
  856. int r = rdpx(src[2] + i);
  857. dstU[i] = (ru*r + gu*g + bu*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
  858. dstV[i] = (rv*r + gv*g + bv*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
  859. }
  860. }
#undef rdpx
/* Float variant: read one 32-bit float sample honoring the endianness
 * selected by 'is_be' (must be in scope at the use site). */
#define rdpx(src) (is_be ? av_int2float(AV_RB32(src)): av_int2float(AV_RL32(src)))
  863. static av_always_inline void planar_rgbf32_to_a(uint8_t *_dst, const uint8_t *_src[4], int width, int is_be, int32_t *rgb2yuv)
  864. {
  865. int i;
  866. const float **src = (const float **)_src;
  867. uint16_t *dst = (uint16_t *)_dst;
  868. for (i = 0; i < width; i++) {
  869. dst[i] = av_clip_uint16(lrintf(65535.0f * rdpx(src[3] + i)));
  870. }
  871. }
  872. static av_always_inline void planar_rgbf32_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *_src[4], int width, int is_be, int32_t *rgb2yuv)
  873. {
  874. int i;
  875. const float **src = (const float **)_src;
  876. uint16_t *dstU = (uint16_t *)_dstU;
  877. uint16_t *dstV = (uint16_t *)_dstV;
  878. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  879. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  880. int bpc = 16;
  881. int shift = 14;
  882. for (i = 0; i < width; i++) {
  883. int g = av_clip_uint16(lrintf(65535.0f * rdpx(src[0] + i)));
  884. int b = av_clip_uint16(lrintf(65535.0f * rdpx(src[1] + i)));
  885. int r = av_clip_uint16(lrintf(65535.0f * rdpx(src[2] + i)));
  886. dstU[i] = (ru*r + gu*g + bu*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
  887. dstV[i] = (rv*r + gv*g + bv*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
  888. }
  889. }
  890. static av_always_inline void planar_rgbf32_to_y(uint8_t *_dst, const uint8_t *_src[4], int width, int is_be, int32_t *rgb2yuv)
  891. {
  892. int i;
  893. const float **src = (const float **)_src;
  894. uint16_t *dst = (uint16_t *)_dst;
  895. int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
  896. int bpc = 16;
  897. int shift = 14;
  898. for (i = 0; i < width; i++) {
  899. int g = av_clip_uint16(lrintf(65535.0f * rdpx(src[0] + i)));
  900. int b = av_clip_uint16(lrintf(65535.0f * rdpx(src[1] + i)));
  901. int r = av_clip_uint16(lrintf(65535.0f * rdpx(src[2] + i)));
  902. dst[i] = ((ry*r + gy*g + by*b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14));
  903. }
  904. }
  905. #undef rdpx
  906. static av_always_inline void grayf32ToY16_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
  907. const uint8_t *unused2, int width, uint32_t *unused)
  908. {
  909. int i;
  910. const float *src = (const float *)_src;
  911. uint16_t *dst = (uint16_t *)_dst;
  912. for (i = 0; i < width; ++i){
  913. dst[i] = av_clip_uint16(lrintf(65535.0f * src[i]));
  914. }
  915. }
  916. static av_always_inline void grayf32ToY16_bswap_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
  917. const uint8_t *unused2, int width, uint32_t *unused)
  918. {
  919. int i;
  920. const uint32_t *src = (const uint32_t *)_src;
  921. uint16_t *dst = (uint16_t *)_dst;
  922. for (i = 0; i < width; ++i){
  923. dst[i] = av_clip_uint16(lrintf(65535.0f * av_int2float(av_bswap32(src[i]))));
  924. }
  925. }
  926. #define rgb9plus_planar_funcs_endian(nbits, endian_name, endian) \
  927. static void planar_rgb##nbits##endian_name##_to_y(uint8_t *dst, const uint8_t *src[4], \
  928. int w, int32_t *rgb2yuv) \
  929. { \
  930. planar_rgb16_to_y(dst, src, w, nbits, endian, rgb2yuv); \
  931. } \
  932. static void planar_rgb##nbits##endian_name##_to_uv(uint8_t *dstU, uint8_t *dstV, \
  933. const uint8_t *src[4], int w, int32_t *rgb2yuv) \
  934. { \
  935. planar_rgb16_to_uv(dstU, dstV, src, w, nbits, endian, rgb2yuv); \
  936. } \
/* Instantiate the little- and big-endian alpha readers for one bit depth. */
#define rgb9plus_planar_transparency_funcs(nbits) \
static void planar_rgb##nbits##le_to_a(uint8_t *dst, const uint8_t *src[4], \
                                       int w, int32_t *rgb2yuv) \
{ \
    planar_rgb16_to_a(dst, src, w, nbits, 0, rgb2yuv); \
} \
static void planar_rgb##nbits##be_to_a(uint8_t *dst, const uint8_t *src[4], \
                                       int w, int32_t *rgb2yuv) \
{ \
    planar_rgb16_to_a(dst, src, w, nbits, 1, rgb2yuv); \
}
/* Instantiate both endiannesses of the luma/chroma readers for one depth. */
#define rgb9plus_planar_funcs(nbits) \
rgb9plus_planar_funcs_endian(nbits, le, 0) \
rgb9plus_planar_funcs_endian(nbits, be, 1)

/* Concrete readers for each supported planar RGB bit depth; alpha readers
 * are generated only for the depths used with alpha below (10/12/16). */
rgb9plus_planar_funcs(9)
rgb9plus_planar_funcs(10)
rgb9plus_planar_funcs(12)
rgb9plus_planar_funcs(14)
rgb9plus_planar_funcs(16)
rgb9plus_planar_transparency_funcs(10)
rgb9plus_planar_transparency_funcs(12)
rgb9plus_planar_transparency_funcs(16)
/* Instantiate the float (32-bit) planar RGB Y/UV/A readers for one
 * endianness as wrappers around the generic planar_rgbf32_* converters. */
#define rgbf32_planar_funcs_endian(endian_name, endian) \
static void planar_rgbf32##endian_name##_to_y(uint8_t *dst, const uint8_t *src[4], \
                                              int w, int32_t *rgb2yuv) \
{ \
    planar_rgbf32_to_y(dst, src, w, endian, rgb2yuv); \
} \
static void planar_rgbf32##endian_name##_to_uv(uint8_t *dstU, uint8_t *dstV, \
                                               const uint8_t *src[4], int w, int32_t *rgb2yuv) \
{ \
    planar_rgbf32_to_uv(dstU, dstV, src, w, endian, rgb2yuv); \
} \
static void planar_rgbf32##endian_name##_to_a(uint8_t *dst, const uint8_t *src[4], \
                                              int w, int32_t *rgb2yuv) \
{ \
    planar_rgbf32_to_a(dst, src, w, endian, rgb2yuv); \
}

rgbf32_planar_funcs_endian(le, 0)
rgbf32_planar_funcs_endian(be, 1)
  977. av_cold void ff_sws_init_input_funcs(SwsContext *c)
  978. {
  979. enum AVPixelFormat srcFormat = c->srcFormat;
  980. c->chrToYV12 = NULL;
  981. switch (srcFormat) {
  982. case AV_PIX_FMT_YUYV422:
  983. c->chrToYV12 = yuy2ToUV_c;
  984. break;
  985. case AV_PIX_FMT_YVYU422:
  986. c->chrToYV12 = yvy2ToUV_c;
  987. break;
  988. case AV_PIX_FMT_UYVY422:
  989. c->chrToYV12 = uyvyToUV_c;
  990. break;
  991. case AV_PIX_FMT_NV12:
  992. case AV_PIX_FMT_NV24:
  993. c->chrToYV12 = nv12ToUV_c;
  994. break;
  995. case AV_PIX_FMT_NV21:
  996. case AV_PIX_FMT_NV42:
  997. c->chrToYV12 = nv21ToUV_c;
  998. break;
  999. case AV_PIX_FMT_RGB8:
  1000. case AV_PIX_FMT_BGR8:
  1001. case AV_PIX_FMT_PAL8:
  1002. case AV_PIX_FMT_BGR4_BYTE:
  1003. case AV_PIX_FMT_RGB4_BYTE:
  1004. c->chrToYV12 = palToUV_c;
  1005. break;
  1006. case AV_PIX_FMT_GBRP9LE:
  1007. c->readChrPlanar = planar_rgb9le_to_uv;
  1008. break;
  1009. case AV_PIX_FMT_GBRAP10LE:
  1010. case AV_PIX_FMT_GBRP10LE:
  1011. c->readChrPlanar = planar_rgb10le_to_uv;
  1012. break;
  1013. case AV_PIX_FMT_GBRAP12LE:
  1014. case AV_PIX_FMT_GBRP12LE:
  1015. c->readChrPlanar = planar_rgb12le_to_uv;
  1016. break;
  1017. case AV_PIX_FMT_GBRP14LE:
  1018. c->readChrPlanar = planar_rgb14le_to_uv;
  1019. break;
  1020. case AV_PIX_FMT_GBRAP16LE:
  1021. case AV_PIX_FMT_GBRP16LE:
  1022. c->readChrPlanar = planar_rgb16le_to_uv;
  1023. break;
  1024. case AV_PIX_FMT_GBRAPF32LE:
  1025. case AV_PIX_FMT_GBRPF32LE:
  1026. c->readChrPlanar = planar_rgbf32le_to_uv;
  1027. break;
  1028. case AV_PIX_FMT_GBRP9BE:
  1029. c->readChrPlanar = planar_rgb9be_to_uv;
  1030. break;
  1031. case AV_PIX_FMT_GBRAP10BE:
  1032. case AV_PIX_FMT_GBRP10BE:
  1033. c->readChrPlanar = planar_rgb10be_to_uv;
  1034. break;
  1035. case AV_PIX_FMT_GBRAP12BE:
  1036. case AV_PIX_FMT_GBRP12BE:
  1037. c->readChrPlanar = planar_rgb12be_to_uv;
  1038. break;
  1039. case AV_PIX_FMT_GBRP14BE:
  1040. c->readChrPlanar = planar_rgb14be_to_uv;
  1041. break;
  1042. case AV_PIX_FMT_GBRAP16BE:
  1043. case AV_PIX_FMT_GBRP16BE:
  1044. c->readChrPlanar = planar_rgb16be_to_uv;
  1045. break;
  1046. case AV_PIX_FMT_GBRAPF32BE:
  1047. case AV_PIX_FMT_GBRPF32BE:
  1048. c->readChrPlanar = planar_rgbf32be_to_uv;
  1049. break;
  1050. case AV_PIX_FMT_GBRAP:
  1051. case AV_PIX_FMT_GBRP:
  1052. c->readChrPlanar = planar_rgb_to_uv;
  1053. break;
  1054. #if HAVE_BIGENDIAN
  1055. case AV_PIX_FMT_YUV420P9LE:
  1056. case AV_PIX_FMT_YUV422P9LE:
  1057. case AV_PIX_FMT_YUV444P9LE:
  1058. case AV_PIX_FMT_YUV420P10LE:
  1059. case AV_PIX_FMT_YUV422P10LE:
  1060. case AV_PIX_FMT_YUV440P10LE:
  1061. case AV_PIX_FMT_YUV444P10LE:
  1062. case AV_PIX_FMT_YUV420P12LE:
  1063. case AV_PIX_FMT_YUV422P12LE:
  1064. case AV_PIX_FMT_YUV440P12LE:
  1065. case AV_PIX_FMT_YUV444P12LE:
  1066. case AV_PIX_FMT_YUV420P14LE:
  1067. case AV_PIX_FMT_YUV422P14LE:
  1068. case AV_PIX_FMT_YUV444P14LE:
  1069. case AV_PIX_FMT_YUV420P16LE:
  1070. case AV_PIX_FMT_YUV422P16LE:
  1071. case AV_PIX_FMT_YUV444P16LE:
  1072. case AV_PIX_FMT_YUVA420P9LE:
  1073. case AV_PIX_FMT_YUVA422P9LE:
  1074. case AV_PIX_FMT_YUVA444P9LE:
  1075. case AV_PIX_FMT_YUVA420P10LE:
  1076. case AV_PIX_FMT_YUVA422P10LE:
  1077. case AV_PIX_FMT_YUVA444P10LE:
  1078. case AV_PIX_FMT_YUVA422P12LE:
  1079. case AV_PIX_FMT_YUVA444P12LE:
  1080. case AV_PIX_FMT_YUVA420P16LE:
  1081. case AV_PIX_FMT_YUVA422P16LE:
  1082. case AV_PIX_FMT_YUVA444P16LE:
  1083. c->chrToYV12 = bswap16UV_c;
  1084. break;
  1085. #else
  1086. case AV_PIX_FMT_YUV420P9BE:
  1087. case AV_PIX_FMT_YUV422P9BE:
  1088. case AV_PIX_FMT_YUV444P9BE:
  1089. case AV_PIX_FMT_YUV420P10BE:
  1090. case AV_PIX_FMT_YUV422P10BE:
  1091. case AV_PIX_FMT_YUV440P10BE:
  1092. case AV_PIX_FMT_YUV444P10BE:
  1093. case AV_PIX_FMT_YUV420P12BE:
  1094. case AV_PIX_FMT_YUV422P12BE:
  1095. case AV_PIX_FMT_YUV440P12BE:
  1096. case AV_PIX_FMT_YUV444P12BE:
  1097. case AV_PIX_FMT_YUV420P14BE:
  1098. case AV_PIX_FMT_YUV422P14BE:
  1099. case AV_PIX_FMT_YUV444P14BE:
  1100. case AV_PIX_FMT_YUV420P16BE:
  1101. case AV_PIX_FMT_YUV422P16BE:
  1102. case AV_PIX_FMT_YUV444P16BE:
  1103. case AV_PIX_FMT_YUVA420P9BE:
  1104. case AV_PIX_FMT_YUVA422P9BE:
  1105. case AV_PIX_FMT_YUVA444P9BE:
  1106. case AV_PIX_FMT_YUVA420P10BE:
  1107. case AV_PIX_FMT_YUVA422P10BE:
  1108. case AV_PIX_FMT_YUVA444P10BE:
  1109. case AV_PIX_FMT_YUVA422P12BE:
  1110. case AV_PIX_FMT_YUVA444P12BE:
  1111. case AV_PIX_FMT_YUVA420P16BE:
  1112. case AV_PIX_FMT_YUVA422P16BE:
  1113. case AV_PIX_FMT_YUVA444P16BE:
  1114. c->chrToYV12 = bswap16UV_c;
  1115. break;
  1116. #endif
  1117. case AV_PIX_FMT_AYUV64LE:
  1118. c->chrToYV12 = read_ayuv64le_UV_c;
  1119. break;
  1120. case AV_PIX_FMT_P010LE:
  1121. c->chrToYV12 = p010LEToUV_c;
  1122. break;
  1123. case AV_PIX_FMT_P010BE:
  1124. c->chrToYV12 = p010BEToUV_c;
  1125. break;
  1126. case AV_PIX_FMT_P016LE:
  1127. c->chrToYV12 = p016LEToUV_c;
  1128. break;
  1129. case AV_PIX_FMT_P016BE:
  1130. c->chrToYV12 = p016BEToUV_c;
  1131. break;
  1132. case AV_PIX_FMT_Y210LE:
  1133. c->chrToYV12 = y210le_UV_c;
  1134. break;
  1135. }
  1136. if (c->chrSrcHSubSample) {
  1137. switch (srcFormat) {
  1138. case AV_PIX_FMT_RGBA64BE:
  1139. c->chrToYV12 = rgb64BEToUV_half_c;
  1140. break;
  1141. case AV_PIX_FMT_RGBA64LE:
  1142. c->chrToYV12 = rgb64LEToUV_half_c;
  1143. break;
  1144. case AV_PIX_FMT_BGRA64BE:
  1145. c->chrToYV12 = bgr64BEToUV_half_c;
  1146. break;
  1147. case AV_PIX_FMT_BGRA64LE:
  1148. c->chrToYV12 = bgr64LEToUV_half_c;
  1149. break;
  1150. case AV_PIX_FMT_RGB48BE:
  1151. c->chrToYV12 = rgb48BEToUV_half_c;
  1152. break;
  1153. case AV_PIX_FMT_RGB48LE:
  1154. c->chrToYV12 = rgb48LEToUV_half_c;
  1155. break;
  1156. case AV_PIX_FMT_BGR48BE:
  1157. c->chrToYV12 = bgr48BEToUV_half_c;
  1158. break;
  1159. case AV_PIX_FMT_BGR48LE:
  1160. c->chrToYV12 = bgr48LEToUV_half_c;
  1161. break;
  1162. case AV_PIX_FMT_RGB32:
  1163. c->chrToYV12 = bgr32ToUV_half_c;
  1164. break;
  1165. case AV_PIX_FMT_RGB32_1:
  1166. c->chrToYV12 = bgr321ToUV_half_c;
  1167. break;
  1168. case AV_PIX_FMT_BGR24:
  1169. c->chrToYV12 = bgr24ToUV_half_c;
  1170. break;
  1171. case AV_PIX_FMT_BGR565LE:
  1172. c->chrToYV12 = bgr16leToUV_half_c;
  1173. break;
  1174. case AV_PIX_FMT_BGR565BE:
  1175. c->chrToYV12 = bgr16beToUV_half_c;
  1176. break;
  1177. case AV_PIX_FMT_BGR555LE:
  1178. c->chrToYV12 = bgr15leToUV_half_c;
  1179. break;
  1180. case AV_PIX_FMT_BGR555BE:
  1181. c->chrToYV12 = bgr15beToUV_half_c;
  1182. break;
  1183. case AV_PIX_FMT_GBRAP:
  1184. case AV_PIX_FMT_GBRP:
  1185. c->chrToYV12 = gbr24pToUV_half_c;
  1186. break;
  1187. case AV_PIX_FMT_BGR444LE:
  1188. c->chrToYV12 = bgr12leToUV_half_c;
  1189. break;
  1190. case AV_PIX_FMT_BGR444BE:
  1191. c->chrToYV12 = bgr12beToUV_half_c;
  1192. break;
  1193. case AV_PIX_FMT_BGR32:
  1194. c->chrToYV12 = rgb32ToUV_half_c;
  1195. break;
  1196. case AV_PIX_FMT_BGR32_1:
  1197. c->chrToYV12 = rgb321ToUV_half_c;
  1198. break;
  1199. case AV_PIX_FMT_RGB24:
  1200. c->chrToYV12 = rgb24ToUV_half_c;
  1201. break;
  1202. case AV_PIX_FMT_RGB565LE:
  1203. c->chrToYV12 = rgb16leToUV_half_c;
  1204. break;
  1205. case AV_PIX_FMT_RGB565BE:
  1206. c->chrToYV12 = rgb16beToUV_half_c;
  1207. break;
  1208. case AV_PIX_FMT_RGB555LE:
  1209. c->chrToYV12 = rgb15leToUV_half_c;
  1210. break;
  1211. case AV_PIX_FMT_RGB555BE:
  1212. c->chrToYV12 = rgb15beToUV_half_c;
  1213. break;
  1214. case AV_PIX_FMT_RGB444LE:
  1215. c->chrToYV12 = rgb12leToUV_half_c;
  1216. break;
  1217. case AV_PIX_FMT_RGB444BE:
  1218. c->chrToYV12 = rgb12beToUV_half_c;
  1219. break;
  1220. }
  1221. } else {
  1222. switch (srcFormat) {
  1223. case AV_PIX_FMT_RGBA64BE:
  1224. c->chrToYV12 = rgb64BEToUV_c;
  1225. break;
  1226. case AV_PIX_FMT_RGBA64LE:
  1227. c->chrToYV12 = rgb64LEToUV_c;
  1228. break;
  1229. case AV_PIX_FMT_BGRA64BE:
  1230. c->chrToYV12 = bgr64BEToUV_c;
  1231. break;
  1232. case AV_PIX_FMT_BGRA64LE:
  1233. c->chrToYV12 = bgr64LEToUV_c;
  1234. break;
  1235. case AV_PIX_FMT_RGB48BE:
  1236. c->chrToYV12 = rgb48BEToUV_c;
  1237. break;
  1238. case AV_PIX_FMT_RGB48LE:
  1239. c->chrToYV12 = rgb48LEToUV_c;
  1240. break;
  1241. case AV_PIX_FMT_BGR48BE:
  1242. c->chrToYV12 = bgr48BEToUV_c;
  1243. break;
  1244. case AV_PIX_FMT_BGR48LE:
  1245. c->chrToYV12 = bgr48LEToUV_c;
  1246. break;
  1247. case AV_PIX_FMT_RGB32:
  1248. c->chrToYV12 = bgr32ToUV_c;
  1249. break;
  1250. case AV_PIX_FMT_RGB32_1:
  1251. c->chrToYV12 = bgr321ToUV_c;
  1252. break;
  1253. case AV_PIX_FMT_BGR24:
  1254. c->chrToYV12 = bgr24ToUV_c;
  1255. break;
  1256. case AV_PIX_FMT_BGR565LE:
  1257. c->chrToYV12 = bgr16leToUV_c;
  1258. break;
  1259. case AV_PIX_FMT_BGR565BE:
  1260. c->chrToYV12 = bgr16beToUV_c;
  1261. break;
  1262. case AV_PIX_FMT_BGR555LE:
  1263. c->chrToYV12 = bgr15leToUV_c;
  1264. break;
  1265. case AV_PIX_FMT_BGR555BE:
  1266. c->chrToYV12 = bgr15beToUV_c;
  1267. break;
  1268. case AV_PIX_FMT_BGR444LE:
  1269. c->chrToYV12 = bgr12leToUV_c;
  1270. break;
  1271. case AV_PIX_FMT_BGR444BE:
  1272. c->chrToYV12 = bgr12beToUV_c;
  1273. break;
  1274. case AV_PIX_FMT_BGR32:
  1275. c->chrToYV12 = rgb32ToUV_c;
  1276. break;
  1277. case AV_PIX_FMT_BGR32_1:
  1278. c->chrToYV12 = rgb321ToUV_c;
  1279. break;
  1280. case AV_PIX_FMT_RGB24:
  1281. c->chrToYV12 = rgb24ToUV_c;
  1282. break;
  1283. case AV_PIX_FMT_RGB565LE:
  1284. c->chrToYV12 = rgb16leToUV_c;
  1285. break;
  1286. case AV_PIX_FMT_RGB565BE:
  1287. c->chrToYV12 = rgb16beToUV_c;
  1288. break;
  1289. case AV_PIX_FMT_RGB555LE:
  1290. c->chrToYV12 = rgb15leToUV_c;
  1291. break;
  1292. case AV_PIX_FMT_RGB555BE:
  1293. c->chrToYV12 = rgb15beToUV_c;
  1294. break;
  1295. case AV_PIX_FMT_RGB444LE:
  1296. c->chrToYV12 = rgb12leToUV_c;
  1297. break;
  1298. case AV_PIX_FMT_RGB444BE:
  1299. c->chrToYV12 = rgb12beToUV_c;
  1300. break;
  1301. }
  1302. }
  1303. c->lumToYV12 = NULL;
  1304. c->alpToYV12 = NULL;
  1305. switch (srcFormat) {
  1306. case AV_PIX_FMT_GBRP9LE:
  1307. c->readLumPlanar = planar_rgb9le_to_y;
  1308. break;
  1309. case AV_PIX_FMT_GBRAP10LE:
  1310. c->readAlpPlanar = planar_rgb10le_to_a;
  1311. case AV_PIX_FMT_GBRP10LE:
  1312. c->readLumPlanar = planar_rgb10le_to_y;
  1313. break;
  1314. case AV_PIX_FMT_GBRAP12LE:
  1315. c->readAlpPlanar = planar_rgb12le_to_a;
  1316. case AV_PIX_FMT_GBRP12LE:
  1317. c->readLumPlanar = planar_rgb12le_to_y;
  1318. break;
  1319. case AV_PIX_FMT_GBRP14LE:
  1320. c->readLumPlanar = planar_rgb14le_to_y;
  1321. break;
  1322. case AV_PIX_FMT_GBRAP16LE:
  1323. c->readAlpPlanar = planar_rgb16le_to_a;
  1324. case AV_PIX_FMT_GBRP16LE:
  1325. c->readLumPlanar = planar_rgb16le_to_y;
  1326. break;
  1327. case AV_PIX_FMT_GBRAPF32LE:
  1328. c->readAlpPlanar = planar_rgbf32le_to_a;
  1329. case AV_PIX_FMT_GBRPF32LE:
  1330. c->readLumPlanar = planar_rgbf32le_to_y;
  1331. break;
  1332. case AV_PIX_FMT_GBRP9BE:
  1333. c->readLumPlanar = planar_rgb9be_to_y;
  1334. break;
  1335. case AV_PIX_FMT_GBRAP10BE:
  1336. c->readAlpPlanar = planar_rgb10be_to_a;
  1337. case AV_PIX_FMT_GBRP10BE:
  1338. c->readLumPlanar = planar_rgb10be_to_y;
  1339. break;
  1340. case AV_PIX_FMT_GBRAP12BE:
  1341. c->readAlpPlanar = planar_rgb12be_to_a;
  1342. case AV_PIX_FMT_GBRP12BE:
  1343. c->readLumPlanar = planar_rgb12be_to_y;
  1344. break;
  1345. case AV_PIX_FMT_GBRP14BE:
  1346. c->readLumPlanar = planar_rgb14be_to_y;
  1347. break;
  1348. case AV_PIX_FMT_GBRAP16BE:
  1349. c->readAlpPlanar = planar_rgb16be_to_a;
  1350. case AV_PIX_FMT_GBRP16BE:
  1351. c->readLumPlanar = planar_rgb16be_to_y;
  1352. break;
  1353. case AV_PIX_FMT_GBRAPF32BE:
  1354. c->readAlpPlanar = planar_rgbf32be_to_a;
  1355. case AV_PIX_FMT_GBRPF32BE:
  1356. c->readLumPlanar = planar_rgbf32be_to_y;
  1357. break;
  1358. case AV_PIX_FMT_GBRAP:
  1359. c->readAlpPlanar = planar_rgb_to_a;
  1360. case AV_PIX_FMT_GBRP:
  1361. c->readLumPlanar = planar_rgb_to_y;
  1362. break;
  1363. #if HAVE_BIGENDIAN
  1364. case AV_PIX_FMT_YUV420P9LE:
  1365. case AV_PIX_FMT_YUV422P9LE:
  1366. case AV_PIX_FMT_YUV444P9LE:
  1367. case AV_PIX_FMT_YUV420P10LE:
  1368. case AV_PIX_FMT_YUV422P10LE:
  1369. case AV_PIX_FMT_YUV440P10LE:
  1370. case AV_PIX_FMT_YUV444P10LE:
  1371. case AV_PIX_FMT_YUV420P12LE:
  1372. case AV_PIX_FMT_YUV422P12LE:
  1373. case AV_PIX_FMT_YUV440P12LE:
  1374. case AV_PIX_FMT_YUV444P12LE:
  1375. case AV_PIX_FMT_YUV420P14LE:
  1376. case AV_PIX_FMT_YUV422P14LE:
  1377. case AV_PIX_FMT_YUV444P14LE:
  1378. case AV_PIX_FMT_YUV420P16LE:
  1379. case AV_PIX_FMT_YUV422P16LE:
  1380. case AV_PIX_FMT_YUV444P16LE:
  1381. case AV_PIX_FMT_GRAY9LE:
  1382. case AV_PIX_FMT_GRAY10LE:
  1383. case AV_PIX_FMT_GRAY12LE:
  1384. case AV_PIX_FMT_GRAY14LE:
  1385. case AV_PIX_FMT_GRAY16LE:
  1386. case AV_PIX_FMT_P016LE:
  1387. c->lumToYV12 = bswap16Y_c;
  1388. break;
  1389. case AV_PIX_FMT_YUVA420P9LE:
  1390. case AV_PIX_FMT_YUVA422P9LE:
  1391. case AV_PIX_FMT_YUVA444P9LE:
  1392. case AV_PIX_FMT_YUVA420P10LE:
  1393. case AV_PIX_FMT_YUVA422P10LE:
  1394. case AV_PIX_FMT_YUVA444P10LE:
  1395. case AV_PIX_FMT_YUVA422P12LE:
  1396. case AV_PIX_FMT_YUVA444P12LE:
  1397. case AV_PIX_FMT_YUVA420P16LE:
  1398. case AV_PIX_FMT_YUVA422P16LE:
  1399. case AV_PIX_FMT_YUVA444P16LE:
  1400. c->lumToYV12 = bswap16Y_c;
  1401. c->alpToYV12 = bswap16Y_c;
  1402. break;
  1403. #else
  1404. case AV_PIX_FMT_YUV420P9BE:
  1405. case AV_PIX_FMT_YUV422P9BE:
  1406. case AV_PIX_FMT_YUV444P9BE:
  1407. case AV_PIX_FMT_YUV420P10BE:
  1408. case AV_PIX_FMT_YUV422P10BE:
  1409. case AV_PIX_FMT_YUV440P10BE:
  1410. case AV_PIX_FMT_YUV444P10BE:
  1411. case AV_PIX_FMT_YUV420P12BE:
  1412. case AV_PIX_FMT_YUV422P12BE:
  1413. case AV_PIX_FMT_YUV440P12BE:
  1414. case AV_PIX_FMT_YUV444P12BE:
  1415. case AV_PIX_FMT_YUV420P14BE:
  1416. case AV_PIX_FMT_YUV422P14BE:
  1417. case AV_PIX_FMT_YUV444P14BE:
  1418. case AV_PIX_FMT_YUV420P16BE:
  1419. case AV_PIX_FMT_YUV422P16BE:
  1420. case AV_PIX_FMT_YUV444P16BE:
  1421. case AV_PIX_FMT_GRAY9BE:
  1422. case AV_PIX_FMT_GRAY10BE:
  1423. case AV_PIX_FMT_GRAY12BE:
  1424. case AV_PIX_FMT_GRAY14BE:
  1425. case AV_PIX_FMT_GRAY16BE:
  1426. case AV_PIX_FMT_P016BE:
  1427. c->lumToYV12 = bswap16Y_c;
  1428. break;
  1429. case AV_PIX_FMT_YUVA420P9BE:
  1430. case AV_PIX_FMT_YUVA422P9BE:
  1431. case AV_PIX_FMT_YUVA444P9BE:
  1432. case AV_PIX_FMT_YUVA420P10BE:
  1433. case AV_PIX_FMT_YUVA422P10BE:
  1434. case AV_PIX_FMT_YUVA444P10BE:
  1435. case AV_PIX_FMT_YUVA422P12BE:
  1436. case AV_PIX_FMT_YUVA444P12BE:
  1437. case AV_PIX_FMT_YUVA420P16BE:
  1438. case AV_PIX_FMT_YUVA422P16BE:
  1439. case AV_PIX_FMT_YUVA444P16BE:
  1440. c->lumToYV12 = bswap16Y_c;
  1441. c->alpToYV12 = bswap16Y_c;
  1442. break;
  1443. #endif
  1444. case AV_PIX_FMT_YA16LE:
  1445. c->lumToYV12 = read_ya16le_gray_c;
  1446. break;
  1447. case AV_PIX_FMT_YA16BE:
  1448. c->lumToYV12 = read_ya16be_gray_c;
  1449. break;
  1450. case AV_PIX_FMT_AYUV64LE:
  1451. c->lumToYV12 = read_ayuv64le_Y_c;
  1452. break;
  1453. case AV_PIX_FMT_YUYV422:
  1454. case AV_PIX_FMT_YVYU422:
  1455. case AV_PIX_FMT_YA8:
  1456. c->lumToYV12 = yuy2ToY_c;
  1457. break;
  1458. case AV_PIX_FMT_UYVY422:
  1459. c->lumToYV12 = uyvyToY_c;
  1460. break;
  1461. case AV_PIX_FMT_BGR24:
  1462. c->lumToYV12 = bgr24ToY_c;
  1463. break;
  1464. case AV_PIX_FMT_BGR565LE:
  1465. c->lumToYV12 = bgr16leToY_c;
  1466. break;
  1467. case AV_PIX_FMT_BGR565BE:
  1468. c->lumToYV12 = bgr16beToY_c;
  1469. break;
  1470. case AV_PIX_FMT_BGR555LE:
  1471. c->lumToYV12 = bgr15leToY_c;
  1472. break;
  1473. case AV_PIX_FMT_BGR555BE:
  1474. c->lumToYV12 = bgr15beToY_c;
  1475. break;
  1476. case AV_PIX_FMT_BGR444LE:
  1477. c->lumToYV12 = bgr12leToY_c;
  1478. break;
  1479. case AV_PIX_FMT_BGR444BE:
  1480. c->lumToYV12 = bgr12beToY_c;
  1481. break;
  1482. case AV_PIX_FMT_RGB24:
  1483. c->lumToYV12 = rgb24ToY_c;
  1484. break;
  1485. case AV_PIX_FMT_RGB565LE:
  1486. c->lumToYV12 = rgb16leToY_c;
  1487. break;
  1488. case AV_PIX_FMT_RGB565BE:
  1489. c->lumToYV12 = rgb16beToY_c;
  1490. break;
  1491. case AV_PIX_FMT_RGB555LE:
  1492. c->lumToYV12 = rgb15leToY_c;
  1493. break;
  1494. case AV_PIX_FMT_RGB555BE:
  1495. c->lumToYV12 = rgb15beToY_c;
  1496. break;
  1497. case AV_PIX_FMT_RGB444LE:
  1498. c->lumToYV12 = rgb12leToY_c;
  1499. break;
  1500. case AV_PIX_FMT_RGB444BE:
  1501. c->lumToYV12 = rgb12beToY_c;
  1502. break;
  1503. case AV_PIX_FMT_RGB8:
  1504. case AV_PIX_FMT_BGR8:
  1505. case AV_PIX_FMT_PAL8:
  1506. case AV_PIX_FMT_BGR4_BYTE:
  1507. case AV_PIX_FMT_RGB4_BYTE:
  1508. c->lumToYV12 = palToY_c;
  1509. break;
  1510. case AV_PIX_FMT_MONOBLACK:
  1511. c->lumToYV12 = monoblack2Y_c;
  1512. break;
  1513. case AV_PIX_FMT_MONOWHITE:
  1514. c->lumToYV12 = monowhite2Y_c;
  1515. break;
  1516. case AV_PIX_FMT_RGB32:
  1517. c->lumToYV12 = bgr32ToY_c;
  1518. break;
  1519. case AV_PIX_FMT_RGB32_1:
  1520. c->lumToYV12 = bgr321ToY_c;
  1521. break;
  1522. case AV_PIX_FMT_BGR32:
  1523. c->lumToYV12 = rgb32ToY_c;
  1524. break;
  1525. case AV_PIX_FMT_BGR32_1:
  1526. c->lumToYV12 = rgb321ToY_c;
  1527. break;
  1528. case AV_PIX_FMT_RGB48BE:
  1529. c->lumToYV12 = rgb48BEToY_c;
  1530. break;
  1531. case AV_PIX_FMT_RGB48LE:
  1532. c->lumToYV12 = rgb48LEToY_c;
  1533. break;
  1534. case AV_PIX_FMT_BGR48BE:
  1535. c->lumToYV12 = bgr48BEToY_c;
  1536. break;
  1537. case AV_PIX_FMT_BGR48LE:
  1538. c->lumToYV12 = bgr48LEToY_c;
  1539. break;
  1540. case AV_PIX_FMT_RGBA64BE:
  1541. c->lumToYV12 = rgb64BEToY_c;
  1542. break;
  1543. case AV_PIX_FMT_RGBA64LE:
  1544. c->lumToYV12 = rgb64LEToY_c;
  1545. break;
  1546. case AV_PIX_FMT_BGRA64BE:
  1547. c->lumToYV12 = bgr64BEToY_c;
  1548. break;
  1549. case AV_PIX_FMT_BGRA64LE:
  1550. c->lumToYV12 = bgr64LEToY_c;
  1551. break;
  1552. case AV_PIX_FMT_P010LE:
  1553. c->lumToYV12 = p010LEToY_c;
  1554. break;
  1555. case AV_PIX_FMT_P010BE:
  1556. c->lumToYV12 = p010BEToY_c;
  1557. break;
  1558. case AV_PIX_FMT_GRAYF32LE:
  1559. #if HAVE_BIGENDIAN
  1560. c->lumToYV12 = grayf32ToY16_bswap_c;
  1561. #else
  1562. c->lumToYV12 = grayf32ToY16_c;
  1563. #endif
  1564. break;
  1565. case AV_PIX_FMT_GRAYF32BE:
  1566. #if HAVE_BIGENDIAN
  1567. c->lumToYV12 = grayf32ToY16_c;
  1568. #else
  1569. c->lumToYV12 = grayf32ToY16_bswap_c;
  1570. #endif
  1571. break;
  1572. case AV_PIX_FMT_Y210LE:
  1573. c->lumToYV12 = y210le_Y_c;
  1574. break;
  1575. }
  1576. if (c->needAlpha) {
  1577. if (is16BPS(srcFormat) || isNBPS(srcFormat)) {
  1578. if (HAVE_BIGENDIAN == !isBE(srcFormat) && !c->readAlpPlanar)
  1579. c->alpToYV12 = bswap16Y_c;
  1580. }
  1581. switch (srcFormat) {
  1582. case AV_PIX_FMT_BGRA64LE:
  1583. case AV_PIX_FMT_RGBA64LE: c->alpToYV12 = rgba64leToA_c; break;
  1584. case AV_PIX_FMT_BGRA64BE:
  1585. case AV_PIX_FMT_RGBA64BE: c->alpToYV12 = rgba64beToA_c; break;
  1586. case AV_PIX_FMT_BGRA:
  1587. case AV_PIX_FMT_RGBA:
  1588. c->alpToYV12 = rgbaToA_c;
  1589. break;
  1590. case AV_PIX_FMT_ABGR:
  1591. case AV_PIX_FMT_ARGB:
  1592. c->alpToYV12 = abgrToA_c;
  1593. break;
  1594. case AV_PIX_FMT_YA8:
  1595. c->alpToYV12 = uyvyToY_c;
  1596. break;
  1597. case AV_PIX_FMT_YA16LE:
  1598. c->alpToYV12 = read_ya16le_alpha_c;
  1599. break;
  1600. case AV_PIX_FMT_YA16BE:
  1601. c->alpToYV12 = read_ya16be_alpha_c;
  1602. break;
  1603. case AV_PIX_FMT_AYUV64LE:
  1604. c->alpToYV12 = read_ayuv64le_A_c;
  1605. break;
  1606. case AV_PIX_FMT_PAL8 :
  1607. c->alpToYV12 = palToA_c;
  1608. break;
  1609. }
  1610. }
  1611. }