
  1. /*
  2. * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <math.h>
  21. #include <stdint.h>
  22. #include <stdio.h>
  23. #include <string.h>
  24. #include "libavutil/avutil.h"
  25. #include "libavutil/bswap.h"
  26. #include "libavutil/cpu.h"
  27. #include "libavutil/intreadwrite.h"
  28. #include "libavutil/mathematics.h"
  29. #include "libavutil/pixdesc.h"
  30. #include "libavutil/avassert.h"
  31. #include "config.h"
  32. #include "rgb2rgb.h"
  33. #include "swscale.h"
  34. #include "swscale_internal.h"
  35. #define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))
  36. #define r ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? b_r : r_b)
  37. #define b ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? r_b : b_r)
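/* The r/b macros above swap which of the two temporaries (r_b, b_r) is treated
 * as red and which as blue, depending on whether the source format stores blue
 * first (BGR48/BGRA64) or red first (RGB48/RGBA64). This lets the template
 * bodies below handle both component orders with one loop. */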
  38. static av_always_inline void
  39. rgb64ToY_c_template(uint16_t *dst, const uint16_t *src, int width,
  40. enum AVPixelFormat origin, int32_t *rgb2yuv)
  41. {
  42. int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
  43. int i;
  44. for (i = 0; i < width; i++) {
  45. unsigned int r_b = input_pixel(&src[i*4+0]);
  46. unsigned int g = input_pixel(&src[i*4+1]);
  47. unsigned int b_r = input_pixel(&src[i*4+2]);
  48. dst[i] = (ry*r + gy*g + by*b + (0x2001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
  49. }
  50. }
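/* Fixed-point luma conversion: ry/gy/by are Y coefficients pre-scaled by
 * 2^RGB2YUV_SHIFT, so the weighted sum is shifted back down by RGB2YUV_SHIFT.
 * Reading the constant 0x2001 << (RGB2YUV_SHIFT-1) from the arithmetic (this
 * interpretation is the editor's, not an upstream comment):
 *   0x2000 << (RGB2YUV_SHIFT-1)  -> the limited-range luma offset
 *                                   (16 << 8 after the final shift), and
 *   0x1    << (RGB2YUV_SHIFT-1)  -> a +0.5 bias so the shift rounds to
 *                                   nearest instead of truncating. */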
  51. static av_always_inline void
  52. rgb64ToUV_c_template(uint16_t *dstU, uint16_t *dstV,
  53. const uint16_t *src1, const uint16_t *src2,
  54. int width, enum AVPixelFormat origin, int32_t *rgb2yuv)
  55. {
  56. int i;
  57. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  58. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  59. av_assert1(src1==src2);
  60. for (i = 0; i < width; i++) {
  61. int r_b = input_pixel(&src1[i*4+0]);
  62. int g = input_pixel(&src1[i*4+1]);
  63. int b_r = input_pixel(&src1[i*4+2]);
  64. dstU[i] = (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
  65. dstV[i] = (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
  66. }
  67. }
  68. static av_always_inline void
  69. rgb64ToUV_half_c_template(uint16_t *dstU, uint16_t *dstV,
  70. const uint16_t *src1, const uint16_t *src2,
  71. int width, enum AVPixelFormat origin, int32_t *rgb2yuv)
  72. {
  73. int i;
  74. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  75. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  76. av_assert1(src1==src2);
  77. for (i = 0; i < width; i++) {
  78. int r_b = (input_pixel(&src1[8 * i + 0]) + input_pixel(&src1[8 * i + 4]) + 1) >> 1;
  79. int g = (input_pixel(&src1[8 * i + 1]) + input_pixel(&src1[8 * i + 5]) + 1) >> 1;
  80. int b_r = (input_pixel(&src1[8 * i + 2]) + input_pixel(&src1[8 * i + 6]) + 1) >> 1;
  81. dstU[i]= (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
  82. dstV[i]= (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
  83. }
  84. }
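/* The _half variant is selected when chroma is horizontally subsampled: each
 * output U/V sample comes from the rounded average of two horizontally
 * adjacent source pixels ((a + b + 1) >> 1 per component), followed by the
 * same fixed-point conversion as above. The 0x10001 constant plays the same
 * role as in rgb64ToUV_c_template: 0x10000 contributes the 128 chroma offset
 * after the shift, the extra 1 contributes rounding. */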
  85. #define rgb64funcs(pattern, BE_LE, origin) \
  86. static void pattern ## 64 ## BE_LE ## ToY_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused0, const uint8_t *unused1,\
  87. int width, uint32_t *rgb2yuv) \
  88. { \
  89. const uint16_t *src = (const uint16_t *) _src; \
  90. uint16_t *dst = (uint16_t *) _dst; \
  91. rgb64ToY_c_template(dst, src, width, origin, rgb2yuv); \
  92. } \
  93. \
  94. static void pattern ## 64 ## BE_LE ## ToUV_c(uint8_t *_dstU, uint8_t *_dstV, \
  95. const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
  96. int width, uint32_t *rgb2yuv) \
  97. { \
  98. const uint16_t *src1 = (const uint16_t *) _src1, \
  99. *src2 = (const uint16_t *) _src2; \
  100. uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
  101. rgb64ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
  102. } \
  103. \
  104. static void pattern ## 64 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, \
  105. const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
  106. int width, uint32_t *rgb2yuv) \
  107. { \
  108. const uint16_t *src1 = (const uint16_t *) _src1, \
  109. *src2 = (const uint16_t *) _src2; \
  110. uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
  111. rgb64ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
  112. }
  113. rgb64funcs(rgb, LE, AV_PIX_FMT_RGBA64LE)
  114. rgb64funcs(rgb, BE, AV_PIX_FMT_RGBA64BE)
  115. rgb64funcs(bgr, LE, AV_PIX_FMT_BGRA64LE)
  116. rgb64funcs(bgr, BE, AV_PIX_FMT_BGRA64BE)
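/* Each rgb64funcs() line instantiates three wrappers for one 16-bit-per-
 * component packed RGBA/BGRA layout; e.g. the first line generates
 * rgb64LEToY_c, rgb64LEToUV_c and rgb64LEToUV_half_c for AV_PIX_FMT_RGBA64LE.
 * The wrappers only cast the byte pointers to uint16_t and forward to the
 * templates, which the compiler can then specialize per format. */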
  117. static av_always_inline void rgb48ToY_c_template(uint16_t *dst,
  118. const uint16_t *src, int width,
  119. enum AVPixelFormat origin,
  120. int32_t *rgb2yuv)
  121. {
  122. int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
  123. int i;
  124. for (i = 0; i < width; i++) {
  125. unsigned int r_b = input_pixel(&src[i * 3 + 0]);
  126. unsigned int g = input_pixel(&src[i * 3 + 1]);
  127. unsigned int b_r = input_pixel(&src[i * 3 + 2]);
  128. dst[i] = (ry*r + gy*g + by*b + (0x2001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
  129. }
  130. }
  131. static av_always_inline void rgb48ToUV_c_template(uint16_t *dstU,
  132. uint16_t *dstV,
  133. const uint16_t *src1,
  134. const uint16_t *src2,
  135. int width,
  136. enum AVPixelFormat origin,
  137. int32_t *rgb2yuv)
  138. {
  139. int i;
  140. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  141. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  142. av_assert1(src1 == src2);
  143. for (i = 0; i < width; i++) {
  144. int r_b = input_pixel(&src1[i * 3 + 0]);
  145. int g = input_pixel(&src1[i * 3 + 1]);
  146. int b_r = input_pixel(&src1[i * 3 + 2]);
  147. dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
  148. dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
  149. }
  150. }
  151. static av_always_inline void rgb48ToUV_half_c_template(uint16_t *dstU,
  152. uint16_t *dstV,
  153. const uint16_t *src1,
  154. const uint16_t *src2,
  155. int width,
  156. enum AVPixelFormat origin,
  157. int32_t *rgb2yuv)
  158. {
  159. int i;
  160. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  161. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  162. av_assert1(src1 == src2);
  163. for (i = 0; i < width; i++) {
  164. int r_b = (input_pixel(&src1[6 * i + 0]) +
  165. input_pixel(&src1[6 * i + 3]) + 1) >> 1;
  166. int g = (input_pixel(&src1[6 * i + 1]) +
  167. input_pixel(&src1[6 * i + 4]) + 1) >> 1;
  168. int b_r = (input_pixel(&src1[6 * i + 2]) +
  169. input_pixel(&src1[6 * i + 5]) + 1) >> 1;
  170. dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
  171. dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
  172. }
  173. }
  174. #undef r
  175. #undef b
  176. #undef input_pixel
  177. #define rgb48funcs(pattern, BE_LE, origin) \
  178. static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst, \
  179. const uint8_t *_src, \
  180. const uint8_t *unused0, const uint8_t *unused1,\
  181. int width, \
  182. uint32_t *rgb2yuv) \
  183. { \
  184. const uint16_t *src = (const uint16_t *)_src; \
  185. uint16_t *dst = (uint16_t *)_dst; \
  186. rgb48ToY_c_template(dst, src, width, origin, rgb2yuv); \
  187. } \
  188. \
  189. static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU, \
  190. uint8_t *_dstV, \
  191. const uint8_t *unused0, \
  192. const uint8_t *_src1, \
  193. const uint8_t *_src2, \
  194. int width, \
  195. uint32_t *rgb2yuv) \
  196. { \
  197. const uint16_t *src1 = (const uint16_t *)_src1, \
  198. *src2 = (const uint16_t *)_src2; \
  199. uint16_t *dstU = (uint16_t *)_dstU, \
  200. *dstV = (uint16_t *)_dstV; \
  201. rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
  202. } \
  203. \
  204. static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, \
  205. uint8_t *_dstV, \
  206. const uint8_t *unused0, \
  207. const uint8_t *_src1, \
  208. const uint8_t *_src2, \
  209. int width, \
  210. uint32_t *rgb2yuv) \
  211. { \
  212. const uint16_t *src1 = (const uint16_t *)_src1, \
  213. *src2 = (const uint16_t *)_src2; \
  214. uint16_t *dstU = (uint16_t *)_dstU, \
  215. *dstV = (uint16_t *)_dstV; \
  216. rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
  217. }
  218. rgb48funcs(rgb, LE, AV_PIX_FMT_RGB48LE)
  219. rgb48funcs(rgb, BE, AV_PIX_FMT_RGB48BE)
  220. rgb48funcs(bgr, LE, AV_PIX_FMT_BGR48LE)
  221. rgb48funcs(bgr, BE, AV_PIX_FMT_BGR48BE)
  222. #define input_pixel(i) ((origin == AV_PIX_FMT_RGBA || \
  223. origin == AV_PIX_FMT_BGRA || \
  224. origin == AV_PIX_FMT_ARGB || \
  225. origin == AV_PIX_FMT_ABGR) \
  226. ? AV_RN32A(&src[(i) * 4]) \
  227. : (isBE(origin) ? AV_RB16(&src[(i) * 2]) \
  228. : AV_RL16(&src[(i) * 2])))
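/* For the packed formats handled below, input_pixel(i) reads one pixel:
 * 8-bit-per-component 32-bit formats (RGBA/BGRA/ARGB/ABGR) are loaded as an
 * aligned native-order 32-bit word, while the 16-bit packed formats
 * (565/555/444) are loaded as a 16-bit word in the byte order implied by the
 * format (AV_RB16 for big-endian variants, AV_RL16 otherwise). */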
  229. static av_always_inline void rgb16_32ToY_c_template(int16_t *dst,
  230. const uint8_t *src,
  231. int width,
  232. enum AVPixelFormat origin,
  233. int shr, int shg,
  234. int shb, int shp,
  235. int maskr, int maskg,
  236. int maskb, int rsh,
  237. int gsh, int bsh, int S,
  238. int32_t *rgb2yuv)
  239. {
  240. const int ry = rgb2yuv[RY_IDX]<<rsh, gy = rgb2yuv[GY_IDX]<<gsh, by = rgb2yuv[BY_IDX]<<bsh;
  241. const unsigned rnd = (32<<((S)-1)) + (1<<(S-7));
  242. int i;
  243. for (i = 0; i < width; i++) {
  244. int px = input_pixel(i) >> shp;
  245. int b = (px & maskb) >> shb;
  246. int g = (px & maskg) >> shg;
  247. int r = (px & maskr) >> shr;
  248. dst[i] = (ry * r + gy * g + by * b + rnd) >> ((S)-6);
  249. }
  250. }
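/* Generic packed-RGB luma reader: shp drops a leading alpha byte (used by the
 * "_1" 32-bit variants), maskr/maskg/maskb together with shr/shg/shb extract
 * the three channels, and rsh/gsh/bsh pre-scale the coefficients so that,
 * combined with the extraction, every product lands on the common scale S.
 * Shifting the sum right by S-6 leaves the result on the usual intermediate
 * range of this file (an 8-bit channel effectively multiplied by 64); the rnd
 * constant folds the luma offset (32 << (S-1), i.e. 16 << 6 after the shift)
 * together with a rounding bias (1 << (S-7)). For instance, the rgb32 wrapper
 * below uses S = RGB2YUV_SHIFT + 8, so the sum is shifted down by
 * RGB2YUV_SHIFT + 2. */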
  251. static av_always_inline void rgb16_32ToUV_c_template(int16_t *dstU,
  252. int16_t *dstV,
  253. const uint8_t *src,
  254. int width,
  255. enum AVPixelFormat origin,
  256. int shr, int shg,
  257. int shb, int shp,
  258. int maskr, int maskg,
  259. int maskb, int rsh,
  260. int gsh, int bsh, int S,
  261. int32_t *rgb2yuv)
  262. {
  263. const int ru = rgb2yuv[RU_IDX] * (1 << rsh), gu = rgb2yuv[GU_IDX] * (1 << gsh), bu = rgb2yuv[BU_IDX] * (1 << bsh),
  264. rv = rgb2yuv[RV_IDX] * (1 << rsh), gv = rgb2yuv[GV_IDX] * (1 << gsh), bv = rgb2yuv[BV_IDX] * (1 << bsh);
  265. const unsigned rnd = (256u<<((S)-1)) + (1<<(S-7));
  266. int i;
  267. for (i = 0; i < width; i++) {
  268. int px = input_pixel(i) >> shp;
  269. int b = (px & maskb) >> shb;
  270. int g = (px & maskg) >> shg;
  271. int r = (px & maskr) >> shr;
  272. dstU[i] = (ru * r + gu * g + bu * b + rnd) >> ((S)-6);
  273. dstV[i] = (rv * r + gv * g + bv * b + rnd) >> ((S)-6);
  274. }
  275. }
  276. static av_always_inline void rgb16_32ToUV_half_c_template(int16_t *dstU,
  277. int16_t *dstV,
  278. const uint8_t *src,
  279. int width,
  280. enum AVPixelFormat origin,
  281. int shr, int shg,
  282. int shb, int shp,
  283. int maskr, int maskg,
  284. int maskb, int rsh,
  285. int gsh, int bsh, int S,
  286. int32_t *rgb2yuv)
  287. {
  288. const int ru = rgb2yuv[RU_IDX] * (1 << rsh), gu = rgb2yuv[GU_IDX] * (1 << gsh), bu = rgb2yuv[BU_IDX] * (1 << bsh),
  289. rv = rgb2yuv[RV_IDX] * (1 << rsh), gv = rgb2yuv[GV_IDX] * (1 << gsh), bv = rgb2yuv[BV_IDX] * (1 << bsh),
  290. maskgx = ~(maskr | maskb);
  291. const unsigned rnd = (256U<<(S)) + (1<<(S-6));
  292. int i;
  293. maskr |= maskr << 1;
  294. maskb |= maskb << 1;
  295. maskg |= maskg << 1;
  296. for (i = 0; i < width; i++) {
  297. unsigned px0 = input_pixel(2 * i + 0) >> shp;
  298. unsigned px1 = input_pixel(2 * i + 1) >> shp;
  299. int b, r, g = (px0 & maskgx) + (px1 & maskgx);
  300. int rb = px0 + px1 - g;
  301. b = (rb & maskb) >> shb;
  302. if (shp ||
  303. origin == AV_PIX_FMT_BGR565LE || origin == AV_PIX_FMT_BGR565BE ||
  304. origin == AV_PIX_FMT_RGB565LE || origin == AV_PIX_FMT_RGB565BE) {
  305. g >>= shg;
  306. } else {
  307. g = (g & maskg) >> shg;
  308. }
  309. r = (rb & maskr) >> shr;
  310. dstU[i] = (ru * r + gu * g + bu * b + (unsigned)rnd) >> ((S)-6+1);
  311. dstV[i] = (rv * r + gv * g + bv * b + (unsigned)rnd) >> ((S)-6+1);
  312. }
  313. }
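/* Half-resolution chroma for packed formats uses a small trick: the two
 * neighbouring pixels are added as whole words, and red/blue are extracted
 * from that sum with masks widened by one bit (maskr |= maskr << 1, ...).
 * Green is accumulated separately through maskgx beforehand, because its sum
 * could otherwise carry into the adjacent red/blue fields. Since every channel
 * now holds the sum of two pixels, the final shift is one bit larger
 * ((S)-6+1) and the rnd constant is doubled accordingly. */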
  314. #undef input_pixel
  315. #define rgb16_32_wrapper(fmt, name, shr, shg, shb, shp, maskr, \
  316. maskg, maskb, rsh, gsh, bsh, S) \
  317. static void name ## ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, \
  318. int width, uint32_t *tab) \
  319. { \
  320. rgb16_32ToY_c_template((int16_t*)dst, src, width, fmt, shr, shg, shb, shp, \
  321. maskr, maskg, maskb, rsh, gsh, bsh, S, tab); \
  322. } \
  323. \
  324. static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \
  325. const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \
  326. int width, uint32_t *tab) \
  327. { \
  328. rgb16_32ToUV_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
  329. shr, shg, shb, shp, \
  330. maskr, maskg, maskb, rsh, gsh, bsh, S, tab);\
  331. } \
  332. \
  333. static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV, \
  334. const uint8_t *unused0, const uint8_t *src, \
  335. const uint8_t *dummy, \
  336. int width, uint32_t *tab) \
  337. { \
  338. rgb16_32ToUV_half_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
  339. shr, shg, shb, shp, \
  340. maskr, maskg, maskb, \
  341. rsh, gsh, bsh, S, tab); \
  342. }
  343. rgb16_32_wrapper(AV_PIX_FMT_BGR32, bgr32, 16, 0, 0, 0, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT + 8)
  344. rgb16_32_wrapper(AV_PIX_FMT_BGR32_1, bgr321, 16, 0, 0, 8, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT + 8)
  345. rgb16_32_wrapper(AV_PIX_FMT_RGB32, rgb32, 0, 0, 16, 0, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT + 8)
  346. rgb16_32_wrapper(AV_PIX_FMT_RGB32_1, rgb321, 0, 0, 16, 8, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT + 8)
  347. rgb16_32_wrapper(AV_PIX_FMT_BGR565LE, bgr16le, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT + 8)
  348. rgb16_32_wrapper(AV_PIX_FMT_BGR555LE, bgr15le, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT + 7)
  349. rgb16_32_wrapper(AV_PIX_FMT_BGR444LE, bgr12le, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT + 4)
  350. rgb16_32_wrapper(AV_PIX_FMT_RGB565LE, rgb16le, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT + 8)
  351. rgb16_32_wrapper(AV_PIX_FMT_RGB555LE, rgb15le, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT + 7)
  352. rgb16_32_wrapper(AV_PIX_FMT_RGB444LE, rgb12le, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT + 4)
  353. rgb16_32_wrapper(AV_PIX_FMT_BGR565BE, bgr16be, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT + 8)
  354. rgb16_32_wrapper(AV_PIX_FMT_BGR555BE, bgr15be, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT + 7)
  355. rgb16_32_wrapper(AV_PIX_FMT_BGR444BE, bgr12be, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT + 4)
  356. rgb16_32_wrapper(AV_PIX_FMT_RGB565BE, rgb16be, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT + 8)
  357. rgb16_32_wrapper(AV_PIX_FMT_RGB555BE, rgb15be, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT + 7)
  358. rgb16_32_wrapper(AV_PIX_FMT_RGB444BE, rgb12be, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT + 4)
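/* The rgb16_32_wrapper() table above pins down, for every packed RGB/BGR
 * format, where each channel sits (masks and shifts), how much the
 * coefficients must be pre-scaled, and the working scale S. Each invocation
 * generates <name>ToY_c, <name>ToUV_c and <name>ToUV_half_c, which are the
 * names registered in ff_sws_init_input_funcs() below. */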
  359. static void gbr24pToUV_half_c(uint8_t *_dstU, uint8_t *_dstV,
  360. const uint8_t *gsrc, const uint8_t *bsrc, const uint8_t *rsrc,
  361. int width, uint32_t *rgb2yuv)
  362. {
  363. uint16_t *dstU = (uint16_t *)_dstU;
  364. uint16_t *dstV = (uint16_t *)_dstV;
  365. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  366. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  367. int i;
  368. for (i = 0; i < width; i++) {
  369. unsigned int g = gsrc[2*i] + gsrc[2*i+1];
  370. unsigned int b = bsrc[2*i] + bsrc[2*i+1];
  371. unsigned int r = rsrc[2*i] + rsrc[2*i+1];
  372. dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
  373. dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
  374. }
  375. }
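/* Planar 8-bit GBR chroma at half horizontal resolution: each plane is
 * sampled at 2*i and 2*i+1 and the two samples are summed (not yet halved),
 * so the conversion uses a shift that is one bit larger, exactly as in the
 * packed _half readers above. */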
  376. static void rgba64leToA_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
  377. const uint8_t *unused2, int width, uint32_t *unused)
  378. {
  379. int16_t *dst = (int16_t *)_dst;
  380. const uint16_t *src = (const uint16_t *)_src;
  381. int i;
  382. for (i = 0; i < width; i++)
  383. dst[i] = AV_RL16(src + 4 * i + 3);
  384. }
  385. static void rgba64beToA_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
  386. const uint8_t *unused2, int width, uint32_t *unused)
  387. {
  388. int16_t *dst = (int16_t *)_dst;
  389. const uint16_t *src = (const uint16_t *)_src;
  390. int i;
  391. for (i = 0; i < width; i++)
  392. dst[i] = AV_RB16(src + 4 * i + 3);
  393. }
  394. static void abgrToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
  395. {
  396. int16_t *dst = (int16_t *)_dst;
  397. int i;
  398. for (i=0; i<width; i++) {
  399. dst[i]= src[4*i]<<6;
  400. }
  401. }
  402. static void rgbaToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
  403. {
  404. int16_t *dst = (int16_t *)_dst;
  405. int i;
  406. for (i=0; i<width; i++) {
  407. dst[i]= src[4*i+3]<<6;
  408. }
  409. }
  410. static void palToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
  411. {
  412. int16_t *dst = (int16_t *)_dst;
  413. int i;
  414. for (i=0; i<width; i++) {
  415. int d= src[i];
  416. dst[i]= (pal[d] >> 24)<<6;
  417. }
  418. }
  419. static void palToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
  420. {
  421. int16_t *dst = (int16_t *)_dst;
  422. int i;
  423. for (i = 0; i < width; i++) {
  424. int d = src[i];
  425. dst[i] = (pal[d] & 0xFF)<<6;
  426. }
  427. }
  428. static void palToUV_c(uint8_t *_dstU, uint8_t *_dstV,
  429. const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
  430. int width, uint32_t *pal)
  431. {
  432. uint16_t *dstU = (uint16_t *)_dstU;
  433. int16_t *dstV = (int16_t *)_dstV;
  434. int i;
  435. av_assert1(src1 == src2);
  436. for (i = 0; i < width; i++) {
  437. int p = pal[src1[i]];
  438. dstU[i] = (uint8_t)(p>> 8)<<6;
  439. dstV[i] = (uint8_t)(p>>16)<<6;
  440. }
  441. }
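/* Note on the palette readers: pal[] is indexed with the 8-bit source value,
 * and each 32-bit entry is read as Y in the low byte, U and V in the next two
 * bytes, and alpha in the top byte. That layout assumes the caller has already
 * converted the format's RGB palette into YUV before these functions run,
 * which is how swscale prepares palettes for paletted input. */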
  442. static void monowhite2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
  443. {
  444. int16_t *dst = (int16_t *)_dst;
  445. int i, j;
  446. width = (width + 7) >> 3;
  447. for (i = 0; i < width; i++) {
  448. int d = ~src[i];
  449. for (j = 0; j < 8; j++)
  450. dst[8*i+j]= ((d>>(7-j))&1) * 16383;
  451. }
  452. if(width&7){
  453. int d= ~src[i];
  454. for (j = 0; j < (width&7); j++)
  455. dst[8*i+j]= ((d>>(7-j))&1) * 16383;
  456. }
  457. }
  458. static void monoblack2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
  459. {
  460. int16_t *dst = (int16_t *)_dst;
  461. int i, j;
  462. width = (width + 7) >> 3;
  463. for (i = 0; i < width; i++) {
  464. int d = src[i];
  465. for (j = 0; j < 8; j++)
  466. dst[8*i+j]= ((d>>(7-j))&1) * 16383;
  467. }
  468. if(width&7){
  469. int d = src[i];
  470. for (j = 0; j < (width&7); j++)
  471. dst[8*i+j] = ((d>>(7-j))&1) * 16383;
  472. }
  473. }
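/* 1 bpp sources: each input byte expands to eight luma samples of either 0 or
 * 16383 ((1 << 14) - 1), i.e. full scale at this file's intermediate
 * precision. monowhite2Y_c inverts the bits first because in
 * AV_PIX_FMT_MONOWHITE a 0 bit means white. */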
  474. static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  475. uint32_t *unused)
  476. {
  477. int i;
  478. for (i = 0; i < width; i++)
  479. dst[i] = src[2 * i];
  480. }
  481. static void yuy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
  482. const uint8_t *src2, int width, uint32_t *unused)
  483. {
  484. int i;
  485. for (i = 0; i < width; i++) {
  486. dstU[i] = src1[4 * i + 1];
  487. dstV[i] = src1[4 * i + 3];
  488. }
  489. av_assert1(src1 == src2);
  490. }
  491. static void yvy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
  492. const uint8_t *src2, int width, uint32_t *unused)
  493. {
  494. int i;
  495. for (i = 0; i < width; i++) {
  496. dstV[i] = src1[4 * i + 1];
  497. dstU[i] = src1[4 * i + 3];
  498. }
  499. av_assert1(src1 == src2);
  500. }
  501. static void bswap16Y_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1, const uint8_t *unused2, int width,
  502. uint32_t *unused)
  503. {
  504. int i;
  505. const uint16_t *src = (const uint16_t *)_src;
  506. uint16_t *dst = (uint16_t *)_dst;
  507. for (i = 0; i < width; i++)
  508. dst[i] = av_bswap16(src[i]);
  509. }
  510. static void bswap16UV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *_src1,
  511. const uint8_t *_src2, int width, uint32_t *unused)
  512. {
  513. int i;
  514. const uint16_t *src1 = (const uint16_t *)_src1,
  515. *src2 = (const uint16_t *)_src2;
  516. uint16_t *dstU = (uint16_t *)_dstU, *dstV = (uint16_t *)_dstV;
  517. for (i = 0; i < width; i++) {
  518. dstU[i] = av_bswap16(src1[i]);
  519. dstV[i] = av_bswap16(src2[i]);
  520. }
  521. }
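/* bswap16Y_c / bswap16UV_c are used for high-bit-depth YUV whose byte order is
 * the opposite of the host's: they byte-swap every 16-bit sample so the rest
 * of the pipeline can work in native order (see the HAVE_BIGENDIAN switches in
 * ff_sws_init_input_funcs() below). */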
  522. static void read_ya16le_gray_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  523. uint32_t *unused)
  524. {
  525. int i;
  526. for (i = 0; i < width; i++)
  527. AV_WN16(dst + i * 2, AV_RL16(src + i * 4));
  528. }
  529. static void read_ya16le_alpha_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  530. uint32_t *unused)
  531. {
  532. int i;
  533. for (i = 0; i < width; i++)
  534. AV_WN16(dst + i * 2, AV_RL16(src + i * 4 + 2));
  535. }
  536. static void read_ya16be_gray_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  537. uint32_t *unused)
  538. {
  539. int i;
  540. for (i = 0; i < width; i++)
  541. AV_WN16(dst + i * 2, AV_RB16(src + i * 4));
  542. }
  543. static void read_ya16be_alpha_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  544. uint32_t *unused)
  545. {
  546. int i;
  547. for (i = 0; i < width; i++)
  548. AV_WN16(dst + i * 2, AV_RB16(src + i * 4 + 2));
  549. }
  550. static void read_ayuv64le_Y_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
  551. uint32_t *unused2)
  552. {
  553. int i;
  554. for (i = 0; i < width; i++)
  555. AV_WN16(dst + i * 2, AV_RL16(src + i * 8 + 2));
  556. }
  557. static void read_ayuv64le_UV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src,
  558. const uint8_t *unused1, int width, uint32_t *unused2)
  559. {
  560. int i;
  561. for (i = 0; i < width; i++) {
  562. AV_WN16(dstU + i * 2, AV_RL16(src + i * 8 + 4));
  563. AV_WN16(dstV + i * 2, AV_RL16(src + i * 8 + 6));
  564. }
  565. }
  566. static void read_ayuv64le_A_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
  567. uint32_t *unused2)
  568. {
  569. int i;
  570. for (i = 0; i < width; i++)
  571. AV_WN16(dst + i * 2, AV_RL16(src + i * 8));
  572. }
  573. /* This is almost identical to the previous one, and exists only because
  574. * yuy2ToY/UV(dst, src + 1, ...) would have 100% unaligned accesses. */
  575. static void uyvyToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  576. uint32_t *unused)
  577. {
  578. int i;
  579. for (i = 0; i < width; i++)
  580. dst[i] = src[2 * i + 1];
  581. }
  582. static void uyvyToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
  583. const uint8_t *src2, int width, uint32_t *unused)
  584. {
  585. int i;
  586. for (i = 0; i < width; i++) {
  587. dstU[i] = src1[4 * i + 0];
  588. dstV[i] = src1[4 * i + 2];
  589. }
  590. av_assert1(src1 == src2);
  591. }
  592. static av_always_inline void nvXXtoUV_c(uint8_t *dst1, uint8_t *dst2,
  593. const uint8_t *src, int width)
  594. {
  595. int i;
  596. for (i = 0; i < width; i++) {
  597. dst1[i] = src[2 * i + 0];
  598. dst2[i] = src[2 * i + 1];
  599. }
  600. }
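/* NV12/NV21 carry interleaved chroma in a single plane (UVUV... or VUVU...).
 * nvXXtoUV_c deinterleaves one line; nv12ToUV_c and nv21ToUV_c below differ
 * only in which destination receives the first sample of each pair. */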
  601. static void nv12ToUV_c(uint8_t *dstU, uint8_t *dstV,
  602. const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
  603. int width, uint32_t *unused)
  604. {
  605. nvXXtoUV_c(dstU, dstV, src1, width);
  606. }
  607. static void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV,
  608. const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
  609. int width, uint32_t *unused)
  610. {
  611. nvXXtoUV_c(dstV, dstU, src1, width);
  612. }
  613. static void p010LEToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1,
  614. const uint8_t *unused2, int width, uint32_t *unused)
  615. {
  616. int i;
  617. for (i = 0; i < width; i++) {
  618. AV_WN16(dst + i * 2, AV_RL16(src + i * 2) >> 6);
  619. }
  620. }
  621. static void p010BEToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1,
  622. const uint8_t *unused2, int width, uint32_t *unused)
  623. {
  624. int i;
  625. for (i = 0; i < width; i++) {
  626. AV_WN16(dst + i * 2, AV_RB16(src + i * 2) >> 6);
  627. }
  628. }
  629. static void p010LEToUV_c(uint8_t *dstU, uint8_t *dstV,
  630. const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
  631. int width, uint32_t *unused)
  632. {
  633. int i;
  634. for (i = 0; i < width; i++) {
  635. AV_WN16(dstU + i * 2, AV_RL16(src1 + i * 4 + 0) >> 6);
  636. AV_WN16(dstV + i * 2, AV_RL16(src1 + i * 4 + 2) >> 6);
  637. }
  638. }
  639. static void p010BEToUV_c(uint8_t *dstU, uint8_t *dstV,
  640. const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
  641. int width, uint32_t *unused)
  642. {
  643. int i;
  644. for (i = 0; i < width; i++) {
  645. AV_WN16(dstU + i * 2, AV_RB16(src1 + i * 4 + 0) >> 6);
  646. AV_WN16(dstV + i * 2, AV_RB16(src1 + i * 4 + 2) >> 6);
  647. }
  648. }
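/* P010 stores 10-bit samples in the most significant bits of 16-bit words,
 * with the low 6 bits zero. Shifting right by 6 therefore yields plain 10-bit
 * samples, effectively turning the input into yuv420p10-style data for the
 * rest of the pipeline. */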
  649. #define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))
  650. static void bgr24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2,
  651. int width, uint32_t *rgb2yuv)
  652. {
  653. int16_t *dst = (int16_t *)_dst;
  654. int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
  655. int i;
  656. for (i = 0; i < width; i++) {
  657. int b = src[i * 3 + 0];
  658. int g = src[i * 3 + 1];
  659. int r = src[i * 3 + 2];
  660. dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
  661. }
  662. }
  663. static void bgr24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
  664. const uint8_t *src2, int width, uint32_t *rgb2yuv)
  665. {
  666. int16_t *dstU = (int16_t *)_dstU;
  667. int16_t *dstV = (int16_t *)_dstV;
  668. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  669. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  670. int i;
  671. for (i = 0; i < width; i++) {
  672. int b = src1[3 * i + 0];
  673. int g = src1[3 * i + 1];
  674. int r = src1[3 * i + 2];
  675. dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
  676. dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
  677. }
  678. av_assert1(src1 == src2);
  679. }
  680. static void bgr24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
  681. const uint8_t *src2, int width, uint32_t *rgb2yuv)
  682. {
  683. int16_t *dstU = (int16_t *)_dstU;
  684. int16_t *dstV = (int16_t *)_dstV;
  685. int i;
  686. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  687. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  688. for (i = 0; i < width; i++) {
  689. int b = src1[6 * i + 0] + src1[6 * i + 3];
  690. int g = src1[6 * i + 1] + src1[6 * i + 4];
  691. int r = src1[6 * i + 2] + src1[6 * i + 5];
  692. dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
  693. dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
  694. }
  695. av_assert1(src1 == src2);
  696. }
  697. static void rgb24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  698. uint32_t *rgb2yuv)
  699. {
  700. int16_t *dst = (int16_t *)_dst;
  701. int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
  702. int i;
  703. for (i = 0; i < width; i++) {
  704. int r = src[i * 3 + 0];
  705. int g = src[i * 3 + 1];
  706. int b = src[i * 3 + 2];
  707. dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
  708. }
  709. }
  710. static void rgb24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
  711. const uint8_t *src2, int width, uint32_t *rgb2yuv)
  712. {
  713. int16_t *dstU = (int16_t *)_dstU;
  714. int16_t *dstV = (int16_t *)_dstV;
  715. int i;
  716. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  717. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  718. av_assert1(src1 == src2);
  719. for (i = 0; i < width; i++) {
  720. int r = src1[3 * i + 0];
  721. int g = src1[3 * i + 1];
  722. int b = src1[3 * i + 2];
  723. dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
  724. dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
  725. }
  726. }
  727. static void rgb24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
  728. const uint8_t *src2, int width, uint32_t *rgb2yuv)
  729. {
  730. int16_t *dstU = (int16_t *)_dstU;
  731. int16_t *dstV = (int16_t *)_dstV;
  732. int i;
  733. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  734. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  735. av_assert1(src1 == src2);
  736. for (i = 0; i < width; i++) {
  737. int r = src1[6 * i + 0] + src1[6 * i + 3];
  738. int g = src1[6 * i + 1] + src1[6 * i + 4];
  739. int b = src1[6 * i + 2] + src1[6 * i + 5];
  740. dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
  741. dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
  742. }
  743. }
  744. static void planar_rgb_to_y(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *rgb2yuv)
  745. {
  746. uint16_t *dst = (uint16_t *)_dst;
  747. int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
  748. int i;
  749. for (i = 0; i < width; i++) {
  750. int g = src[0][i];
  751. int b = src[1][i];
  752. int r = src[2][i];
  753. dst[i] = (ry*r + gy*g + by*b + (0x801<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
  754. }
  755. }
  756. static void planar_rgb_to_a(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *unused)
  757. {
  758. uint16_t *dst = (uint16_t *)_dst;
  759. int i;
  760. for (i = 0; i < width; i++)
  761. dst[i] = src[3][i] << 6;
  762. }
  763. static void planar_rgb_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *src[4], int width, int32_t *rgb2yuv)
  764. {
  765. uint16_t *dstU = (uint16_t *)_dstU;
  766. uint16_t *dstV = (uint16_t *)_dstV;
  767. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  768. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  769. int i;
  770. for (i = 0; i < width; i++) {
  771. int g = src[0][i];
  772. int b = src[1][i];
  773. int r = src[2][i];
  774. dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
  775. dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
  776. }
  777. }
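/* The planar RGB readers take the GBRP plane order: src[0] is green, src[1]
 * blue, src[2] red and src[3] alpha. Results are produced on the same
 * "8-bit value << 6" scale as the packed readers, with the usual 16/128
 * offsets folded into the rounding constants (0x801 and 0x4001). */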
  778. #define rdpx(src) \
  779. is_be ? AV_RB16(src) : AV_RL16(src)
  780. static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_src[4],
  781. int width, int bpc, int is_be, int32_t *rgb2yuv)
  782. {
  783. int i;
  784. const uint16_t **src = (const uint16_t **)_src;
  785. uint16_t *dst = (uint16_t *)_dst;
  786. int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
  787. int shift = bpc < 16 ? bpc : 14;
  788. for (i = 0; i < width; i++) {
  789. int g = rdpx(src[0] + i);
  790. int b = rdpx(src[1] + i);
  791. int r = rdpx(src[2] + i);
  792. dst[i] = ((ry*r + gy*g + by*b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14));
  793. }
  794. }
  795. static av_always_inline void planar_rgb16_to_a(uint8_t *_dst, const uint8_t *_src[4],
  796. int width, int bpc, int is_be, int32_t *rgb2yuv)
  797. {
  798. int i;
  799. const uint16_t **src = (const uint16_t **)_src;
  800. uint16_t *dst = (uint16_t *)_dst;
  801. int shift = bpc < 16 ? bpc : 14;
  802. for (i = 0; i < width; i++) {
  803. dst[i] = rdpx(src[3] + i) << (14 - shift);
  804. }
  805. }
  806. static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV,
  807. const uint8_t *_src[4], int width,
  808. int bpc, int is_be, int32_t *rgb2yuv)
  809. {
  810. int i;
  811. const uint16_t **src = (const uint16_t **)_src;
  812. uint16_t *dstU = (uint16_t *)_dstU;
  813. uint16_t *dstV = (uint16_t *)_dstV;
  814. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  815. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  816. int shift = bpc < 16 ? bpc : 14;
  817. for (i = 0; i < width; i++) {
  818. int g = rdpx(src[0] + i);
  819. int b = rdpx(src[1] + i);
  820. int r = rdpx(src[2] + i);
  821. dstU[i] = (ru*r + gu*g + bu*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
  822. dstV[i] = (rv*r + gv*g + bv*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
  823. }
  824. }
  825. #undef rdpx
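/* The 9..16-bit planar readers above normalize the source depth: rdpx() reads
 * each sample in the requested byte order, and shift equals bpc except for
 * 16-bit input, where it is clamped to 14. Combined with the final
 * ">> (RGB2YUV_SHIFT + shift - 14)", this brings 9..14-bit inputs up to a
 * common ~14-bit intermediate and leaves 16-bit inputs at full 16-bit range.
 * The 33/257 rounding terms again fold the 16/128 offsets together with a
 * +0.5 bias, scaled to the source bit depth. */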
  826. #define rgb9plus_planar_funcs_endian(nbits, endian_name, endian) \
  827. static void planar_rgb##nbits##endian_name##_to_y(uint8_t *dst, const uint8_t *src[4], \
  828. int w, int32_t *rgb2yuv) \
  829. { \
  830. planar_rgb16_to_y(dst, src, w, nbits, endian, rgb2yuv); \
  831. } \
  832. static void planar_rgb##nbits##endian_name##_to_a(uint8_t *dst, const uint8_t *src[4], \
  833. int w, int32_t *rgb2yuv) \
  834. { \
  835. planar_rgb16_to_a(dst, src, w, nbits, endian, rgb2yuv); \
  836. } \
  837. static void planar_rgb##nbits##endian_name##_to_uv(uint8_t *dstU, uint8_t *dstV, \
  838. const uint8_t *src[4], int w, int32_t *rgb2yuv) \
  839. { \
  840. planar_rgb16_to_uv(dstU, dstV, src, w, nbits, endian, rgb2yuv); \
  841. }
  842. #define rgb9plus_planar_funcs(nbits) \
  843. rgb9plus_planar_funcs_endian(nbits, le, 0) \
  844. rgb9plus_planar_funcs_endian(nbits, be, 1)
  845. rgb9plus_planar_funcs(9)
  846. rgb9plus_planar_funcs(10)
  847. rgb9plus_planar_funcs(12)
  848. rgb9plus_planar_funcs(14)
  849. rgb9plus_planar_funcs(16)
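/* rgb9plus_planar_funcs(N) expands into little- and big-endian reader triples
 * (planar_rgbNle_to_y/_to_a/_to_uv and the matching planar_rgbNbe_* set),
 * which is where the function names used by the GBRP9..GBRP16 cases of
 * ff_sws_init_input_funcs() below come from. */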
  850. av_cold void ff_sws_init_input_funcs(SwsContext *c)
  851. {
  852. enum AVPixelFormat srcFormat = c->srcFormat;
  853. c->chrToYV12 = NULL;
  854. switch (srcFormat) {
  855. case AV_PIX_FMT_YUYV422:
  856. c->chrToYV12 = yuy2ToUV_c;
  857. break;
  858. case AV_PIX_FMT_YVYU422:
  859. c->chrToYV12 = yvy2ToUV_c;
  860. break;
  861. case AV_PIX_FMT_UYVY422:
  862. c->chrToYV12 = uyvyToUV_c;
  863. break;
  864. case AV_PIX_FMT_NV12:
  865. c->chrToYV12 = nv12ToUV_c;
  866. break;
  867. case AV_PIX_FMT_NV21:
  868. c->chrToYV12 = nv21ToUV_c;
  869. break;
  870. case AV_PIX_FMT_RGB8:
  871. case AV_PIX_FMT_BGR8:
  872. case AV_PIX_FMT_PAL8:
  873. case AV_PIX_FMT_BGR4_BYTE:
  874. case AV_PIX_FMT_RGB4_BYTE:
  875. c->chrToYV12 = palToUV_c;
  876. break;
  877. case AV_PIX_FMT_GBRP9LE:
  878. c->readChrPlanar = planar_rgb9le_to_uv;
  879. break;
  880. case AV_PIX_FMT_GBRAP10LE:
  881. case AV_PIX_FMT_GBRP10LE:
  882. c->readChrPlanar = planar_rgb10le_to_uv;
  883. break;
  884. case AV_PIX_FMT_GBRAP12LE:
  885. case AV_PIX_FMT_GBRP12LE:
  886. c->readChrPlanar = planar_rgb12le_to_uv;
  887. break;
  888. case AV_PIX_FMT_GBRP14LE:
  889. c->readChrPlanar = planar_rgb14le_to_uv;
  890. break;
  891. case AV_PIX_FMT_GBRAP16LE:
  892. case AV_PIX_FMT_GBRP16LE:
  893. c->readChrPlanar = planar_rgb16le_to_uv;
  894. break;
  895. case AV_PIX_FMT_GBRP9BE:
  896. c->readChrPlanar = planar_rgb9be_to_uv;
  897. break;
  898. case AV_PIX_FMT_GBRAP10BE:
  899. case AV_PIX_FMT_GBRP10BE:
  900. c->readChrPlanar = planar_rgb10be_to_uv;
  901. break;
  902. case AV_PIX_FMT_GBRAP12BE:
  903. case AV_PIX_FMT_GBRP12BE:
  904. c->readChrPlanar = planar_rgb12be_to_uv;
  905. break;
  906. case AV_PIX_FMT_GBRP14BE:
  907. c->readChrPlanar = planar_rgb14be_to_uv;
  908. break;
  909. case AV_PIX_FMT_GBRAP16BE:
  910. case AV_PIX_FMT_GBRP16BE:
  911. c->readChrPlanar = planar_rgb16be_to_uv;
  912. break;
  913. case AV_PIX_FMT_GBRAP:
  914. case AV_PIX_FMT_GBRP:
  915. c->readChrPlanar = planar_rgb_to_uv;
  916. break;
  917. #if HAVE_BIGENDIAN
  918. case AV_PIX_FMT_YUV444P9LE:
  919. case AV_PIX_FMT_YUV422P9LE:
  920. case AV_PIX_FMT_YUV420P9LE:
  921. case AV_PIX_FMT_YUV422P10LE:
  922. case AV_PIX_FMT_YUV440P10LE:
  923. case AV_PIX_FMT_YUV444P10LE:
  924. case AV_PIX_FMT_YUV420P10LE:
  925. case AV_PIX_FMT_YUV422P12LE:
  926. case AV_PIX_FMT_YUV440P12LE:
  927. case AV_PIX_FMT_YUV444P12LE:
  928. case AV_PIX_FMT_YUV420P12LE:
  929. case AV_PIX_FMT_YUV422P14LE:
  930. case AV_PIX_FMT_YUV444P14LE:
  931. case AV_PIX_FMT_YUV420P14LE:
  932. case AV_PIX_FMT_YUV420P16LE:
  933. case AV_PIX_FMT_YUV422P16LE:
  934. case AV_PIX_FMT_YUV444P16LE:
  935. case AV_PIX_FMT_YUVA444P9LE:
  936. case AV_PIX_FMT_YUVA422P9LE:
  937. case AV_PIX_FMT_YUVA420P9LE:
  938. case AV_PIX_FMT_YUVA444P10LE:
  939. case AV_PIX_FMT_YUVA422P10LE:
  940. case AV_PIX_FMT_YUVA420P10LE:
  941. case AV_PIX_FMT_YUVA420P16LE:
  942. case AV_PIX_FMT_YUVA422P16LE:
  943. case AV_PIX_FMT_YUVA444P16LE:
  944. c->chrToYV12 = bswap16UV_c;
  945. break;
  946. #else
  947. case AV_PIX_FMT_YUV444P9BE:
  948. case AV_PIX_FMT_YUV422P9BE:
  949. case AV_PIX_FMT_YUV420P9BE:
  950. case AV_PIX_FMT_YUV440P10BE:
  951. case AV_PIX_FMT_YUV444P10BE:
  952. case AV_PIX_FMT_YUV422P10BE:
  953. case AV_PIX_FMT_YUV420P10BE:
  954. case AV_PIX_FMT_YUV440P12BE:
  955. case AV_PIX_FMT_YUV444P12BE:
  956. case AV_PIX_FMT_YUV422P12BE:
  957. case AV_PIX_FMT_YUV420P12BE:
  958. case AV_PIX_FMT_YUV444P14BE:
  959. case AV_PIX_FMT_YUV422P14BE:
  960. case AV_PIX_FMT_YUV420P14BE:
  961. case AV_PIX_FMT_YUV420P16BE:
  962. case AV_PIX_FMT_YUV422P16BE:
  963. case AV_PIX_FMT_YUV444P16BE:
  964. case AV_PIX_FMT_YUVA444P9BE:
  965. case AV_PIX_FMT_YUVA422P9BE:
  966. case AV_PIX_FMT_YUVA420P9BE:
  967. case AV_PIX_FMT_YUVA444P10BE:
  968. case AV_PIX_FMT_YUVA422P10BE:
  969. case AV_PIX_FMT_YUVA420P10BE:
  970. case AV_PIX_FMT_YUVA420P16BE:
  971. case AV_PIX_FMT_YUVA422P16BE:
  972. case AV_PIX_FMT_YUVA444P16BE:
  973. c->chrToYV12 = bswap16UV_c;
  974. break;
  975. #endif
  976. case AV_PIX_FMT_AYUV64LE:
  977. c->chrToYV12 = read_ayuv64le_UV_c;
  978. break;
  979. case AV_PIX_FMT_P010LE:
  980. c->chrToYV12 = p010LEToUV_c;
  981. break;
  982. case AV_PIX_FMT_P010BE:
  983. c->chrToYV12 = p010BEToUV_c;
  984. break;
  985. }
  986. if (c->chrSrcHSubSample) {
  987. switch (srcFormat) {
  988. case AV_PIX_FMT_RGBA64BE:
  989. c->chrToYV12 = rgb64BEToUV_half_c;
  990. break;
  991. case AV_PIX_FMT_RGBA64LE:
  992. c->chrToYV12 = rgb64LEToUV_half_c;
  993. break;
  994. case AV_PIX_FMT_BGRA64BE:
  995. c->chrToYV12 = bgr64BEToUV_half_c;
  996. break;
  997. case AV_PIX_FMT_BGRA64LE:
  998. c->chrToYV12 = bgr64LEToUV_half_c;
  999. break;
  1000. case AV_PIX_FMT_RGB48BE:
  1001. c->chrToYV12 = rgb48BEToUV_half_c;
  1002. break;
  1003. case AV_PIX_FMT_RGB48LE:
  1004. c->chrToYV12 = rgb48LEToUV_half_c;
  1005. break;
  1006. case AV_PIX_FMT_BGR48BE:
  1007. c->chrToYV12 = bgr48BEToUV_half_c;
  1008. break;
  1009. case AV_PIX_FMT_BGR48LE:
  1010. c->chrToYV12 = bgr48LEToUV_half_c;
  1011. break;
  1012. case AV_PIX_FMT_RGB32:
  1013. c->chrToYV12 = bgr32ToUV_half_c;
  1014. break;
  1015. case AV_PIX_FMT_RGB32_1:
  1016. c->chrToYV12 = bgr321ToUV_half_c;
  1017. break;
  1018. case AV_PIX_FMT_BGR24:
  1019. c->chrToYV12 = bgr24ToUV_half_c;
  1020. break;
  1021. case AV_PIX_FMT_BGR565LE:
  1022. c->chrToYV12 = bgr16leToUV_half_c;
  1023. break;
  1024. case AV_PIX_FMT_BGR565BE:
  1025. c->chrToYV12 = bgr16beToUV_half_c;
  1026. break;
  1027. case AV_PIX_FMT_BGR555LE:
  1028. c->chrToYV12 = bgr15leToUV_half_c;
  1029. break;
  1030. case AV_PIX_FMT_BGR555BE:
  1031. c->chrToYV12 = bgr15beToUV_half_c;
  1032. break;
  1033. case AV_PIX_FMT_GBRAP:
  1034. case AV_PIX_FMT_GBRP:
  1035. c->chrToYV12 = gbr24pToUV_half_c;
  1036. break;
  1037. case AV_PIX_FMT_BGR444LE:
  1038. c->chrToYV12 = bgr12leToUV_half_c;
  1039. break;
  1040. case AV_PIX_FMT_BGR444BE:
  1041. c->chrToYV12 = bgr12beToUV_half_c;
  1042. break;
  1043. case AV_PIX_FMT_BGR32:
  1044. c->chrToYV12 = rgb32ToUV_half_c;
  1045. break;
  1046. case AV_PIX_FMT_BGR32_1:
  1047. c->chrToYV12 = rgb321ToUV_half_c;
  1048. break;
  1049. case AV_PIX_FMT_RGB24:
  1050. c->chrToYV12 = rgb24ToUV_half_c;
  1051. break;
  1052. case AV_PIX_FMT_RGB565LE:
  1053. c->chrToYV12 = rgb16leToUV_half_c;
  1054. break;
  1055. case AV_PIX_FMT_RGB565BE:
  1056. c->chrToYV12 = rgb16beToUV_half_c;
  1057. break;
  1058. case AV_PIX_FMT_RGB555LE:
  1059. c->chrToYV12 = rgb15leToUV_half_c;
  1060. break;
  1061. case AV_PIX_FMT_RGB555BE:
  1062. c->chrToYV12 = rgb15beToUV_half_c;
  1063. break;
  1064. case AV_PIX_FMT_RGB444LE:
  1065. c->chrToYV12 = rgb12leToUV_half_c;
  1066. break;
  1067. case AV_PIX_FMT_RGB444BE:
  1068. c->chrToYV12 = rgb12beToUV_half_c;
  1069. break;
  1070. }
  1071. } else {
  1072. switch (srcFormat) {
  1073. case AV_PIX_FMT_RGBA64BE:
  1074. c->chrToYV12 = rgb64BEToUV_c;
  1075. break;
  1076. case AV_PIX_FMT_RGBA64LE:
  1077. c->chrToYV12 = rgb64LEToUV_c;
  1078. break;
  1079. case AV_PIX_FMT_BGRA64BE:
  1080. c->chrToYV12 = bgr64BEToUV_c;
  1081. break;
  1082. case AV_PIX_FMT_BGRA64LE:
  1083. c->chrToYV12 = bgr64LEToUV_c;
  1084. break;
  1085. case AV_PIX_FMT_RGB48BE:
  1086. c->chrToYV12 = rgb48BEToUV_c;
  1087. break;
  1088. case AV_PIX_FMT_RGB48LE:
  1089. c->chrToYV12 = rgb48LEToUV_c;
  1090. break;
  1091. case AV_PIX_FMT_BGR48BE:
  1092. c->chrToYV12 = bgr48BEToUV_c;
  1093. break;
  1094. case AV_PIX_FMT_BGR48LE:
  1095. c->chrToYV12 = bgr48LEToUV_c;
  1096. break;
  1097. case AV_PIX_FMT_RGB32:
  1098. c->chrToYV12 = bgr32ToUV_c;
  1099. break;
  1100. case AV_PIX_FMT_RGB32_1:
  1101. c->chrToYV12 = bgr321ToUV_c;
  1102. break;
  1103. case AV_PIX_FMT_BGR24:
  1104. c->chrToYV12 = bgr24ToUV_c;
  1105. break;
  1106. case AV_PIX_FMT_BGR565LE:
  1107. c->chrToYV12 = bgr16leToUV_c;
  1108. break;
  1109. case AV_PIX_FMT_BGR565BE:
  1110. c->chrToYV12 = bgr16beToUV_c;
  1111. break;
  1112. case AV_PIX_FMT_BGR555LE:
  1113. c->chrToYV12 = bgr15leToUV_c;
  1114. break;
  1115. case AV_PIX_FMT_BGR555BE:
  1116. c->chrToYV12 = bgr15beToUV_c;
  1117. break;
  1118. case AV_PIX_FMT_BGR444LE:
  1119. c->chrToYV12 = bgr12leToUV_c;
  1120. break;
  1121. case AV_PIX_FMT_BGR444BE:
  1122. c->chrToYV12 = bgr12beToUV_c;
  1123. break;
  1124. case AV_PIX_FMT_BGR32:
  1125. c->chrToYV12 = rgb32ToUV_c;
  1126. break;
  1127. case AV_PIX_FMT_BGR32_1:
  1128. c->chrToYV12 = rgb321ToUV_c;
  1129. break;
  1130. case AV_PIX_FMT_RGB24:
  1131. c->chrToYV12 = rgb24ToUV_c;
  1132. break;
  1133. case AV_PIX_FMT_RGB565LE:
  1134. c->chrToYV12 = rgb16leToUV_c;
  1135. break;
  1136. case AV_PIX_FMT_RGB565BE:
  1137. c->chrToYV12 = rgb16beToUV_c;
  1138. break;
  1139. case AV_PIX_FMT_RGB555LE:
  1140. c->chrToYV12 = rgb15leToUV_c;
  1141. break;
  1142. case AV_PIX_FMT_RGB555BE:
  1143. c->chrToYV12 = rgb15beToUV_c;
  1144. break;
  1145. case AV_PIX_FMT_RGB444LE:
  1146. c->chrToYV12 = rgb12leToUV_c;
  1147. break;
  1148. case AV_PIX_FMT_RGB444BE:
  1149. c->chrToYV12 = rgb12beToUV_c;
  1150. break;
  1151. }
  1152. }
  1153. c->lumToYV12 = NULL;
  1154. c->alpToYV12 = NULL;
  1155. switch (srcFormat) {
  1156. case AV_PIX_FMT_GBRP9LE:
  1157. c->readLumPlanar = planar_rgb9le_to_y;
  1158. break;
  1159. case AV_PIX_FMT_GBRAP10LE:
  1160. c->readAlpPlanar = planar_rgb10le_to_a;
  1161. case AV_PIX_FMT_GBRP10LE:
  1162. c->readLumPlanar = planar_rgb10le_to_y;
  1163. break;
  1164. case AV_PIX_FMT_GBRAP12LE:
  1165. c->readAlpPlanar = planar_rgb12le_to_a;
  1166. case AV_PIX_FMT_GBRP12LE:
  1167. c->readLumPlanar = planar_rgb12le_to_y;
  1168. break;
  1169. case AV_PIX_FMT_GBRP14LE:
  1170. c->readLumPlanar = planar_rgb14le_to_y;
  1171. break;
  1172. case AV_PIX_FMT_GBRAP16LE:
  1173. c->readAlpPlanar = planar_rgb16le_to_a;
  1174. case AV_PIX_FMT_GBRP16LE:
  1175. c->readLumPlanar = planar_rgb16le_to_y;
  1176. break;
  1177. case AV_PIX_FMT_GBRP9BE:
  1178. c->readLumPlanar = planar_rgb9be_to_y;
  1179. break;
  1180. case AV_PIX_FMT_GBRAP10BE:
  1181. c->readAlpPlanar = planar_rgb10be_to_a;
  1182. case AV_PIX_FMT_GBRP10BE:
  1183. c->readLumPlanar = planar_rgb10be_to_y;
  1184. break;
  1185. case AV_PIX_FMT_GBRAP12BE:
  1186. c->readAlpPlanar = planar_rgb12be_to_a;
  1187. case AV_PIX_FMT_GBRP12BE:
  1188. c->readLumPlanar = planar_rgb12be_to_y;
  1189. break;
  1190. case AV_PIX_FMT_GBRP14BE:
  1191. c->readLumPlanar = planar_rgb14be_to_y;
  1192. break;
  1193. case AV_PIX_FMT_GBRAP16BE:
  1194. c->readAlpPlanar = planar_rgb16be_to_a;
  1195. case AV_PIX_FMT_GBRP16BE:
  1196. c->readLumPlanar = planar_rgb16be_to_y;
  1197. break;
  1198. case AV_PIX_FMT_GBRAP:
  1199. c->readAlpPlanar = planar_rgb_to_a;
  1200. case AV_PIX_FMT_GBRP:
  1201. c->readLumPlanar = planar_rgb_to_y;
  1202. break;
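/* Note: the GBRAP* cases above intentionally fall through into the matching
 * GBRP* cases, so alpha-capable formats get both readAlpPlanar and
 * readLumPlanar set in a single switch. */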
  1203. #if HAVE_BIGENDIAN
  1204. case AV_PIX_FMT_YUV444P9LE:
  1205. case AV_PIX_FMT_YUV422P9LE:
  1206. case AV_PIX_FMT_YUV420P9LE:
  1207. case AV_PIX_FMT_YUV444P10LE:
  1208. case AV_PIX_FMT_YUV440P10LE:
  1209. case AV_PIX_FMT_YUV422P10LE:
  1210. case AV_PIX_FMT_YUV420P10LE:
  1211. case AV_PIX_FMT_YUV444P12LE:
  1212. case AV_PIX_FMT_YUV440P12LE:
  1213. case AV_PIX_FMT_YUV422P12LE:
  1214. case AV_PIX_FMT_YUV420P12LE:
  1215. case AV_PIX_FMT_YUV444P14LE:
  1216. case AV_PIX_FMT_YUV422P14LE:
  1217. case AV_PIX_FMT_YUV420P14LE:
  1218. case AV_PIX_FMT_YUV420P16LE:
  1219. case AV_PIX_FMT_YUV422P16LE:
  1220. case AV_PIX_FMT_YUV444P16LE:
  1221. case AV_PIX_FMT_GRAY16LE:
  1222. c->lumToYV12 = bswap16Y_c;
  1223. break;
  1224. case AV_PIX_FMT_YUVA444P9LE:
  1225. case AV_PIX_FMT_YUVA422P9LE:
  1226. case AV_PIX_FMT_YUVA420P9LE:
  1227. case AV_PIX_FMT_YUVA444P10LE:
  1228. case AV_PIX_FMT_YUVA422P10LE:
  1229. case AV_PIX_FMT_YUVA420P10LE:
  1230. case AV_PIX_FMT_YUVA420P16LE:
  1231. case AV_PIX_FMT_YUVA422P16LE:
  1232. case AV_PIX_FMT_YUVA444P16LE:
  1233. c->lumToYV12 = bswap16Y_c;
  1234. c->alpToYV12 = bswap16Y_c;
  1235. break;
  1236. #else
  1237. case AV_PIX_FMT_YUV444P9BE:
  1238. case AV_PIX_FMT_YUV422P9BE:
  1239. case AV_PIX_FMT_YUV420P9BE:
  1240. case AV_PIX_FMT_YUV444P10BE:
  1241. case AV_PIX_FMT_YUV440P10BE:
  1242. case AV_PIX_FMT_YUV422P10BE:
  1243. case AV_PIX_FMT_YUV420P10BE:
  1244. case AV_PIX_FMT_YUV444P12BE:
  1245. case AV_PIX_FMT_YUV440P12BE:
  1246. case AV_PIX_FMT_YUV422P12BE:
  1247. case AV_PIX_FMT_YUV420P12BE:
  1248. case AV_PIX_FMT_YUV444P14BE:
  1249. case AV_PIX_FMT_YUV422P14BE:
  1250. case AV_PIX_FMT_YUV420P14BE:
  1251. case AV_PIX_FMT_YUV420P16BE:
  1252. case AV_PIX_FMT_YUV422P16BE:
  1253. case AV_PIX_FMT_YUV444P16BE:
  1254. case AV_PIX_FMT_GRAY16BE:
  1255. c->lumToYV12 = bswap16Y_c;
  1256. break;
  1257. case AV_PIX_FMT_YUVA444P9BE:
  1258. case AV_PIX_FMT_YUVA422P9BE:
  1259. case AV_PIX_FMT_YUVA420P9BE:
  1260. case AV_PIX_FMT_YUVA444P10BE:
  1261. case AV_PIX_FMT_YUVA422P10BE:
  1262. case AV_PIX_FMT_YUVA420P10BE:
  1263. case AV_PIX_FMT_YUVA420P16BE:
  1264. case AV_PIX_FMT_YUVA422P16BE:
  1265. case AV_PIX_FMT_YUVA444P16BE:
  1266. c->lumToYV12 = bswap16Y_c;
  1267. c->alpToYV12 = bswap16Y_c;
  1268. break;
  1269. #endif
  1270. case AV_PIX_FMT_YA16LE:
  1271. c->lumToYV12 = read_ya16le_gray_c;
  1272. break;
  1273. case AV_PIX_FMT_YA16BE:
  1274. c->lumToYV12 = read_ya16be_gray_c;
  1275. break;
  1276. case AV_PIX_FMT_AYUV64LE:
  1277. c->lumToYV12 = read_ayuv64le_Y_c;
  1278. break;
  1279. case AV_PIX_FMT_YUYV422:
  1280. case AV_PIX_FMT_YVYU422:
  1281. case AV_PIX_FMT_YA8:
  1282. c->lumToYV12 = yuy2ToY_c;
  1283. break;
  1284. case AV_PIX_FMT_UYVY422:
  1285. c->lumToYV12 = uyvyToY_c;
  1286. break;
  1287. case AV_PIX_FMT_BGR24:
  1288. c->lumToYV12 = bgr24ToY_c;
  1289. break;
  1290. case AV_PIX_FMT_BGR565LE:
  1291. c->lumToYV12 = bgr16leToY_c;
  1292. break;
  1293. case AV_PIX_FMT_BGR565BE:
  1294. c->lumToYV12 = bgr16beToY_c;
  1295. break;
  1296. case AV_PIX_FMT_BGR555LE:
  1297. c->lumToYV12 = bgr15leToY_c;
  1298. break;
  1299. case AV_PIX_FMT_BGR555BE:
  1300. c->lumToYV12 = bgr15beToY_c;
  1301. break;
  1302. case AV_PIX_FMT_BGR444LE:
  1303. c->lumToYV12 = bgr12leToY_c;
  1304. break;
  1305. case AV_PIX_FMT_BGR444BE:
  1306. c->lumToYV12 = bgr12beToY_c;
  1307. break;
  1308. case AV_PIX_FMT_RGB24:
  1309. c->lumToYV12 = rgb24ToY_c;
  1310. break;
  1311. case AV_PIX_FMT_RGB565LE:
  1312. c->lumToYV12 = rgb16leToY_c;
  1313. break;
  1314. case AV_PIX_FMT_RGB565BE:
  1315. c->lumToYV12 = rgb16beToY_c;
  1316. break;
  1317. case AV_PIX_FMT_RGB555LE:
  1318. c->lumToYV12 = rgb15leToY_c;
  1319. break;
  1320. case AV_PIX_FMT_RGB555BE:
  1321. c->lumToYV12 = rgb15beToY_c;
  1322. break;
  1323. case AV_PIX_FMT_RGB444LE:
  1324. c->lumToYV12 = rgb12leToY_c;
  1325. break;
  1326. case AV_PIX_FMT_RGB444BE:
  1327. c->lumToYV12 = rgb12beToY_c;
  1328. break;
  1329. case AV_PIX_FMT_RGB8:
  1330. case AV_PIX_FMT_BGR8:
  1331. case AV_PIX_FMT_PAL8:
  1332. case AV_PIX_FMT_BGR4_BYTE:
  1333. case AV_PIX_FMT_RGB4_BYTE:
  1334. c->lumToYV12 = palToY_c;
  1335. break;
  1336. case AV_PIX_FMT_MONOBLACK:
  1337. c->lumToYV12 = monoblack2Y_c;
  1338. break;
  1339. case AV_PIX_FMT_MONOWHITE:
  1340. c->lumToYV12 = monowhite2Y_c;
  1341. break;
  1342. case AV_PIX_FMT_RGB32:
  1343. c->lumToYV12 = bgr32ToY_c;
  1344. break;
  1345. case AV_PIX_FMT_RGB32_1:
  1346. c->lumToYV12 = bgr321ToY_c;
  1347. break;
  1348. case AV_PIX_FMT_BGR32:
  1349. c->lumToYV12 = rgb32ToY_c;
  1350. break;
  1351. case AV_PIX_FMT_BGR32_1:
  1352. c->lumToYV12 = rgb321ToY_c;
  1353. break;
  1354. case AV_PIX_FMT_RGB48BE:
  1355. c->lumToYV12 = rgb48BEToY_c;
  1356. break;
  1357. case AV_PIX_FMT_RGB48LE:
  1358. c->lumToYV12 = rgb48LEToY_c;
  1359. break;
  1360. case AV_PIX_FMT_BGR48BE:
  1361. c->lumToYV12 = bgr48BEToY_c;
  1362. break;
  1363. case AV_PIX_FMT_BGR48LE:
  1364. c->lumToYV12 = bgr48LEToY_c;
  1365. break;
  1366. case AV_PIX_FMT_RGBA64BE:
  1367. c->lumToYV12 = rgb64BEToY_c;
  1368. break;
  1369. case AV_PIX_FMT_RGBA64LE:
  1370. c->lumToYV12 = rgb64LEToY_c;
  1371. break;
  1372. case AV_PIX_FMT_BGRA64BE:
  1373. c->lumToYV12 = bgr64BEToY_c;
  1374. break;
  1375. case AV_PIX_FMT_BGRA64LE:
  1376. c->lumToYV12 = bgr64LEToY_c;
  1377. break;
  1378. case AV_PIX_FMT_P010LE:
  1379. c->lumToYV12 = p010LEToY_c;
  1380. break;
  1381. case AV_PIX_FMT_P010BE:
  1382. c->lumToYV12 = p010BEToY_c;
  1383. break;
  1384. }
  1385. if (c->needAlpha) {
  1386. if (is16BPS(srcFormat) || isNBPS(srcFormat)) {
  1387. if (HAVE_BIGENDIAN == !isBE(srcFormat))
  1388. c->alpToYV12 = bswap16Y_c;
  1389. }
  1390. switch (srcFormat) {
  1391. case AV_PIX_FMT_BGRA64LE:
  1392. case AV_PIX_FMT_RGBA64LE: c->alpToYV12 = rgba64leToA_c; break;
  1393. case AV_PIX_FMT_BGRA64BE:
  1394. case AV_PIX_FMT_RGBA64BE: c->alpToYV12 = rgba64beToA_c; break;
  1395. case AV_PIX_FMT_BGRA:
  1396. case AV_PIX_FMT_RGBA:
  1397. c->alpToYV12 = rgbaToA_c;
  1398. break;
  1399. case AV_PIX_FMT_ABGR:
  1400. case AV_PIX_FMT_ARGB:
  1401. c->alpToYV12 = abgrToA_c;
  1402. break;
  1403. case AV_PIX_FMT_YA8:
  1404. c->alpToYV12 = uyvyToY_c;
  1405. break;
  1406. case AV_PIX_FMT_YA16LE:
  1407. c->alpToYV12 = read_ya16le_alpha_c;
  1408. break;
  1409. case AV_PIX_FMT_YA16BE:
  1410. c->alpToYV12 = read_ya16be_alpha_c;
  1411. break;
  1412. case AV_PIX_FMT_AYUV64LE:
  1413. c->alpToYV12 = read_ayuv64le_A_c;
  1414. break;
  1415. case AV_PIX_FMT_PAL8 :
  1416. c->alpToYV12 = palToA_c;
  1417. break;
  1418. }
  1419. }
  1420. }