input.c 83 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036
  1. /*
  2. * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <math.h>
  21. #include <stdint.h>
  22. #include <stdio.h>
  23. #include "libavutil/bswap.h"
  24. #include "libavutil/intreadwrite.h"
  25. #include "libavutil/avassert.h"
  26. #include "config.h"
  27. #include "swscale_internal.h"
/* Read one 16-bit component honouring the template's `is_be` flag. */
#define input_pixel(pos) (is_be ? AV_RB16(pos) : AV_RL16(pos))
/* Token-paste helpers mapping an endianness suffix to a 0/1 flag. */
#define IS_BE_LE 0
#define IS_BE_BE 1
#define IS_BE_ 0
/* ENDIAN_IDENTIFIER needs to be "BE", "LE" or "". The latter is intended
 * for single-byte cases where the concept of endianness does not apply. */
#define IS_BE(ENDIAN_IDENTIFIER) IS_BE_ ## ENDIAN_IDENTIFIER
/* In the templates below the first/third components are read into locals
 * named r_b/b_r; these macros pick which of the two is red and which is
 * blue depending on whether `origin` is a BGR-ordered format. */
#define r ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? b_r : r_b)
#define b ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? r_b : b_r)
/*
 * Convert packed 16-bit-per-component RGBA/BGRA (4 words per pixel) to luma.
 * `origin` selects the channel order via the `r`/`b` macros above (which
 * reference the r_b/b_r locals by name); `is_be` selects the byte order used
 * by input_pixel().  rgb2yuv supplies the fixed-point conversion coefficients.
 */
static av_always_inline void
rgb64ToY_c_template(uint16_t *dst, const uint16_t *src, int width,
                    enum AVPixelFormat origin, int32_t *rgb2yuv, int is_be)
{
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        unsigned int r_b = input_pixel(&src[i*4+0]); /* red or blue, see `r`/`b` */
        unsigned int g   = input_pixel(&src[i*4+1]);
        unsigned int b_r = input_pixel(&src[i*4+2]);
        /* 0x2001<<(RGB2YUV_SHIFT-1) is the combined rounding/offset constant */
        dst[i] = (ry*r + gy*g + by*b + (0x2001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}
/*
 * Convert packed 16-bit RGBA/BGRA to chroma (U and V), one output per pixel.
 * Both source pointers must alias the same line (asserted below); channel
 * order and endianness handling match rgb64ToY_c_template.
 */
static av_always_inline void
rgb64ToUV_c_template(uint16_t *dstU, uint16_t *dstV,
                     const uint16_t *src1, const uint16_t *src2,
                     int width, enum AVPixelFormat origin, int32_t *rgb2yuv, int is_be)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1==src2);
    for (i = 0; i < width; i++) {
        unsigned int r_b = input_pixel(&src1[i*4+0]); /* red or blue, see `r`/`b` */
        unsigned int g   = input_pixel(&src1[i*4+1]);
        unsigned int b_r = input_pixel(&src1[i*4+2]);
        /* 0x10001<<(RGB2YUV_SHIFT-1): chroma bias (half range) plus rounding */
        dstU[i] = (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}
/*
 * Chroma conversion with 2:1 horizontal subsampling: each output sample is
 * computed from the rounded average of two adjacent input pixels (8 words
 * per iteration = 2 RGBA64 pixels).
 */
static av_always_inline void
rgb64ToUV_half_c_template(uint16_t *dstU, uint16_t *dstV,
                          const uint16_t *src1, const uint16_t *src2,
                          int width, enum AVPixelFormat origin, int32_t *rgb2yuv, int is_be)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1==src2);
    for (i = 0; i < width; i++) {
        /* average the two horizontally adjacent pixels with rounding */
        unsigned r_b = (input_pixel(&src1[8 * i + 0]) + input_pixel(&src1[8 * i + 4]) + 1) >> 1;
        unsigned g   = (input_pixel(&src1[8 * i + 1]) + input_pixel(&src1[8 * i + 5]) + 1) >> 1;
        unsigned b_r = (input_pixel(&src1[8 * i + 2]) + input_pixel(&src1[8 * i + 6]) + 1) >> 1;
        dstU[i]= (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
        dstV[i]= (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}
/*
 * Instantiate the three 64-bit wrappers (ToY, ToUV, ToUV_half) for one
 * concrete format: the wrappers only cast the byte pointers to uint16_t and
 * forward to the templates above with `origin`/`is_be` baked in.
 */
#define RGB64FUNCS_EXT(pattern, BE_LE, origin, is_be) \
static void pattern ## 64 ## BE_LE ## ToY_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused0, const uint8_t *unused1,\
                                    int width, uint32_t *rgb2yuv, void *opq) \
{ \
    const uint16_t *src = (const uint16_t *) _src; \
    uint16_t *dst = (uint16_t *) _dst; \
    rgb64ToY_c_template(dst, src, width, origin, rgb2yuv, is_be); \
} \
\
static void pattern ## 64 ## BE_LE ## ToUV_c(uint8_t *_dstU, uint8_t *_dstV, \
                                    const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
                                    int width, uint32_t *rgb2yuv, void *opq) \
{ \
    const uint16_t *src1 = (const uint16_t *) _src1, \
                   *src2 = (const uint16_t *) _src2; \
    uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
    rgb64ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv, is_be); \
} \
\
static void pattern ## 64 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, \
                                    const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
                                    int width, uint32_t *rgb2yuv, void *opq) \
{ \
    const uint16_t *src1 = (const uint16_t *) _src1, \
                   *src2 = (const uint16_t *) _src2; \
    uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
    rgb64ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv, is_be); \
}
/* Glue that builds the full pixel-format enum value and the is_be flag from
 * the endianness suffix, then expands the wrapper set. */
#define RGB64FUNCS(pattern, endianness, base_fmt) \
    RGB64FUNCS_EXT(pattern, endianness, base_fmt ## endianness, IS_BE(endianness))
RGB64FUNCS(rgb, LE, AV_PIX_FMT_RGBA64)
RGB64FUNCS(rgb, BE, AV_PIX_FMT_RGBA64)
RGB64FUNCS(bgr, LE, AV_PIX_FMT_BGRA64)
RGB64FUNCS(bgr, BE, AV_PIX_FMT_BGRA64)
/*
 * Convert packed 16-bit-per-component RGB/BGR (3 words per pixel, no alpha)
 * to luma.  Same `r`/`b`/input_pixel machinery as the 64-bit templates, only
 * the per-pixel stride differs (3 instead of 4).
 */
static av_always_inline void rgb48ToY_c_template(uint16_t *dst,
                                                 const uint16_t *src, int width,
                                                 enum AVPixelFormat origin,
                                                 int32_t *rgb2yuv, int is_be)
{
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        unsigned int r_b = input_pixel(&src[i * 3 + 0]); /* red or blue, see `r`/`b` */
        unsigned int g   = input_pixel(&src[i * 3 + 1]);
        unsigned int b_r = input_pixel(&src[i * 3 + 2]);
        dst[i] = (ry*r + gy*g + by*b + (0x2001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}
/*
 * Chroma conversion for packed 16-bit RGB/BGR, one output per pixel.
 * src1 and src2 must point at the same line (asserted).
 */
static av_always_inline void rgb48ToUV_c_template(uint16_t *dstU,
                                                  uint16_t *dstV,
                                                  const uint16_t *src1,
                                                  const uint16_t *src2,
                                                  int width,
                                                  enum AVPixelFormat origin,
                                                  int32_t *rgb2yuv, int is_be)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        unsigned r_b = input_pixel(&src1[i * 3 + 0]); /* red or blue, see `r`/`b` */
        unsigned g   = input_pixel(&src1[i * 3 + 1]);
        unsigned b_r = input_pixel(&src1[i * 3 + 2]);
        /* 0x10001<<(RGB2YUV_SHIFT-1): chroma bias plus rounding */
        dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}
/*
 * Chroma conversion for packed 16-bit RGB/BGR with 2:1 horizontal
 * subsampling: each output sample uses the rounded average of two adjacent
 * pixels (6 words per iteration = 2 RGB48 pixels).
 */
static av_always_inline void rgb48ToUV_half_c_template(uint16_t *dstU,
                                                       uint16_t *dstV,
                                                       const uint16_t *src1,
                                                       const uint16_t *src2,
                                                       int width,
                                                       enum AVPixelFormat origin,
                                                       int32_t *rgb2yuv, int is_be)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        /* rounded average of the two horizontally adjacent pixels */
        unsigned r_b = (input_pixel(&src1[6 * i + 0]) +
                        input_pixel(&src1[6 * i + 3]) + 1) >> 1;
        unsigned g   = (input_pixel(&src1[6 * i + 1]) +
                        input_pixel(&src1[6 * i + 4]) + 1) >> 1;
        unsigned b_r = (input_pixel(&src1[6 * i + 2]) +
                        input_pixel(&src1[6 * i + 5]) + 1) >> 1;
        dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}
/* The r/b channel-order helpers and the 16-bit input_pixel() were only for
 * the templates above; retire them before the 16/32-bit packed variants. */
#undef r
#undef b
#undef input_pixel
/*
 * Instantiate the three 48-bit wrappers (ToY, ToUV, ToUV_half) for one
 * concrete format; they cast the byte pointers to uint16_t and forward to
 * the rgb48 templates with `origin`/`is_be` baked in.
 */
#define RGB48FUNCS_EXT(pattern, BE_LE, origin, is_be) \
static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst, \
                                    const uint8_t *_src, \
                                    const uint8_t *unused0, const uint8_t *unused1,\
                                    int width, \
                                    uint32_t *rgb2yuv, \
                                    void *opq) \
{ \
    const uint16_t *src = (const uint16_t *)_src; \
    uint16_t *dst = (uint16_t *)_dst; \
    rgb48ToY_c_template(dst, src, width, origin, rgb2yuv, is_be); \
} \
\
static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU, \
                                     uint8_t *_dstV, \
                                     const uint8_t *unused0, \
                                     const uint8_t *_src1, \
                                     const uint8_t *_src2, \
                                     int width, \
                                     uint32_t *rgb2yuv, \
                                     void *opq) \
{ \
    const uint16_t *src1 = (const uint16_t *)_src1, \
                   *src2 = (const uint16_t *)_src2; \
    uint16_t *dstU = (uint16_t *)_dstU, \
             *dstV = (uint16_t *)_dstV; \
    rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv, is_be); \
} \
\
static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, \
                                          uint8_t *_dstV, \
                                          const uint8_t *unused0, \
                                          const uint8_t *_src1, \
                                          const uint8_t *_src2, \
                                          int width, \
                                          uint32_t *rgb2yuv, \
                                          void *opq) \
{ \
    const uint16_t *src1 = (const uint16_t *)_src1, \
                   *src2 = (const uint16_t *)_src2; \
    uint16_t *dstU = (uint16_t *)_dstU, \
             *dstV = (uint16_t *)_dstV; \
    rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv, is_be); \
}
/* Build the full pixel-format enum and the is_be flag from the endianness
 * suffix, then expand the wrapper set. */
#define RGB48FUNCS(pattern, endianness, base_fmt) \
    RGB48FUNCS_EXT(pattern, endianness, base_fmt ## endianness, IS_BE(endianness))
RGB48FUNCS(rgb, LE, AV_PIX_FMT_RGB48)
RGB48FUNCS(rgb, BE, AV_PIX_FMT_RGB48)
RGB48FUNCS(bgr, LE, AV_PIX_FMT_BGR48)
RGB48FUNCS(bgr, BE, AV_PIX_FMT_BGR48)
/* input_pixel(i): fetch pixel i of `src` for the 16/32-bit packed templates.
 * - 8-bit-per-component 32-bit formats (RGBA/BGRA/ARGB/ABGR): one aligned
 *   native 32-bit load;
 * - X2RGB10LE/X2BGR10LE: an explicit little-endian 32-bit load;
 * - everything else: a 16-bit load whose byte order follows `is_be`. */
#define input_pixel(i) ((origin == AV_PIX_FMT_RGBA || \
                         origin == AV_PIX_FMT_BGRA || \
                         origin == AV_PIX_FMT_ARGB || \
                         origin == AV_PIX_FMT_ABGR) \
                        ? AV_RN32A(&src[(i) * 4]) \
                        : ((origin == AV_PIX_FMT_X2RGB10LE || \
                            origin == AV_PIX_FMT_X2BGR10LE) \
                           ? AV_RL32(&src[(i) * 4]) \
                           : (is_be ? AV_RB16(&src[(i) * 2]) \
                                    : AV_RL16(&src[(i) * 2]))))
/*
 * Generic luma conversion for packed 16/32-bit RGB formats.
 * Per component: shift the whole pixel right by shp, mask with mask{r,g,b},
 * shift down by sh{r,g,b}; the coefficients are pre-scaled by 1<<{r,g,b}sh
 * so that all formats reach a common fixed-point scale S.  The final shift
 * of S-6 leaves the result with 6 extra fractional bits for later stages.
 */
static av_always_inline void rgb16_32ToY_c_template(int16_t *dst,
                                                    const uint8_t *src,
                                                    int width,
                                                    enum AVPixelFormat origin,
                                                    int shr, int shg,
                                                    int shb, int shp,
                                                    int maskr, int maskg,
                                                    int maskb, int rsh,
                                                    int gsh, int bsh, int S,
                                                    int32_t *rgb2yuv, int is_be)
{
    const int ry = rgb2yuv[RY_IDX]<<rsh, gy = rgb2yuv[GY_IDX]<<gsh, by = rgb2yuv[BY_IDX]<<bsh;
    /* luma offset plus half-ulp rounding at scale S */
    const unsigned rnd = (32<<((S)-1)) + (1<<(S-7));
    int i;
    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b = (px & maskb) >> shb;
        int g = (px & maskg) >> shg;
        int r = (px & maskr) >> shr;
        dst[i] = (ry * r + gy * g + by * b + rnd) >> ((S)-6);
    }
}
/*
 * Generic chroma conversion for packed 16/32-bit RGB formats, one output per
 * pixel.  Component extraction and scaling work exactly as in
 * rgb16_32ToY_c_template; only the coefficients and bias differ.
 */
static av_always_inline void rgb16_32ToUV_c_template(int16_t *dstU,
                                                     int16_t *dstV,
                                                     const uint8_t *src,
                                                     int width,
                                                     enum AVPixelFormat origin,
                                                     int shr, int shg,
                                                     int shb, int shp,
                                                     int maskr, int maskg,
                                                     int maskb, int rsh,
                                                     int gsh, int bsh, int S,
                                                     int32_t *rgb2yuv, int is_be)
{
    /* (1 << sh) multiplications avoid UB from left-shifting negative coeffs */
    const int ru = rgb2yuv[RU_IDX] * (1 << rsh), gu = rgb2yuv[GU_IDX] * (1 << gsh), bu = rgb2yuv[BU_IDX] * (1 << bsh),
              rv = rgb2yuv[RV_IDX] * (1 << rsh), gv = rgb2yuv[GV_IDX] * (1 << gsh), bv = rgb2yuv[BV_IDX] * (1 << bsh);
    /* chroma bias (half range) plus rounding at scale S */
    const unsigned rnd = (256u<<((S)-1)) + (1<<(S-7));
    int i;
    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b = (px & maskb) >> shb;
        int g = (px & maskg) >> shg;
        int r = (px & maskr) >> shr;
        dstU[i] = (ru * r + gu * g + bu * b + rnd) >> ((S)-6);
        dstV[i] = (rv * r + gv * g + bv * b + rnd) >> ((S)-6);
    }
}
/*
 * Chroma conversion for packed 16/32-bit RGB with 2:1 horizontal
 * subsampling.  Two adjacent pixels are summed while still packed: green is
 * isolated first via maskgx = ~(maskr|maskb) so that the r/b sum (rb) cannot
 * receive a carry out of the green field; the masks are widened by one bit
 * because each field now holds the sum of two samples.  The extra factor of
 * two is folded into the final shift (S-6+1).
 */
static av_always_inline void rgb16_32ToUV_half_c_template(int16_t *dstU,
                                                          int16_t *dstV,
                                                          const uint8_t *src,
                                                          int width,
                                                          enum AVPixelFormat origin,
                                                          int shr, int shg,
                                                          int shb, int shp,
                                                          int maskr, int maskg,
                                                          int maskb, int rsh,
                                                          int gsh, int bsh, int S,
                                                          int32_t *rgb2yuv, int is_be)
{
    const int ru = rgb2yuv[RU_IDX] * (1 << rsh), gu = rgb2yuv[GU_IDX] * (1 << gsh), bu = rgb2yuv[BU_IDX] * (1 << bsh),
              rv = rgb2yuv[RV_IDX] * (1 << rsh), gv = rgb2yuv[GV_IDX] * (1 << gsh), bv = rgb2yuv[BV_IDX] * (1 << bsh),
              maskgx = ~(maskr | maskb);
    /* bias and rounding at scale S+1 (two samples are summed, not averaged) */
    const unsigned rnd = (256U<<(S)) + (1<<(S-6));
    int i;
    maskr |= maskr << 1; /* widen each mask: fields now hold two-sample sums */
    maskb |= maskb << 1;
    maskg |= maskg << 1;
    for (i = 0; i < width; i++) {
        unsigned px0 = input_pixel(2 * i + 0) >> shp;
        unsigned px1 = input_pixel(2 * i + 1) >> shp;
        int b, r, g = (px0 & maskgx) + (px1 & maskgx);
        int rb = px0 + px1 - g; /* r+b sums, green removed so no cross-carry */
        b = (rb & maskb) >> shb;
        if (shp ||
            origin == AV_PIX_FMT_BGR565LE || origin == AV_PIX_FMT_BGR565BE ||
            origin == AV_PIX_FMT_RGB565LE || origin == AV_PIX_FMT_RGB565BE) {
            /* green is the top field here: the doubled sum cannot overflow
             * into a neighbour, so masking before the shift is unnecessary */
            g >>= shg;
        } else {
            g = (g & maskg) >> shg;
        }
        r = (rb & maskr) >> shr;
        dstU[i] = (ru * r + gu * g + bu * b + (unsigned)rnd) >> ((S)-6+1);
        dstV[i] = (rv * r + gv * g + bv * b + (unsigned)rnd) >> ((S)-6+1);
    }
}
/* The packed-format input_pixel() is only used by the templates above. */
#undef input_pixel
/*
 * Instantiate ToY/ToUV/ToUV_half wrappers for one packed 16/32-bit format;
 * each wrapper forwards to the corresponding template with all shift/mask
 * parameters baked in as compile-time constants.
 */
#define RGB16_32FUNCS_EXT(fmt, name, shr, shg, shb, shp, maskr, \
                          maskg, maskb, rsh, gsh, bsh, S, is_be) \
static void name ## ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, \
                          int width, uint32_t *tab, void *opq) \
{ \
    rgb16_32ToY_c_template((int16_t*)dst, src, width, fmt, shr, shg, shb, shp, \
                           maskr, maskg, maskb, rsh, gsh, bsh, S, tab, is_be); \
} \
\
static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \
                           const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \
                           int width, uint32_t *tab, void *opq) \
{ \
    rgb16_32ToUV_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
                            shr, shg, shb, shp, \
                            maskr, maskg, maskb, rsh, gsh, bsh, S, tab, is_be); \
} \
\
static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV, \
                                const uint8_t *unused0, const uint8_t *src, \
                                const uint8_t *dummy, \
                                int width, uint32_t *tab, void *opq) \
{ \
    rgb16_32ToUV_half_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
                                 shr, shg, shb, shp, \
                                 maskr, maskg, maskb, \
                                 rsh, gsh, bsh, S, tab, is_be); \
}
/* Build the full pixel-format enum and is_be flag from the endianness
 * suffix (empty for the native-order 32-bit formats). */
#define RGB16_32FUNCS(base_fmt, endianness, name, shr, shg, shb, shp, maskr, \
                      maskg, maskb, rsh, gsh, bsh, S) \
    RGB16_32FUNCS_EXT(base_fmt ## endianness, name, shr, shg, shb, shp, maskr, \
                      maskg, maskb, rsh, gsh, bsh, S, IS_BE(endianness))
RGB16_32FUNCS(AV_PIX_FMT_BGR32,   ,  bgr32,  16, 0,  0, 0, 0xFF0000, 0xFF00,   0x00FF,  8, 0,  8, RGB2YUV_SHIFT + 8)
RGB16_32FUNCS(AV_PIX_FMT_BGR32_1, ,  bgr321, 16, 0,  0, 8, 0xFF0000, 0xFF00,   0x00FF,  8, 0,  8, RGB2YUV_SHIFT + 8)
RGB16_32FUNCS(AV_PIX_FMT_RGB32,   ,  rgb32,   0, 0, 16, 0,   0x00FF, 0xFF00, 0xFF0000,  8, 0,  8, RGB2YUV_SHIFT + 8)
RGB16_32FUNCS(AV_PIX_FMT_RGB32_1, ,  rgb321,  0, 0, 16, 8,   0x00FF, 0xFF00, 0xFF0000,  8, 0,  8, RGB2YUV_SHIFT + 8)
RGB16_32FUNCS(AV_PIX_FMT_BGR565, LE, bgr16le, 0, 0,  0, 0,   0x001F, 0x07E0,   0xF800, 11, 5,  0, RGB2YUV_SHIFT + 8)
RGB16_32FUNCS(AV_PIX_FMT_BGR555, LE, bgr15le, 0, 0,  0, 0,   0x001F, 0x03E0,   0x7C00, 10, 5,  0, RGB2YUV_SHIFT + 7)
RGB16_32FUNCS(AV_PIX_FMT_BGR444, LE, bgr12le, 0, 0,  0, 0,   0x000F, 0x00F0,   0x0F00,  8, 4,  0, RGB2YUV_SHIFT + 4)
RGB16_32FUNCS(AV_PIX_FMT_RGB565, LE, rgb16le, 0, 0,  0, 0,   0xF800, 0x07E0,   0x001F,  0, 5, 11, RGB2YUV_SHIFT + 8)
RGB16_32FUNCS(AV_PIX_FMT_RGB555, LE, rgb15le, 0, 0,  0, 0,   0x7C00, 0x03E0,   0x001F,  0, 5, 10, RGB2YUV_SHIFT + 7)
RGB16_32FUNCS(AV_PIX_FMT_RGB444, LE, rgb12le, 0, 0,  0, 0,   0x0F00, 0x00F0,   0x000F,  0, 4,  8, RGB2YUV_SHIFT + 4)
RGB16_32FUNCS(AV_PIX_FMT_BGR565, BE, bgr16be, 0, 0,  0, 0,   0x001F, 0x07E0,   0xF800, 11, 5,  0, RGB2YUV_SHIFT + 8)
RGB16_32FUNCS(AV_PIX_FMT_BGR555, BE, bgr15be, 0, 0,  0, 0,   0x001F, 0x03E0,   0x7C00, 10, 5,  0, RGB2YUV_SHIFT + 7)
RGB16_32FUNCS(AV_PIX_FMT_BGR444, BE, bgr12be, 0, 0,  0, 0,   0x000F, 0x00F0,   0x0F00,  8, 4,  0, RGB2YUV_SHIFT + 4)
RGB16_32FUNCS(AV_PIX_FMT_RGB565, BE, rgb16be, 0, 0,  0, 0,   0xF800, 0x07E0,   0x001F,  0, 5, 11, RGB2YUV_SHIFT + 8)
RGB16_32FUNCS(AV_PIX_FMT_RGB555, BE, rgb15be, 0, 0,  0, 0,   0x7C00, 0x03E0,   0x001F,  0, 5, 10, RGB2YUV_SHIFT + 7)
RGB16_32FUNCS(AV_PIX_FMT_RGB444, BE, rgb12be, 0, 0,  0, 0,   0x0F00, 0x00F0,   0x000F,  0, 4,  8, RGB2YUV_SHIFT + 4)
RGB16_32FUNCS(AV_PIX_FMT_X2RGB10, LE, rgb30le, 16, 6, 0, 0, 0x3FF00000, 0xFFC00, 0x3FF, 0, 0, 4, RGB2YUV_SHIFT + 6)
RGB16_32FUNCS(AV_PIX_FMT_X2BGR10, LE, bgr30le, 0, 6, 16, 0, 0x3FF, 0xFFC00, 0x3FF00000, 4, 0, 0, RGB2YUV_SHIFT + 6)
  374. static void gbr24pToUV_half_c(uint8_t *_dstU, uint8_t *_dstV,
  375. const uint8_t *gsrc, const uint8_t *bsrc, const uint8_t *rsrc,
  376. int width, uint32_t *rgb2yuv, void *opq)
  377. {
  378. uint16_t *dstU = (uint16_t *)_dstU;
  379. uint16_t *dstV = (uint16_t *)_dstV;
  380. int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
  381. int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
  382. int i;
  383. for (i = 0; i < width; i++) {
  384. unsigned int g = gsrc[2*i] + gsrc[2*i+1];
  385. unsigned int b = bsrc[2*i] + bsrc[2*i+1];
  386. unsigned int r = rsrc[2*i] + rsrc[2*i+1];
  387. dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
  388. dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
  389. }
  390. }
  391. static void rgba64leToA_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
  392. const uint8_t *unused2, int width, uint32_t *unused, void *opq)
  393. {
  394. int16_t *dst = (int16_t *)_dst;
  395. const uint16_t *src = (const uint16_t *)_src;
  396. int i;
  397. for (i = 0; i < width; i++)
  398. dst[i] = AV_RL16(src + 4 * i + 3);
  399. }
  400. static void rgba64beToA_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
  401. const uint8_t *unused2, int width, uint32_t *unused, void *opq)
  402. {
  403. int16_t *dst = (int16_t *)_dst;
  404. const uint16_t *src = (const uint16_t *)_src;
  405. int i;
  406. for (i = 0; i < width; i++)
  407. dst[i] = AV_RB16(src + 4 * i + 3);
  408. }
  409. static void abgrToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1,
  410. const uint8_t *unused2, int width, uint32_t *unused, void *opq)
  411. {
  412. int16_t *dst = (int16_t *)_dst;
  413. int i;
  414. for (i=0; i<width; i++) {
  415. dst[i]= src[4*i]<<6 | src[4*i]>>2;
  416. }
  417. }
  418. static void rgbaToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1,
  419. const uint8_t *unused2, int width, uint32_t *unused, void *opq)
  420. {
  421. int16_t *dst = (int16_t *)_dst;
  422. int i;
  423. for (i=0; i<width; i++) {
  424. dst[i]= src[4*i+3]<<6 | src[4*i+3]>>2;
  425. }
  426. }
  427. static void palToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1,
  428. const uint8_t *unused2, int width, uint32_t *pal, void *opq)
  429. {
  430. int16_t *dst = (int16_t *)_dst;
  431. int i;
  432. for (i=0; i<width; i++) {
  433. int d= src[i];
  434. dst[i]= (pal[d] >> 24)<<6 | pal[d]>>26;
  435. }
  436. }
  437. static void palToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1,
  438. const uint8_t *unused2, int width, uint32_t *pal, void *opq)
  439. {
  440. int16_t *dst = (int16_t *)_dst;
  441. int i;
  442. for (i = 0; i < width; i++) {
  443. int d = src[i];
  444. dst[i] = (pal[d] & 0xFF)<<6;
  445. }
  446. }
  447. static void palToUV_c(uint8_t *_dstU, uint8_t *_dstV,
  448. const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
  449. int width, uint32_t *pal, void *opq)
  450. {
  451. uint16_t *dstU = (uint16_t *)_dstU;
  452. int16_t *dstV = (int16_t *)_dstV;
  453. int i;
  454. av_assert1(src1 == src2);
  455. for (i = 0; i < width; i++) {
  456. int p = pal[src1[i]];
  457. dstU[i] = (uint8_t)(p>> 8)<<6;
  458. dstV[i] = (uint8_t)(p>>16)<<6;
  459. }
  460. }
  461. static void monowhite2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1,
  462. const uint8_t *unused2, int width, uint32_t *unused, void *opq)
  463. {
  464. int16_t *dst = (int16_t *)_dst;
  465. int i, j;
  466. width = (width + 7) >> 3;
  467. for (i = 0; i < width; i++) {
  468. int d = ~src[i];
  469. for (j = 0; j < 8; j++)
  470. dst[8*i+j]= ((d>>(7-j))&1) * 16383;
  471. }
  472. if(width&7){
  473. int d= ~src[i];
  474. for (j = 0; j < (width&7); j++)
  475. dst[8*i+j]= ((d>>(7-j))&1) * 16383;
  476. }
  477. }
  478. static void monoblack2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1,
  479. const uint8_t *unused2, int width, uint32_t *unused, void *opq)
  480. {
  481. int16_t *dst = (int16_t *)_dst;
  482. int i, j;
  483. width = (width + 7) >> 3;
  484. for (i = 0; i < width; i++) {
  485. int d = src[i];
  486. for (j = 0; j < 8; j++)
  487. dst[8*i+j]= ((d>>(7-j))&1) * 16383;
  488. }
  489. if(width&7){
  490. int d = src[i];
  491. for (j = 0; j < (width&7); j++)
  492. dst[8*i+j] = ((d>>(7-j))&1) * 16383;
  493. }
  494. }
  495. static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  496. uint32_t *unused, void *opq)
  497. {
  498. int i;
  499. for (i = 0; i < width; i++)
  500. dst[i] = src[2 * i];
  501. }
  502. static void yuy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
  503. const uint8_t *src2, int width, uint32_t *unused, void *opq)
  504. {
  505. int i;
  506. for (i = 0; i < width; i++) {
  507. dstU[i] = src1[4 * i + 1];
  508. dstV[i] = src1[4 * i + 3];
  509. }
  510. av_assert1(src1 == src2);
  511. }
  512. static void yvy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
  513. const uint8_t *src2, int width, uint32_t *unused, void *opq)
  514. {
  515. int i;
  516. for (i = 0; i < width; i++) {
  517. dstV[i] = src1[4 * i + 1];
  518. dstU[i] = src1[4 * i + 3];
  519. }
  520. av_assert1(src1 == src2);
  521. }
/*
 * Y210/Y212-style packed little-endian 4:2:2: 16-bit words laid out as
 * Y0 U Y1 V (8 bytes per two pixels), samples left-justified so each word
 * is shifted right by `shift` to drop the padding bits.  The Y reader takes
 * one luma word every 4 bytes; the UV reader takes the words at offsets
 * 2 and 6 of each 8-byte group.
 */
#define y21xle_wrapper(bits, shift) \
static void y2 ## bits ## le_UV_c(uint8_t *dstU, uint8_t *dstV, \
                                  const uint8_t *unused0, \
                                  const uint8_t *src, \
                                  const uint8_t *unused1, int width, \
                                  uint32_t *unused2, void *opq) \
{ \
    int i; \
    for (i = 0; i < width; i++) { \
        AV_WN16(dstU + i * 2, AV_RL16(src + i * 8 + 2) >> shift); \
        AV_WN16(dstV + i * 2, AV_RL16(src + i * 8 + 6) >> shift); \
    } \
} \
\
static void y2 ## bits ## le_Y_c(uint8_t *dst, const uint8_t *src, \
                                 const uint8_t *unused0, \
                                 const uint8_t *unused1, int width, \
                                 uint32_t *unused2, void *opq) \
{ \
    int i; \
    for (i = 0; i < width; i++) \
        AV_WN16(dst + i * 2, AV_RL16(src + i * 4) >> shift); \
}
y21xle_wrapper(10, 6)
y21xle_wrapper(12, 4)
  547. static void bswap16Y_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1, const uint8_t *unused2, int width,
  548. uint32_t *unused, void *opq)
  549. {
  550. int i;
  551. const uint16_t *src = (const uint16_t *)_src;
  552. uint16_t *dst = (uint16_t *)_dst;
  553. for (i = 0; i < width; i++)
  554. dst[i] = av_bswap16(src[i]);
  555. }
  556. static void bswap16UV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *_src1,
  557. const uint8_t *_src2, int width, uint32_t *unused, void *opq)
  558. {
  559. int i;
  560. const uint16_t *src1 = (const uint16_t *)_src1,
  561. *src2 = (const uint16_t *)_src2;
  562. uint16_t *dstU = (uint16_t *)_dstU, *dstV = (uint16_t *)_dstV;
  563. for (i = 0; i < width; i++) {
  564. dstU[i] = av_bswap16(src1[i]);
  565. dstV[i] = av_bswap16(src2[i]);
  566. }
  567. }
/* Extract the gray channel from packed 16-bit gray+alpha, little-endian
 * (gray word first in each 4-byte pixel). */
static void read_ya16le_gray_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                               uint32_t *unused, void *opq)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 4));
}
/* Extract the alpha channel from packed 16-bit gray+alpha, little-endian
 * (alpha word second, at byte offset 2 of each pixel). */
static void read_ya16le_alpha_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                                uint32_t *unused, void *opq)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 4 + 2));
}
/* Big-endian counterpart of read_ya16le_gray_c. */
static void read_ya16be_gray_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                               uint32_t *unused, void *opq)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RB16(src + i * 4));
}
/* Big-endian counterpart of read_ya16le_alpha_c. */
static void read_ya16be_alpha_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                                uint32_t *unused, void *opq)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RB16(src + i * 4 + 2));
}
/* AYUV64LE: four little-endian 16-bit words per pixel in A,Y,U,V order.
 * Luma is the second word (byte offset 2). */
static void read_ayuv64le_Y_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
                              uint32_t *unused2, void *opq)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 8 + 2));
}
/* AYUV64LE chroma: U at byte offset 4, V at byte offset 6 of each
 * 8-byte A,Y,U,V pixel. */
static void read_ayuv64le_UV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src,
                               const uint8_t *unused1, int width, uint32_t *unused2, void *opq)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RL16(src + i * 8 + 4));
        AV_WN16(dstV + i * 2, AV_RL16(src + i * 8 + 6));
    }
}
/* AYUV64LE alpha: first 16-bit word of each pixel. */
static void read_ayuv64le_A_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
                              uint32_t *unused2, void *opq)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 8));
}
  619. static void read_vuyx_UV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src,
  620. const uint8_t *unused1, int width, uint32_t *unused2, void *opq)
  621. {
  622. int i;
  623. for (i = 0; i < width; i++) {
  624. dstU[i] = src[i * 4 + 1];
  625. dstV[i] = src[i * 4];
  626. }
  627. }
  628. static void read_vuyx_Y_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
  629. uint32_t *unused2, void *opq)
  630. {
  631. int i;
  632. for (i = 0; i < width; i++)
  633. dst[i] = src[i * 4 + 2];
  634. }
  635. static void read_vuya_A_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
  636. uint32_t *unused2, void *opq)
  637. {
  638. int i;
  639. for (i = 0; i < width; i++)
  640. dst[i] = src[i * 4 + 3];
  641. }
/* XV30LE packs U,Y,V as 10-bit fields (plus 2 bits padding) in one 32-bit
 * little-endian word; luma occupies bits 10-19. */
static void read_xv30le_Y_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
                            uint32_t *unused2, void *opq)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, (AV_RL32(src + i * 4) >> 10) & 0x3FFu);
}
/* XV30LE chroma: U in bits 0-9, V in bits 20-29 of each 32-bit word. */
static void read_xv30le_UV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src,
                             const uint8_t *unused1, int width, uint32_t *unused2, void *opq)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RL32(src + i * 4) & 0x3FFu);
        AV_WN16(dstV + i * 2, (AV_RL32(src + i * 4) >> 20) & 0x3FFu);
    }
}
/* XV36LE: four 16-bit little-endian words per pixel with 12-bit samples in
 * the high bits (hence >> 4); luma is the second word (byte offset 2). */
static void read_xv36le_Y_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused0, const uint8_t *unused1, int width,
                            uint32_t *unused2, void *opq)
{
    int i;
    for (i = 0; i < width; i++)
        AV_WN16(dst + i * 2, AV_RL16(src + i * 8 + 2) >> 4);
}
/* XV36LE chroma: U is the first word (offset 0), V the third (offset 4),
 * both 12-bit MSB-aligned (hence >> 4). */
static void read_xv36le_UV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src,
                             const uint8_t *unused1, int width, uint32_t *unused2, void *opq)
{
    int i;
    for (i = 0; i < width; i++) {
        AV_WN16(dstU + i * 2, AV_RL16(src + i * 8 + 0) >> 4);
        AV_WN16(dstV + i * 2, AV_RL16(src + i * 8 + 4) >> 4);
    }
}
/* This is almost identical to the previous, and exists only because
 * yuy2To(Y|UV)_c(dst, src + 1, ...) would have 100% unaligned accesses. */
  676. static void uyvyToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
  677. uint32_t *unused, void *opq)
  678. {
  679. int i;
  680. for (i = 0; i < width; i++)
  681. dst[i] = src[2 * i + 1];
  682. }
  683. static void uyvyToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
  684. const uint8_t *src2, int width, uint32_t *unused, void *opq)
  685. {
  686. int i;
  687. for (i = 0; i < width; i++) {
  688. dstU[i] = src1[4 * i + 0];
  689. dstV[i] = src1[4 * i + 2];
  690. }
  691. av_assert1(src1 == src2);
  692. }
  693. static av_always_inline void nvXXtoUV_c(uint8_t *dst1, uint8_t *dst2,
  694. const uint8_t *src, int width)
  695. {
  696. int i;
  697. for (i = 0; i < width; i++) {
  698. dst1[i] = src[2 * i + 0];
  699. dst2[i] = src[2 * i + 1];
  700. }
  701. }
/* NV12/NV16/NV24: interleaved chroma stored U-first. */
static void nv12ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused, void *opq)
{
    nvXXtoUV_c(dstU, dstV, src1, width);
}
/* NV21/NV42: interleaved chroma stored V-first, so swap the destinations. */
static void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused, void *opq)
{
    nvXXtoUV_c(dstV, dstU, src1, width);
}
/* Semi-planar 16-bit chroma readers (P010/P210/P410 family): the chroma
 * plane interleaves U,V words with samples MSB-aligned, so each value is
 * shifted right by "shift" (= 16 - bits) after the endian-aware load.
 * Generates one LE and one BE variant per instantiation. */
#define p01x_uv_wrapper(bits, shift) \
static void p0 ## bits ## LEToUV_c(uint8_t *dstU, uint8_t *dstV, \
                                   const uint8_t *unused0, \
                                   const uint8_t *src1, \
                                   const uint8_t *src2, int width, \
                                   uint32_t *unused, void *opq) \
{ \
    int i; \
    for (i = 0; i < width; i++) { \
        AV_WN16(dstU + i * 2, AV_RL16(src1 + i * 4 + 0) >> shift); \
        AV_WN16(dstV + i * 2, AV_RL16(src1 + i * 4 + 2) >> shift); \
    } \
} \
\
static void p0 ## bits ## BEToUV_c(uint8_t *dstU, uint8_t *dstV, \
                                   const uint8_t *unused0, \
                                   const uint8_t *src1, \
                                   const uint8_t *src2, int width, \
                                   uint32_t *unused, void *opq) \
{ \
    int i; \
    for (i = 0; i < width; i++) { \
        AV_WN16(dstU + i * 2, AV_RB16(src1 + i * 4 + 0) >> shift); \
        AV_WN16(dstV + i * 2, AV_RB16(src1 + i * 4 + 2) >> shift); \
    } \
}
/* Luma readers for the same semi-planar formats; also pulls in the matching
 * chroma readers via p01x_uv_wrapper. */
#define p01x_wrapper(bits, shift) \
static void p0 ## bits ## LEToY_c(uint8_t *dst, const uint8_t *src, \
                                  const uint8_t *unused1, \
                                  const uint8_t *unused2, int width, \
                                  uint32_t *unused, void *opq) \
{ \
    int i; \
    for (i = 0; i < width; i++) { \
        AV_WN16(dst + i * 2, AV_RL16(src + i * 2) >> shift); \
    } \
} \
\
static void p0 ## bits ## BEToY_c(uint8_t *dst, const uint8_t *src, \
                                  const uint8_t *unused1, \
                                  const uint8_t *unused2, int width, \
                                  uint32_t *unused, void *opq) \
{ \
    int i; \
    for (i = 0; i < width; i++) { \
        AV_WN16(dst + i * 2, AV_RB16(src + i * 2) >> shift); \
    } \
} \
p01x_uv_wrapper(bits, shift)

/* P010 (10-bit, shift 6), P012 (12-bit, shift 4); P016 needs no luma
 * shift-readers, only the chroma deinterleave (shift 0). */
p01x_wrapper(10, 6)
p01x_wrapper(12, 4)
p01x_uv_wrapper(16, 0)
/* Packed BGR24 -> luma, fixed point.  The bias term is the luma offset 16
 * in RGB2YUV_SHIFT fixed point (32<<(SHIFT-1) == 16<<SHIFT) plus half an
 * output LSB for rounding; the final shift leaves 6 extra fractional bits
 * in the int16_t output. */
static void bgr24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2,
                       int width, uint32_t *rgb2yuv, void *opq)
{
    int16_t *dst = (int16_t *)_dst;
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int b = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int r = src[i * 3 + 2];
        dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
    }
}
/* Packed BGR24 -> chroma, fixed point.  Bias = chroma offset 128 in fixed
 * point (256<<(SHIFT-1) == 128<<SHIFT) plus half an output LSB; output
 * keeps 6 extra fractional bits.  src1/src2 must alias the same line. */
static void bgr24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *rgb2yuv, void *opq)
{
    int16_t *dstU = (int16_t *)_dstU;
    int16_t *dstV = (int16_t *)_dstV;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int b = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int r = src1[3 * i + 2];
        dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
        dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
    }
    av_assert1(src1 == src2);
}
/* Horizontally-halved BGR24 -> chroma: each output averages two adjacent
 * pixels, so r/g/b hold pixel sums (double range); the bias and the final
 * shift are scaled accordingly (one bit less than the full-res variant). */
static void bgr24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *rgb2yuv, void *opq)
{
    int16_t *dstU = (int16_t *)_dstU;
    int16_t *dstV = (int16_t *)_dstV;
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    for (i = 0; i < width; i++) {
        int b = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int r = src1[6 * i + 2] + src1[6 * i + 5];
        dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
        dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
    }
    av_assert1(src1 == src2);
}
/* Packed RGB24 -> luma; identical arithmetic to bgr24ToY_c, only the
 * in-memory component order differs (R first). */
static void rgb24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                       uint32_t *rgb2yuv, void *opq)
{
    int16_t *dst = (int16_t *)_dst;
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int r = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int b = src[i * 3 + 2];
        dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
    }
}
/* Packed RGB24 -> chroma; identical arithmetic to bgr24ToUV_c with R-first
 * component order.  src1/src2 must alias the same line. */
static void rgb24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *rgb2yuv, void *opq)
{
    int16_t *dstU = (int16_t *)_dstU;
    int16_t *dstV = (int16_t *)_dstV;
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int b = src1[3 * i + 2];
        dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
        dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
    }
}
/* Horizontally-halved RGB24 -> chroma; same scaled-bias scheme as
 * bgr24ToUV_half_c with R-first component order. */
static void rgb24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *rgb2yuv, void *opq)
{
    int16_t *dstU = (int16_t *)_dstU;
    int16_t *dstV = (int16_t *)_dstV;
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int b = src1[6 * i + 2] + src1[6 * i + 5];
        dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
        dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
    }
}
/* 8-bit planar GBR(A) -> luma.  Plane order is G,B,R (GBRP convention).
 * Bias 0x801<<(SHIFT-7) = luma offset 16 in fixed point + half-LSB
 * rounding; output is left-aligned to 14-bit range in a uint16_t. */
static void planar_rgb_to_y(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *rgb2yuv, void *opq)
{
    uint16_t *dst = (uint16_t *)_dst;
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int g = src[0][i];
        int b = src[1][i];
        int r = src[2][i];
        dst[i] = (ry*r + gy*g + by*b + (0x801<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
    }
}
  872. static void planar_rgb_to_a(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *unused, void *opq)
  873. {
  874. uint16_t *dst = (uint16_t *)_dst;
  875. int i;
  876. for (i = 0; i < width; i++)
  877. dst[i] = src[3][i] << 6;
  878. }
/* 8-bit planar GBR(A) -> chroma.  Bias 0x4001<<(SHIFT-7) = chroma offset
 * 128 in fixed point + half-LSB rounding; 14-bit-aligned output. */
static void planar_rgb_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *src[4], int width, int32_t *rgb2yuv, void *opq)
{
    uint16_t *dstU = (uint16_t *)_dstU;
    uint16_t *dstV = (uint16_t *)_dstV;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int g = src[0][i];
        int b = src[1][i];
        int r = src[2][i];
        dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
        dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
    }
}
/* Endian-aware 16-bit sample load used by the planar_rgb16_* helpers. */
#define rdpx(src) \
    (is_be ? AV_RB16(src) : AV_RL16(src))
/* 9..16-bit planar GBR(A) -> luma; always_inline so bpc/is_be fold to
 * constants in the per-format wrappers.  Samples are renormalized to
 * 14-bit output ("shift" caps at 14 so true 16-bit input narrows). */
static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_src[4],
                                               int width, int bpc, int is_be, int32_t *rgb2yuv)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dst = (uint16_t *)_dst;
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int shift = bpc < 16 ? bpc : 14;
    for (i = 0; i < width; i++) {
        int g = rdpx(src[0] + i);
        int b = rdpx(src[1] + i);
        int r = rdpx(src[2] + i);
        dst[i] = (ry*r + gy*g + by*b + (16 << (RGB2YUV_SHIFT + bpc - 8)) + (1 << (RGB2YUV_SHIFT + shift - 15))) >> (RGB2YUV_SHIFT + shift - 14);
    }
}
/* 9..16-bit planar alpha -> 14-bit-aligned output (left shift by
 * 14 - effective bit depth; no shift for 16-bit input since shift==14). */
static av_always_inline void planar_rgb16_to_a(uint8_t *_dst, const uint8_t *_src[4],
                                               int width, int bpc, int is_be, int32_t *rgb2yuv)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dst = (uint16_t *)_dst;
    int shift = bpc < 16 ? bpc : 14;
    for (i = 0; i < width; i++) {
        dst[i] = rdpx(src[3] + i) << (14 - shift);
    }
}
/* 9..16-bit planar GBR(A) -> chroma; same renormalization scheme as
 * planar_rgb16_to_y with the 128-level chroma offset scaled to bpc. */
static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV,
                                                const uint8_t *_src[4], int width,
                                                int bpc, int is_be, int32_t *rgb2yuv)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dstU = (uint16_t *)_dstU;
    uint16_t *dstV = (uint16_t *)_dstV;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    int shift = bpc < 16 ? bpc : 14;
    for (i = 0; i < width; i++) {
        int g = rdpx(src[0] + i);
        int b = rdpx(src[1] + i);
        int r = rdpx(src[2] + i);
        dstU[i] = (ru*r + gu*g + bu*b + (128 << (RGB2YUV_SHIFT + bpc - 8)) + (1 << (RGB2YUV_SHIFT + shift - 15))) >> (RGB2YUV_SHIFT + shift - 14);
        dstV[i] = (rv*r + gv*g + bv*b + (128 << (RGB2YUV_SHIFT + bpc - 8)) + (1 << (RGB2YUV_SHIFT + shift - 15))) >> (RGB2YUV_SHIFT + shift - 14);
    }
}
#undef rdpx
/* Endian-aware 32-bit float sample load for the planar_rgbf32_* helpers. */
#define rdpx(src) (is_be ? av_int2float(AV_RB32(src)): av_int2float(AV_RL32(src)))
/* Float planar alpha -> 16-bit: scale [0,1] to [0,65535] with clipping
 * and round-to-nearest. */
static av_always_inline void planar_rgbf32_to_a(uint8_t *_dst, const uint8_t *_src[4], int width, int is_be, int32_t *rgb2yuv)
{
    int i;
    const float **src = (const float **)_src;
    uint16_t *dst = (uint16_t *)_dst;
    for (i = 0; i < width; i++) {
        dst[i] = lrintf(av_clipf(65535.0f * rdpx(src[3] + i), 0.0f, 65535.0f));
    }
}
/* Float planar GBR -> chroma: components scaled to 16-bit, then the fixed-
 * point matrix; bias 0x10001<<(SHIFT-1) = chroma offset 32768 + half-LSB. */
static av_always_inline void planar_rgbf32_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *_src[4], int width, int is_be, int32_t *rgb2yuv)
{
    int i;
    const float **src = (const float **)_src;
    uint16_t *dstU = (uint16_t *)_dstU;
    uint16_t *dstV = (uint16_t *)_dstV;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    for (i = 0; i < width; i++) {
        int g = lrintf(av_clipf(65535.0f * rdpx(src[0] + i), 0.0f, 65535.0f));
        int b = lrintf(av_clipf(65535.0f * rdpx(src[1] + i), 0.0f, 65535.0f));
        int r = lrintf(av_clipf(65535.0f * rdpx(src[2] + i), 0.0f, 65535.0f));
        dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}
/* Float planar GBR -> luma: components scaled to 16-bit, fixed-point
 * matrix; bias 0x2001<<(SHIFT-1) = luma offset 4096 + half-LSB rounding. */
static av_always_inline void planar_rgbf32_to_y(uint8_t *_dst, const uint8_t *_src[4], int width, int is_be, int32_t *rgb2yuv)
{
    int i;
    const float **src = (const float **)_src;
    uint16_t *dst = (uint16_t *)_dst;
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    for (i = 0; i < width; i++) {
        int g = lrintf(av_clipf(65535.0f * rdpx(src[0] + i), 0.0f, 65535.0f));
        int b = lrintf(av_clipf(65535.0f * rdpx(src[1] + i), 0.0f, 65535.0f));
        int r = lrintf(av_clipf(65535.0f * rdpx(src[2] + i), 0.0f, 65535.0f));
        dst[i] = (ry*r + gy*g + by*b + (0x2001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}
/* Float gray -> 16-bit luma with clipping and round-to-nearest. */
static av_always_inline void grayf32ToY16_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
                                            const uint8_t *unused2, int width, int is_be, uint32_t *unused)
{
    int i;
    const float *src = (const float *)_src;
    uint16_t *dst = (uint16_t *)_dst;
    for (i = 0; i < width; ++i){
        dst[i] = lrintf(av_clipf(65535.0f * rdpx(src + i), 0.0f, 65535.0f));
    }
}
#undef rdpx
  992. #define rgb9plus_planar_funcs_endian(nbits, endian_name, endian) \
  993. static void planar_rgb##nbits##endian_name##_to_y(uint8_t *dst, const uint8_t *src[4], \
  994. int w, int32_t *rgb2yuv, void *opq) \
  995. { \
  996. planar_rgb16_to_y(dst, src, w, nbits, endian, rgb2yuv); \
  997. } \
  998. static void planar_rgb##nbits##endian_name##_to_uv(uint8_t *dstU, uint8_t *dstV, \
  999. const uint8_t *src[4], int w, int32_t *rgb2yuv, \
  1000. void *opq) \
  1001. { \
  1002. planar_rgb16_to_uv(dstU, dstV, src, w, nbits, endian, rgb2yuv); \
  1003. } \
/* Generate LE/BE alpha-plane readers for formats that carry transparency. */
#define rgb9plus_planar_transparency_funcs(nbits) \
static void planar_rgb##nbits##le_to_a(uint8_t *dst, const uint8_t *src[4], \
                                       int w, int32_t *rgb2yuv, \
                                       void *opq) \
{ \
    planar_rgb16_to_a(dst, src, w, nbits, 0, rgb2yuv); \
} \
static void planar_rgb##nbits##be_to_a(uint8_t *dst, const uint8_t *src[4], \
                                       int w, int32_t *rgb2yuv, \
                                       void *opq) \
{ \
    planar_rgb16_to_a(dst, src, w, nbits, 1, rgb2yuv); \
}
/* Instantiate both endiannesses at once, then emit all supported depths. */
#define rgb9plus_planar_funcs(nbits) \
    rgb9plus_planar_funcs_endian(nbits, le, 0) \
    rgb9plus_planar_funcs_endian(nbits, be, 1)

rgb9plus_planar_funcs(9)
rgb9plus_planar_funcs(10)
rgb9plus_planar_funcs(12)
rgb9plus_planar_funcs(14)
rgb9plus_planar_funcs(16)

/* Alpha readers only exist for the depths that have GBRAP variants. */
rgb9plus_planar_transparency_funcs(10)
rgb9plus_planar_transparency_funcs(12)
rgb9plus_planar_transparency_funcs(14)
rgb9plus_planar_transparency_funcs(16)
/* Generate per-endianness wrappers for the float planar GBR(A) and float
 * gray readers, fixing the is_be argument of the always-inline helpers. */
#define rgbf32_planar_funcs_endian(endian_name, endian) \
static void planar_rgbf32##endian_name##_to_y(uint8_t *dst, const uint8_t *src[4], \
                                              int w, int32_t *rgb2yuv, void *opq) \
{ \
    planar_rgbf32_to_y(dst, src, w, endian, rgb2yuv); \
} \
static void planar_rgbf32##endian_name##_to_uv(uint8_t *dstU, uint8_t *dstV, \
                                               const uint8_t *src[4], int w, int32_t *rgb2yuv, \
                                               void *opq) \
{ \
    planar_rgbf32_to_uv(dstU, dstV, src, w, endian, rgb2yuv); \
} \
static void planar_rgbf32##endian_name##_to_a(uint8_t *dst, const uint8_t *src[4], \
                                              int w, int32_t *rgb2yuv, void *opq) \
{ \
    planar_rgbf32_to_a(dst, src, w, endian, rgb2yuv); \
} \
static void grayf32##endian_name##ToY16_c(uint8_t *dst, const uint8_t *src, \
                                          const uint8_t *unused1, const uint8_t *unused2, \
                                          int width, uint32_t *unused, void *opq) \
{ \
    grayf32ToY16_c(dst, src, unused1, unused2, width, endian, unused); \
}

rgbf32_planar_funcs_endian(le, 0)
rgbf32_planar_funcs_endian(be, 1)
/* Endian-aware half-float load: convert the 16-bit half to a float via the
 * half2float lookup tables. */
#define rdpx(src) av_int2float(half2float(is_be ? AV_RB16(&src) : AV_RL16(&src), h2f_tbl))
/* Packed RGBA half-float -> chroma with 2:1 horizontal averaging: each
 * component pair is scaled to 16-bit, averaged, then run through the
 * fixed-point matrix (bias = chroma offset 32768 + half-LSB rounding). */
static av_always_inline void rgbaf16ToUV_half_endian(uint16_t *dstU, uint16_t *dstV, int is_be,
                                                     const uint16_t *src, int width,
                                                     int32_t *rgb2yuv, Half2FloatTables *h2f_tbl)
{
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int r = (lrintf(av_clipf(65535.0f * rdpx(src[i*8+0]), 0.0f, 65535.0f)) +
                 lrintf(av_clipf(65535.0f * rdpx(src[i*8+4]), 0.0f, 65535.0f))) >> 1;
        int g = (lrintf(av_clipf(65535.0f * rdpx(src[i*8+1]), 0.0f, 65535.0f)) +
                 lrintf(av_clipf(65535.0f * rdpx(src[i*8+5]), 0.0f, 65535.0f))) >> 1;
        int b = (lrintf(av_clipf(65535.0f * rdpx(src[i*8+2]), 0.0f, 65535.0f)) +
                 lrintf(av_clipf(65535.0f * rdpx(src[i*8+6]), 0.0f, 65535.0f))) >> 1;
        dstU[i] = (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}
/* Packed RGBA half-float -> chroma (full resolution): components scaled to
 * 16-bit, then fixed-point matrix with the 32768 chroma offset. */
static av_always_inline void rgbaf16ToUV_endian(uint16_t *dstU, uint16_t *dstV, int is_be,
                                                const uint16_t *src, int width,
                                                int32_t *rgb2yuv, Half2FloatTables *h2f_tbl)
{
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int r = lrintf(av_clipf(65535.0f * rdpx(src[i*4+0]), 0.0f, 65535.0f));
        int g = lrintf(av_clipf(65535.0f * rdpx(src[i*4+1]), 0.0f, 65535.0f));
        int b = lrintf(av_clipf(65535.0f * rdpx(src[i*4+2]), 0.0f, 65535.0f));
        dstU[i] = (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}
/* Packed RGBA half-float -> luma: components scaled to 16-bit, fixed-point
 * matrix with bias 0x2001<<(SHIFT-1) (luma offset 4096 + rounding). */
static av_always_inline void rgbaf16ToY_endian(uint16_t *dst, const uint16_t *src, int is_be,
                                               int width, int32_t *rgb2yuv, Half2FloatTables *h2f_tbl)
{
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int r = lrintf(av_clipf(65535.0f * rdpx(src[i*4+0]), 0.0f, 65535.0f));
        int g = lrintf(av_clipf(65535.0f * rdpx(src[i*4+1]), 0.0f, 65535.0f));
        int b = lrintf(av_clipf(65535.0f * rdpx(src[i*4+2]), 0.0f, 65535.0f));
        dst[i] = (ry*r + gy*g + by*b + (0x2001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}
/* Packed RGBA half-float alpha (4th component) -> 16-bit with clipping. */
static av_always_inline void rgbaf16ToA_endian(uint16_t *dst, const uint16_t *src, int is_be,
                                               int width, Half2FloatTables *h2f_tbl)
{
    int i;
    for (i=0; i<width; i++) {
        dst[i] = lrintf(av_clipf(65535.0f * rdpx(src[i*4+3]), 0.0f, 65535.0f));
    }
}
#undef rdpx
/* Generate the per-endianness public-signature wrappers for the RGBA
 * half-float readers.  opq carries the Half2FloatTables pointer, which is
 * forwarded as the h2f_tbl argument of the inline helpers. */
#define rgbaf16_funcs_endian(endian_name, endian) \
static void rgbaf16##endian_name##ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused, \
                                              const uint8_t *src1, const uint8_t *src2, \
                                              int width, uint32_t *_rgb2yuv, void *opq) \
{ \
    const uint16_t *src = (const uint16_t*)src1; \
    uint16_t *dstU = (uint16_t*)_dstU; \
    uint16_t *dstV = (uint16_t*)_dstV; \
    int32_t *rgb2yuv = (int32_t*)_rgb2yuv; \
    av_assert1(src1==src2); \
    rgbaf16ToUV_half_endian(dstU, dstV, endian, src, width, rgb2yuv, opq); \
} \
static void rgbaf16##endian_name##ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused, \
                                         const uint8_t *src1, const uint8_t *src2, \
                                         int width, uint32_t *_rgb2yuv, void *opq) \
{ \
    const uint16_t *src = (const uint16_t*)src1; \
    uint16_t *dstU = (uint16_t*)_dstU; \
    uint16_t *dstV = (uint16_t*)_dstV; \
    int32_t *rgb2yuv = (int32_t*)_rgb2yuv; \
    av_assert1(src1==src2); \
    rgbaf16ToUV_endian(dstU, dstV, endian, src, width, rgb2yuv, opq); \
} \
static void rgbaf16##endian_name##ToY_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused0, \
                                        const uint8_t *unused1, int width, uint32_t *_rgb2yuv, void *opq) \
{ \
    const uint16_t *src = (const uint16_t*)_src; \
    uint16_t *dst = (uint16_t*)_dst; \
    int32_t *rgb2yuv = (int32_t*)_rgb2yuv; \
    rgbaf16ToY_endian(dst, src, endian, width, rgb2yuv, opq); \
} \
static void rgbaf16##endian_name##ToA_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused0, \
                                        const uint8_t *unused1, int width, uint32_t *unused2, void *opq) \
{ \
    const uint16_t *src = (const uint16_t*)_src; \
    uint16_t *dst = (uint16_t*)_dst; \
    rgbaf16ToA_endian(dst, src, endian, width, opq); \
}

rgbaf16_funcs_endian(le, 0)
rgbaf16_funcs_endian(be, 1)
  1149. av_cold void ff_sws_init_input_funcs(SwsContext *c)
  1150. {
  1151. enum AVPixelFormat srcFormat = c->srcFormat;
  1152. c->chrToYV12 = NULL;
  1153. switch (srcFormat) {
  1154. case AV_PIX_FMT_YUYV422:
  1155. c->chrToYV12 = yuy2ToUV_c;
  1156. break;
  1157. case AV_PIX_FMT_YVYU422:
  1158. c->chrToYV12 = yvy2ToUV_c;
  1159. break;
  1160. case AV_PIX_FMT_UYVY422:
  1161. c->chrToYV12 = uyvyToUV_c;
  1162. break;
  1163. case AV_PIX_FMT_NV12:
  1164. case AV_PIX_FMT_NV16:
  1165. case AV_PIX_FMT_NV24:
  1166. c->chrToYV12 = nv12ToUV_c;
  1167. break;
  1168. case AV_PIX_FMT_NV21:
  1169. case AV_PIX_FMT_NV42:
  1170. c->chrToYV12 = nv21ToUV_c;
  1171. break;
  1172. case AV_PIX_FMT_RGB8:
  1173. case AV_PIX_FMT_BGR8:
  1174. case AV_PIX_FMT_PAL8:
  1175. case AV_PIX_FMT_BGR4_BYTE:
  1176. case AV_PIX_FMT_RGB4_BYTE:
  1177. c->chrToYV12 = palToUV_c;
  1178. break;
  1179. case AV_PIX_FMT_GBRP9LE:
  1180. c->readChrPlanar = planar_rgb9le_to_uv;
  1181. break;
  1182. case AV_PIX_FMT_GBRAP10LE:
  1183. case AV_PIX_FMT_GBRP10LE:
  1184. c->readChrPlanar = planar_rgb10le_to_uv;
  1185. break;
  1186. case AV_PIX_FMT_GBRAP12LE:
  1187. case AV_PIX_FMT_GBRP12LE:
  1188. c->readChrPlanar = planar_rgb12le_to_uv;
  1189. break;
  1190. case AV_PIX_FMT_GBRAP14LE:
  1191. case AV_PIX_FMT_GBRP14LE:
  1192. c->readChrPlanar = planar_rgb14le_to_uv;
  1193. break;
  1194. case AV_PIX_FMT_GBRAP16LE:
  1195. case AV_PIX_FMT_GBRP16LE:
  1196. c->readChrPlanar = planar_rgb16le_to_uv;
  1197. break;
  1198. case AV_PIX_FMT_GBRAPF32LE:
  1199. case AV_PIX_FMT_GBRPF32LE:
  1200. c->readChrPlanar = planar_rgbf32le_to_uv;
  1201. break;
  1202. case AV_PIX_FMT_GBRP9BE:
  1203. c->readChrPlanar = planar_rgb9be_to_uv;
  1204. break;
  1205. case AV_PIX_FMT_GBRAP10BE:
  1206. case AV_PIX_FMT_GBRP10BE:
  1207. c->readChrPlanar = planar_rgb10be_to_uv;
  1208. break;
  1209. case AV_PIX_FMT_GBRAP12BE:
  1210. case AV_PIX_FMT_GBRP12BE:
  1211. c->readChrPlanar = planar_rgb12be_to_uv;
  1212. break;
  1213. case AV_PIX_FMT_GBRAP14BE:
  1214. case AV_PIX_FMT_GBRP14BE:
  1215. c->readChrPlanar = planar_rgb14be_to_uv;
  1216. break;
  1217. case AV_PIX_FMT_GBRAP16BE:
  1218. case AV_PIX_FMT_GBRP16BE:
  1219. c->readChrPlanar = planar_rgb16be_to_uv;
  1220. break;
  1221. case AV_PIX_FMT_GBRAPF32BE:
  1222. case AV_PIX_FMT_GBRPF32BE:
  1223. c->readChrPlanar = planar_rgbf32be_to_uv;
  1224. break;
  1225. case AV_PIX_FMT_GBRAP:
  1226. case AV_PIX_FMT_GBRP:
  1227. c->readChrPlanar = planar_rgb_to_uv;
  1228. break;
  1229. #if HAVE_BIGENDIAN
  1230. case AV_PIX_FMT_YUV420P9LE:
  1231. case AV_PIX_FMT_YUV422P9LE:
  1232. case AV_PIX_FMT_YUV444P9LE:
  1233. case AV_PIX_FMT_YUV420P10LE:
  1234. case AV_PIX_FMT_YUV422P10LE:
  1235. case AV_PIX_FMT_YUV440P10LE:
  1236. case AV_PIX_FMT_YUV444P10LE:
  1237. case AV_PIX_FMT_YUV420P12LE:
  1238. case AV_PIX_FMT_YUV422P12LE:
  1239. case AV_PIX_FMT_YUV440P12LE:
  1240. case AV_PIX_FMT_YUV444P12LE:
  1241. case AV_PIX_FMT_YUV420P14LE:
  1242. case AV_PIX_FMT_YUV422P14LE:
  1243. case AV_PIX_FMT_YUV444P14LE:
  1244. case AV_PIX_FMT_YUV420P16LE:
  1245. case AV_PIX_FMT_YUV422P16LE:
  1246. case AV_PIX_FMT_YUV444P16LE:
  1247. case AV_PIX_FMT_YUVA420P9LE:
  1248. case AV_PIX_FMT_YUVA422P9LE:
  1249. case AV_PIX_FMT_YUVA444P9LE:
  1250. case AV_PIX_FMT_YUVA420P10LE:
  1251. case AV_PIX_FMT_YUVA422P10LE:
  1252. case AV_PIX_FMT_YUVA444P10LE:
  1253. case AV_PIX_FMT_YUVA422P12LE:
  1254. case AV_PIX_FMT_YUVA444P12LE:
  1255. case AV_PIX_FMT_YUVA420P16LE:
  1256. case AV_PIX_FMT_YUVA422P16LE:
  1257. case AV_PIX_FMT_YUVA444P16LE:
  1258. c->chrToYV12 = bswap16UV_c;
  1259. break;
  1260. #else
  1261. case AV_PIX_FMT_YUV420P9BE:
  1262. case AV_PIX_FMT_YUV422P9BE:
  1263. case AV_PIX_FMT_YUV444P9BE:
  1264. case AV_PIX_FMT_YUV420P10BE:
  1265. case AV_PIX_FMT_YUV422P10BE:
  1266. case AV_PIX_FMT_YUV440P10BE:
  1267. case AV_PIX_FMT_YUV444P10BE:
  1268. case AV_PIX_FMT_YUV420P12BE:
    /* 16-bit-per-component planar YUV(A) whose byte order does not match the
     * host (this arm is selected by the preprocessor conditional opened above
     * this chunk): byte-swap the chroma planes into native order. */
    case AV_PIX_FMT_YUV422P12BE:
    case AV_PIX_FMT_YUV440P12BE:
    case AV_PIX_FMT_YUV444P12BE:
    case AV_PIX_FMT_YUV420P14BE:
    case AV_PIX_FMT_YUV422P14BE:
    case AV_PIX_FMT_YUV444P14BE:
    case AV_PIX_FMT_YUV420P16BE:
    case AV_PIX_FMT_YUV422P16BE:
    case AV_PIX_FMT_YUV444P16BE:
    case AV_PIX_FMT_YUVA420P9BE:
    case AV_PIX_FMT_YUVA422P9BE:
    case AV_PIX_FMT_YUVA444P9BE:
    case AV_PIX_FMT_YUVA420P10BE:
    case AV_PIX_FMT_YUVA422P10BE:
    case AV_PIX_FMT_YUVA444P10BE:
    case AV_PIX_FMT_YUVA422P12BE:
    case AV_PIX_FMT_YUVA444P12BE:
    case AV_PIX_FMT_YUVA420P16BE:
    case AV_PIX_FMT_YUVA422P16BE:
    case AV_PIX_FMT_YUVA444P16BE:
        c->chrToYV12 = bswap16UV_c;
        break;
#endif
    /* Packed 4:4:4(:4) layouts with dedicated UV extractors. */
    case AV_PIX_FMT_VUYA:
    case AV_PIX_FMT_VUYX:
        c->chrToYV12 = read_vuyx_UV_c;
        break;
    case AV_PIX_FMT_XV30LE:
        c->chrToYV12 = read_xv30le_UV_c;
        break;
    case AV_PIX_FMT_AYUV64LE:
        c->chrToYV12 = read_ayuv64le_UV_c;
        break;
    case AV_PIX_FMT_XV36LE:
        c->chrToYV12 = read_xv36le_UV_c;
        break;
    /* Semi-planar P0xx/P2xx/P4xx share one reader per bit depth and
     * endianness: the UV interleaving is identical, only the luma/chroma
     * subsampling differs, which the scaler handles elsewhere. */
    case AV_PIX_FMT_P010LE:
    case AV_PIX_FMT_P210LE:
    case AV_PIX_FMT_P410LE:
        c->chrToYV12 = p010LEToUV_c;
        break;
    case AV_PIX_FMT_P010BE:
    case AV_PIX_FMT_P210BE:
    case AV_PIX_FMT_P410BE:
        c->chrToYV12 = p010BEToUV_c;
        break;
    case AV_PIX_FMT_P012LE:
    case AV_PIX_FMT_P212LE:
    case AV_PIX_FMT_P412LE:
        c->chrToYV12 = p012LEToUV_c;
        break;
    case AV_PIX_FMT_P012BE:
    case AV_PIX_FMT_P212BE:
    case AV_PIX_FMT_P412BE:
        c->chrToYV12 = p012BEToUV_c;
        break;
    case AV_PIX_FMT_P016LE:
    case AV_PIX_FMT_P216LE:
    case AV_PIX_FMT_P416LE:
        c->chrToYV12 = p016LEToUV_c;
        break;
    case AV_PIX_FMT_P016BE:
    case AV_PIX_FMT_P216BE:
    case AV_PIX_FMT_P416BE:
        c->chrToYV12 = p016BEToUV_c;
        break;
    /* Packed 4:2:2 10/12-bit little-endian. */
    case AV_PIX_FMT_Y210LE:
        c->chrToYV12 = y210le_UV_c;
        break;
    case AV_PIX_FMT_Y212LE:
        c->chrToYV12 = y212le_UV_c;
        break;
    }
    /* RGB -> chroma dispatch. When the chroma plane is horizontally
     * subsampled relative to the source, use the *_half_c readers (which
     * fold pairs of source pixels into one chroma sample); otherwise use
     * the full-resolution readers. The two switches below mirror each
     * other case for case.
     *
     * NOTE(review): RGB32 selecting a bgr* reader (and vice versa) looks
     * deliberate — AV_PIX_FMT_RGB32/BGR32 appear to be endian-dependent
     * aliases so the component order seen in memory is reversed; confirm
     * against the pixfmt definitions. */
    if (c->chrSrcHSubSample) {
        switch (srcFormat) {
        case AV_PIX_FMT_RGBA64BE:
            c->chrToYV12 = rgb64BEToUV_half_c;
            break;
        case AV_PIX_FMT_RGBA64LE:
            c->chrToYV12 = rgb64LEToUV_half_c;
            break;
        case AV_PIX_FMT_BGRA64BE:
            c->chrToYV12 = bgr64BEToUV_half_c;
            break;
        case AV_PIX_FMT_BGRA64LE:
            c->chrToYV12 = bgr64LEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB48BE:
            c->chrToYV12 = rgb48BEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB48LE:
            c->chrToYV12 = rgb48LEToUV_half_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            c->chrToYV12 = bgr48BEToUV_half_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            c->chrToYV12 = bgr48LEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB32:
            c->chrToYV12 = bgr32ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB32_1:
            c->chrToYV12 = bgr321ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR24:
            c->chrToYV12 = bgr24ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR565LE:
            c->chrToYV12 = bgr16leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR565BE:
            c->chrToYV12 = bgr16beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR555LE:
            c->chrToYV12 = bgr15leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR555BE:
            c->chrToYV12 = bgr15beToUV_half_c;
            break;
        /* Planar GBR(A): one shared half-resolution reader; alpha, if any,
         * is handled by the alpha-specific paths elsewhere. */
        case AV_PIX_FMT_GBRAP:
        case AV_PIX_FMT_GBRP:
            c->chrToYV12 = gbr24pToUV_half_c;
            break;
        case AV_PIX_FMT_BGR444LE:
            c->chrToYV12 = bgr12leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR444BE:
            c->chrToYV12 = bgr12beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR32:
            c->chrToYV12 = rgb32ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR32_1:
            c->chrToYV12 = rgb321ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB24:
            c->chrToYV12 = rgb24ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB565LE:
            c->chrToYV12 = rgb16leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB565BE:
            c->chrToYV12 = rgb16beToUV_half_c;
            break;
        case AV_PIX_FMT_RGB555LE:
            c->chrToYV12 = rgb15leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB555BE:
            c->chrToYV12 = rgb15beToUV_half_c;
            break;
        case AV_PIX_FMT_RGB444LE:
            c->chrToYV12 = rgb12leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB444BE:
            c->chrToYV12 = rgb12beToUV_half_c;
            break;
        case AV_PIX_FMT_X2RGB10LE:
            c->chrToYV12 = rgb30leToUV_half_c;
            break;
        case AV_PIX_FMT_X2BGR10LE:
            c->chrToYV12 = bgr30leToUV_half_c;
            break;
        case AV_PIX_FMT_RGBAF16BE:
            c->chrToYV12 = rgbaf16beToUV_half_c;
            break;
        case AV_PIX_FMT_RGBAF16LE:
            c->chrToYV12 = rgbaf16leToUV_half_c;
            break;
        }
    } else {
        /* Full-resolution chroma: one-to-one pixel -> chroma sample. */
        switch (srcFormat) {
        case AV_PIX_FMT_RGBA64BE:
            c->chrToYV12 = rgb64BEToUV_c;
            break;
        case AV_PIX_FMT_RGBA64LE:
            c->chrToYV12 = rgb64LEToUV_c;
            break;
        case AV_PIX_FMT_BGRA64BE:
            c->chrToYV12 = bgr64BEToUV_c;
            break;
        case AV_PIX_FMT_BGRA64LE:
            c->chrToYV12 = bgr64LEToUV_c;
            break;
        case AV_PIX_FMT_RGB48BE:
            c->chrToYV12 = rgb48BEToUV_c;
            break;
        case AV_PIX_FMT_RGB48LE:
            c->chrToYV12 = rgb48LEToUV_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            c->chrToYV12 = bgr48BEToUV_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            c->chrToYV12 = bgr48LEToUV_c;
            break;
        case AV_PIX_FMT_RGB32:
            c->chrToYV12 = bgr32ToUV_c;
            break;
        case AV_PIX_FMT_RGB32_1:
            c->chrToYV12 = bgr321ToUV_c;
            break;
        case AV_PIX_FMT_BGR24:
            c->chrToYV12 = bgr24ToUV_c;
            break;
        case AV_PIX_FMT_BGR565LE:
            c->chrToYV12 = bgr16leToUV_c;
            break;
        case AV_PIX_FMT_BGR565BE:
            c->chrToYV12 = bgr16beToUV_c;
            break;
        case AV_PIX_FMT_BGR555LE:
            c->chrToYV12 = bgr15leToUV_c;
            break;
        case AV_PIX_FMT_BGR555BE:
            c->chrToYV12 = bgr15beToUV_c;
            break;
        case AV_PIX_FMT_BGR444LE:
            c->chrToYV12 = bgr12leToUV_c;
            break;
        case AV_PIX_FMT_BGR444BE:
            c->chrToYV12 = bgr12beToUV_c;
            break;
        case AV_PIX_FMT_BGR32:
            c->chrToYV12 = rgb32ToUV_c;
            break;
        case AV_PIX_FMT_BGR32_1:
            c->chrToYV12 = rgb321ToUV_c;
            break;
        case AV_PIX_FMT_RGB24:
            c->chrToYV12 = rgb24ToUV_c;
            break;
        case AV_PIX_FMT_RGB565LE:
            c->chrToYV12 = rgb16leToUV_c;
            break;
        case AV_PIX_FMT_RGB565BE:
            c->chrToYV12 = rgb16beToUV_c;
            break;
        case AV_PIX_FMT_RGB555LE:
            c->chrToYV12 = rgb15leToUV_c;
            break;
        case AV_PIX_FMT_RGB555BE:
            c->chrToYV12 = rgb15beToUV_c;
            break;
        case AV_PIX_FMT_RGB444LE:
            c->chrToYV12 = rgb12leToUV_c;
            break;
        case AV_PIX_FMT_RGB444BE:
            c->chrToYV12 = rgb12beToUV_c;
            break;
        case AV_PIX_FMT_X2RGB10LE:
            c->chrToYV12 = rgb30leToUV_c;
            break;
        case AV_PIX_FMT_X2BGR10LE:
            c->chrToYV12 = bgr30leToUV_c;
            break;
        case AV_PIX_FMT_RGBAF16BE:
            c->chrToYV12 = rgbaf16beToUV_c;
            break;
        case AV_PIX_FMT_RGBAF16LE:
            c->chrToYV12 = rgbaf16leToUV_c;
            break;
        }
    }
    /* Luma (and, for planar RGB, alpha) reader dispatch. Both function
     * pointers default to NULL; only formats that need a conversion step
     * install one. The GBRAP* cases set the planar alpha reader and then
     * deliberately fall through to the matching GBRP* case to share the
     * luma reader. */
    c->lumToYV12 = NULL;
    c->alpToYV12 = NULL;
    switch (srcFormat) {
    case AV_PIX_FMT_GBRP9LE:
        c->readLumPlanar = planar_rgb9le_to_y;
        break;
    case AV_PIX_FMT_GBRAP10LE:
        c->readAlpPlanar = planar_rgb10le_to_a;
        /* fall through */
    case AV_PIX_FMT_GBRP10LE:
        c->readLumPlanar = planar_rgb10le_to_y;
        break;
    case AV_PIX_FMT_GBRAP12LE:
        c->readAlpPlanar = planar_rgb12le_to_a;
        /* fall through */
    case AV_PIX_FMT_GBRP12LE:
        c->readLumPlanar = planar_rgb12le_to_y;
        break;
    case AV_PIX_FMT_GBRAP14LE:
        c->readAlpPlanar = planar_rgb14le_to_a;
        /* fall through */
    case AV_PIX_FMT_GBRP14LE:
        c->readLumPlanar = planar_rgb14le_to_y;
        break;
    case AV_PIX_FMT_GBRAP16LE:
        c->readAlpPlanar = planar_rgb16le_to_a;
        /* fall through */
    case AV_PIX_FMT_GBRP16LE:
        c->readLumPlanar = planar_rgb16le_to_y;
        break;
    case AV_PIX_FMT_GBRAPF32LE:
        c->readAlpPlanar = planar_rgbf32le_to_a;
        /* fall through */
    case AV_PIX_FMT_GBRPF32LE:
        c->readLumPlanar = planar_rgbf32le_to_y;
        break;
    case AV_PIX_FMT_GBRP9BE:
        c->readLumPlanar = planar_rgb9be_to_y;
        break;
    case AV_PIX_FMT_GBRAP10BE:
        c->readAlpPlanar = planar_rgb10be_to_a;
        /* fall through */
    case AV_PIX_FMT_GBRP10BE:
        c->readLumPlanar = planar_rgb10be_to_y;
        break;
    case AV_PIX_FMT_GBRAP12BE:
        c->readAlpPlanar = planar_rgb12be_to_a;
        /* fall through */
    case AV_PIX_FMT_GBRP12BE:
        c->readLumPlanar = planar_rgb12be_to_y;
        break;
    case AV_PIX_FMT_GBRAP14BE:
        c->readAlpPlanar = planar_rgb14be_to_a;
        /* fall through */
    case AV_PIX_FMT_GBRP14BE:
        c->readLumPlanar = planar_rgb14be_to_y;
        break;
    case AV_PIX_FMT_GBRAP16BE:
        c->readAlpPlanar = planar_rgb16be_to_a;
        /* fall through */
    case AV_PIX_FMT_GBRP16BE:
        c->readLumPlanar = planar_rgb16be_to_y;
        break;
    case AV_PIX_FMT_GBRAPF32BE:
        c->readAlpPlanar = planar_rgbf32be_to_a;
        /* fall through */
    case AV_PIX_FMT_GBRPF32BE:
        c->readLumPlanar = planar_rgbf32be_to_y;
        break;
    case AV_PIX_FMT_GBRAP:
        c->readAlpPlanar = planar_rgb_to_a;
        /* fall through */
    case AV_PIX_FMT_GBRP:
        c->readLumPlanar = planar_rgb_to_y;
        break;
    /* 16-bit formats whose byte order differs from the host get a luma
     * byte-swap; the YUVA variants additionally swap the alpha plane. */
#if HAVE_BIGENDIAN
    case AV_PIX_FMT_YUV420P9LE:
    case AV_PIX_FMT_YUV422P9LE:
    case AV_PIX_FMT_YUV444P9LE:
    case AV_PIX_FMT_YUV420P10LE:
    case AV_PIX_FMT_YUV422P10LE:
    case AV_PIX_FMT_YUV440P10LE:
    case AV_PIX_FMT_YUV444P10LE:
    case AV_PIX_FMT_YUV420P12LE:
    case AV_PIX_FMT_YUV422P12LE:
    case AV_PIX_FMT_YUV440P12LE:
    case AV_PIX_FMT_YUV444P12LE:
    case AV_PIX_FMT_YUV420P14LE:
    case AV_PIX_FMT_YUV422P14LE:
    case AV_PIX_FMT_YUV444P14LE:
    case AV_PIX_FMT_YUV420P16LE:
    case AV_PIX_FMT_YUV422P16LE:
    case AV_PIX_FMT_YUV444P16LE:
    case AV_PIX_FMT_GRAY9LE:
    case AV_PIX_FMT_GRAY10LE:
    case AV_PIX_FMT_GRAY12LE:
    case AV_PIX_FMT_GRAY14LE:
    case AV_PIX_FMT_GRAY16LE:
    case AV_PIX_FMT_P016LE:
    case AV_PIX_FMT_P216LE:
    case AV_PIX_FMT_P416LE:
        c->lumToYV12 = bswap16Y_c;
        break;
    case AV_PIX_FMT_YUVA420P9LE:
    case AV_PIX_FMT_YUVA422P9LE:
    case AV_PIX_FMT_YUVA444P9LE:
    case AV_PIX_FMT_YUVA420P10LE:
    case AV_PIX_FMT_YUVA422P10LE:
    case AV_PIX_FMT_YUVA444P10LE:
    case AV_PIX_FMT_YUVA422P12LE:
    case AV_PIX_FMT_YUVA444P12LE:
    case AV_PIX_FMT_YUVA420P16LE:
    case AV_PIX_FMT_YUVA422P16LE:
    case AV_PIX_FMT_YUVA444P16LE:
        c->lumToYV12 = bswap16Y_c;
        c->alpToYV12 = bswap16Y_c;
        break;
#else
    case AV_PIX_FMT_YUV420P9BE:
    case AV_PIX_FMT_YUV422P9BE:
    case AV_PIX_FMT_YUV444P9BE:
    case AV_PIX_FMT_YUV420P10BE:
    case AV_PIX_FMT_YUV422P10BE:
    case AV_PIX_FMT_YUV440P10BE:
    case AV_PIX_FMT_YUV444P10BE:
    case AV_PIX_FMT_YUV420P12BE:
    case AV_PIX_FMT_YUV422P12BE:
    case AV_PIX_FMT_YUV440P12BE:
    case AV_PIX_FMT_YUV444P12BE:
    case AV_PIX_FMT_YUV420P14BE:
    case AV_PIX_FMT_YUV422P14BE:
    case AV_PIX_FMT_YUV444P14BE:
    case AV_PIX_FMT_YUV420P16BE:
    case AV_PIX_FMT_YUV422P16BE:
    case AV_PIX_FMT_YUV444P16BE:
    case AV_PIX_FMT_GRAY9BE:
    case AV_PIX_FMT_GRAY10BE:
    case AV_PIX_FMT_GRAY12BE:
    case AV_PIX_FMT_GRAY14BE:
    case AV_PIX_FMT_GRAY16BE:
    case AV_PIX_FMT_P016BE:
    case AV_PIX_FMT_P216BE:
    case AV_PIX_FMT_P416BE:
        c->lumToYV12 = bswap16Y_c;
        break;
    case AV_PIX_FMT_YUVA420P9BE:
    case AV_PIX_FMT_YUVA422P9BE:
    case AV_PIX_FMT_YUVA444P9BE:
    case AV_PIX_FMT_YUVA420P10BE:
    case AV_PIX_FMT_YUVA422P10BE:
    case AV_PIX_FMT_YUVA444P10BE:
    case AV_PIX_FMT_YUVA422P12BE:
    case AV_PIX_FMT_YUVA444P12BE:
    case AV_PIX_FMT_YUVA420P16BE:
    case AV_PIX_FMT_YUVA422P16BE:
    case AV_PIX_FMT_YUVA444P16BE:
        c->lumToYV12 = bswap16Y_c;
        c->alpToYV12 = bswap16Y_c;
        break;
#endif
    case AV_PIX_FMT_YA16LE:
        c->lumToYV12 = read_ya16le_gray_c;
        break;
    case AV_PIX_FMT_YA16BE:
        c->lumToYV12 = read_ya16be_gray_c;
        break;
    case AV_PIX_FMT_VUYA:
    case AV_PIX_FMT_VUYX:
        c->lumToYV12 = read_vuyx_Y_c;
        break;
    case AV_PIX_FMT_XV30LE:
        c->lumToYV12 = read_xv30le_Y_c;
        break;
    case AV_PIX_FMT_AYUV64LE:
        c->lumToYV12 = read_ayuv64le_Y_c;
        break;
    case AV_PIX_FMT_XV36LE:
        c->lumToYV12 = read_xv36le_Y_c;
        break;
    /* YA8 shares yuy2ToY_c with YUYV/YVYU: in all three, the wanted byte
     * sits at even offsets of the interleaved pair. */
    case AV_PIX_FMT_YUYV422:
    case AV_PIX_FMT_YVYU422:
    case AV_PIX_FMT_YA8:
        c->lumToYV12 = yuy2ToY_c;
        break;
    case AV_PIX_FMT_UYVY422:
        c->lumToYV12 = uyvyToY_c;
        break;
    case AV_PIX_FMT_BGR24:
        c->lumToYV12 = bgr24ToY_c;
        break;
    case AV_PIX_FMT_BGR565LE:
        c->lumToYV12 = bgr16leToY_c;
        break;
    case AV_PIX_FMT_BGR565BE:
        c->lumToYV12 = bgr16beToY_c;
        break;
    case AV_PIX_FMT_BGR555LE:
        c->lumToYV12 = bgr15leToY_c;
        break;
    case AV_PIX_FMT_BGR555BE:
        c->lumToYV12 = bgr15beToY_c;
        break;
    case AV_PIX_FMT_BGR444LE:
        c->lumToYV12 = bgr12leToY_c;
        break;
    case AV_PIX_FMT_BGR444BE:
        c->lumToYV12 = bgr12beToY_c;
        break;
    case AV_PIX_FMT_RGB24:
        c->lumToYV12 = rgb24ToY_c;
        break;
    case AV_PIX_FMT_RGB565LE:
        c->lumToYV12 = rgb16leToY_c;
        break;
    case AV_PIX_FMT_RGB565BE:
        c->lumToYV12 = rgb16beToY_c;
        break;
    case AV_PIX_FMT_RGB555LE:
        c->lumToYV12 = rgb15leToY_c;
        break;
    case AV_PIX_FMT_RGB555BE:
        c->lumToYV12 = rgb15beToY_c;
        break;
    case AV_PIX_FMT_RGB444LE:
        c->lumToYV12 = rgb12leToY_c;
        break;
    case AV_PIX_FMT_RGB444BE:
        c->lumToYV12 = rgb12beToY_c;
        break;
    /* Palettized / low-bit formats all go through the palette lookup. */
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_PAL8:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_RGB4_BYTE:
        c->lumToYV12 = palToY_c;
        break;
    case AV_PIX_FMT_MONOBLACK:
        c->lumToYV12 = monoblack2Y_c;
        break;
    case AV_PIX_FMT_MONOWHITE:
        c->lumToYV12 = monowhite2Y_c;
        break;
    /* NOTE(review): cross-named readers for RGB32/BGR32 match the
     * endian-dependent alias convention used in the chroma dispatch above;
     * verify against the pixfmt definitions. */
    case AV_PIX_FMT_RGB32:
        c->lumToYV12 = bgr32ToY_c;
        break;
    case AV_PIX_FMT_RGB32_1:
        c->lumToYV12 = bgr321ToY_c;
        break;
    case AV_PIX_FMT_BGR32:
        c->lumToYV12 = rgb32ToY_c;
        break;
    case AV_PIX_FMT_BGR32_1:
        c->lumToYV12 = rgb321ToY_c;
        break;
    case AV_PIX_FMT_RGB48BE:
        c->lumToYV12 = rgb48BEToY_c;
        break;
    case AV_PIX_FMT_RGB48LE:
        c->lumToYV12 = rgb48LEToY_c;
        break;
    case AV_PIX_FMT_BGR48BE:
        c->lumToYV12 = bgr48BEToY_c;
        break;
    case AV_PIX_FMT_BGR48LE:
        c->lumToYV12 = bgr48LEToY_c;
        break;
    case AV_PIX_FMT_RGBA64BE:
        c->lumToYV12 = rgb64BEToY_c;
        break;
    case AV_PIX_FMT_RGBA64LE:
        c->lumToYV12 = rgb64LEToY_c;
        break;
    case AV_PIX_FMT_BGRA64BE:
        c->lumToYV12 = bgr64BEToY_c;
        break;
    case AV_PIX_FMT_BGRA64LE:
        c->lumToYV12 = bgr64LEToY_c;
        break;
    /* Semi-planar luma: one reader per depth/endianness, as for chroma. */
    case AV_PIX_FMT_P010LE:
    case AV_PIX_FMT_P210LE:
    case AV_PIX_FMT_P410LE:
        c->lumToYV12 = p010LEToY_c;
        break;
    case AV_PIX_FMT_P010BE:
    case AV_PIX_FMT_P210BE:
    case AV_PIX_FMT_P410BE:
        c->lumToYV12 = p010BEToY_c;
        break;
    case AV_PIX_FMT_P012LE:
    case AV_PIX_FMT_P212LE:
    case AV_PIX_FMT_P412LE:
        c->lumToYV12 = p012LEToY_c;
        break;
    case AV_PIX_FMT_P012BE:
    case AV_PIX_FMT_P212BE:
    case AV_PIX_FMT_P412BE:
        c->lumToYV12 = p012BEToY_c;
        break;
    case AV_PIX_FMT_GRAYF32LE:
        c->lumToYV12 = grayf32leToY16_c;
        break;
    case AV_PIX_FMT_GRAYF32BE:
        c->lumToYV12 = grayf32beToY16_c;
        break;
    case AV_PIX_FMT_Y210LE:
        c->lumToYV12 = y210le_Y_c;
        break;
    case AV_PIX_FMT_Y212LE:
        c->lumToYV12 = y212le_Y_c;
        break;
    case AV_PIX_FMT_X2RGB10LE:
        c->lumToYV12 = rgb30leToY_c;
        break;
    case AV_PIX_FMT_X2BGR10LE:
        c->lumToYV12 = bgr30leToY_c;
        break;
    case AV_PIX_FMT_RGBAF16BE:
        c->lumToYV12 = rgbaf16beToY_c;
        break;
    case AV_PIX_FMT_RGBAF16LE:
        c->lumToYV12 = rgbaf16leToY_c;
        break;
    }
    /* Alpha reader dispatch, only when the conversion needs an alpha
     * channel. First, a generic default: any 16-bit/N-bit source whose
     * endianness differs from the host gets a byte-swapping alpha reader,
     * unless planar RGB already routes alpha through readAlpPlanar. The
     * switch below then overrides this default for formats with a
     * dedicated extractor. */
    if (c->needAlpha) {
        if (is16BPS(srcFormat) || isNBPS(srcFormat)) {
            if (HAVE_BIGENDIAN == !isBE(srcFormat) && !c->readAlpPlanar)
                c->alpToYV12 = bswap16Y_c;
        }
        switch (srcFormat) {
        case AV_PIX_FMT_BGRA64LE:
        case AV_PIX_FMT_RGBA64LE: c->alpToYV12 = rgba64leToA_c; break;
        case AV_PIX_FMT_BGRA64BE:
        case AV_PIX_FMT_RGBA64BE: c->alpToYV12 = rgba64beToA_c; break;
        case AV_PIX_FMT_BGRA:
        case AV_PIX_FMT_RGBA:
            c->alpToYV12 = rgbaToA_c;
            break;
        case AV_PIX_FMT_ABGR:
        case AV_PIX_FMT_ARGB:
            c->alpToYV12 = abgrToA_c;
            break;
        case AV_PIX_FMT_RGBAF16BE:
            c->alpToYV12 = rgbaf16beToA_c;
            break;
        case AV_PIX_FMT_RGBAF16LE:
            c->alpToYV12 = rgbaf16leToA_c;
            break;
        /* YA8 alpha reuses uyvyToY_c: the alpha byte sits at odd offsets
         * of the Y/A pair, the same position uyvyToY_c reads Y from. */
        case AV_PIX_FMT_YA8:
            c->alpToYV12 = uyvyToY_c;
            break;
        case AV_PIX_FMT_YA16LE:
            c->alpToYV12 = read_ya16le_alpha_c;
            break;
        case AV_PIX_FMT_YA16BE:
            c->alpToYV12 = read_ya16be_alpha_c;
            break;
        case AV_PIX_FMT_VUYA:
            c->alpToYV12 = read_vuya_A_c;
            break;
        case AV_PIX_FMT_AYUV64LE:
            c->alpToYV12 = read_ayuv64le_A_c;
            break;
        case AV_PIX_FMT_PAL8 :
            c->alpToYV12 = palToA_c;
            break;
        }
    }
}