swscale.c

/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
  supported input formats: YV12, I420/IYUV, YUY2, UYVY, BGR32, BGR32_1, BGR24, BGR16, BGR15, RGB32, RGB32_1, RGB24, Y8/Y800, YVU9/IF09, PAL8
  supported output formats: YV12, I420/IYUV, YUY2, UYVY, {BGR,RGB}{1,4,8,15,16,24,32}, Y8/Y800, YVU9/IF09
  {BGR,RGB}{1,4,8,15,16} support dithering

  unscaled special converters (YV12=I420=IYUV, Y800=Y8)
  YV12 -> {BGR,RGB}{1,4,8,12,15,16,24,32}
  x -> x
  YUV9 -> YV12
  YUV9/YV12 -> Y800
  Y800 -> YUV9/YV12
  BGR24 -> BGR32 & RGB24 -> RGB32
  BGR32 -> BGR24 & RGB32 -> RGB24
  BGR15 -> BGR16
*/

/*
  tested special converters (most are tested actually, but I did not write it down ...)
  YV12 -> BGR12/BGR16
  YV12 -> YV12
  BGR15 -> BGR16
  BGR16 -> BGR16
  YVU9 -> YV12

  untested special converters
  YV12/I420 -> BGR15/BGR24/BGR32 (it is the yuv2rgb stuff, so it should be OK)
  YV12/I420 -> YV12/I420
  YUY2/BGR15/BGR24/BGR32/RGB24/RGB32 -> same format
  BGR24 -> BGR32 & RGB24 -> RGB32
  BGR32 -> BGR24 & RGB32 -> RGB24
  BGR24 -> YV12
*/

#include <inttypes.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include "config.h"
#include <assert.h>
#include "swscale.h"
#include "swscale_internal.h"
#include "rgb2rgb.h"
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/cpu.h"
#include "libavutil/avutil.h"
#include "libavutil/mathematics.h"
#include "libavutil/bswap.h"
#include "libavutil/pixdesc.h"

#define RGB2YUV_SHIFT 15
#define BY ( (int)(0.114*219/255*(1<<RGB2YUV_SHIFT)+0.5))
#define BV (-(int)(0.081*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define BU ( (int)(0.500*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define GY ( (int)(0.587*219/255*(1<<RGB2YUV_SHIFT)+0.5))
#define GV (-(int)(0.419*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define GU (-(int)(0.331*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define RY ( (int)(0.299*219/255*(1<<RGB2YUV_SHIFT)+0.5))
#define RV ( (int)(0.500*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define RU (-(int)(0.169*224/255*(1<<RGB2YUV_SHIFT)+0.5))
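
/* Worked example of the fixed-point constants above, for illustration:
 * with RGB2YUV_SHIFT = 15, BY = (int)(0.114*219/255*32768 + 0.5) = 3208,
 * GY = 16519 and RY = 8414. For white input (R = G = B = 255) the luma sum
 * (RY + GY + BY) * 255 = 28141 * 255 = 7175955; assuming the consumer adds
 * the usual rounding term 1 << (RGB2YUV_SHIFT - 1) before shifting right by
 * 15, this yields 219, the limited-range luma excursion (code 235 once the
 * +16 offset is applied). */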

/*
 NOTES
 Special versions: fast Y 1:1 scaling (no interpolation in y direction)

 TODO
 more intelligent misalignment avoidance for the horizontal scaler
 write special vertical cubic upscale version
 optimize C code (YV12 / minmax)
 add support for packed pixel YUV input & output
 add support for Y8 output
 optimize BGR24 & BGR32
 add BGR4 output support
 write special BGR->BGR scaler
*/
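
/* The tables below are ordered-dither (Bayer-style) matrices; the name
 * suffix indicates the amplitude of the entries (dither_4x4_16 holds the
 * values 0..15, dither_8x8_220 reaches 217, and so on). They are indexed
 * by (y & (H-1)) and (x & (W-1)) so the pattern tiles over the image,
 * trading banding for high-frequency noise when quantizing to few bits;
 * the larger-amplitude 8x8 tables serve the coarsest outputs, e.g. the
 * monochrome path below uses dither_8x8_220. */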

DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_4)[2][8]={
{  1,   3,   1,   3,   1,   3,   1,   3, },
{  2,   0,   2,   0,   2,   0,   2,   0, },
};

DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_8)[2][8]={
{  6,   2,   6,   2,   6,   2,   6,   2, },
{  0,   4,   0,   4,   0,   4,   0,   4, },
};

DECLARE_ALIGNED(8, const uint8_t, dither_4x4_16)[4][8]={
{  8,   4,  11,   7,   8,   4,  11,   7, },
{  2,  14,   1,  13,   2,  14,   1,  13, },
{ 10,   6,   9,   5,  10,   6,   9,   5, },
{  0,  12,   3,  15,   0,  12,   3,  15, },
};

DECLARE_ALIGNED(8, const uint8_t, dither_8x8_32)[8][8]={
{ 17,   9,  23,  15,  16,   8,  22,  14, },
{  5,  29,   3,  27,   4,  28,   2,  26, },
{ 21,  13,  19,  11,  20,  12,  18,  10, },
{  0,  24,   6,  30,   1,  25,   7,  31, },
{ 16,   8,  22,  14,  17,   9,  23,  15, },
{  4,  28,   2,  26,   5,  29,   3,  27, },
{ 20,  12,  18,  10,  21,  13,  19,  11, },
{  1,  25,   7,  31,   0,  24,   6,  30, },
};

DECLARE_ALIGNED(8, const uint8_t, dither_8x8_73)[8][8]={
{  0,  55,  14,  68,   3,  58,  17,  72, },
{ 37,  18,  50,  32,  40,  22,  54,  35, },
{  9,  64,   5,  59,  13,  67,   8,  63, },
{ 46,  27,  41,  23,  49,  31,  44,  26, },
{  2,  57,  16,  71,   1,  56,  15,  70, },
{ 39,  21,  52,  34,  38,  19,  51,  33, },
{ 11,  66,   7,  62,  10,  65,   6,  60, },
{ 48,  30,  43,  25,  47,  29,  42,  24, },
};

#if 1
DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
{117,  62, 158, 103, 113,  58, 155, 100, },
{ 34, 199,  21, 186,  31, 196,  17, 182, },
{144,  89, 131,  76, 141,  86, 127,  72, },
{  0, 165,  41, 206,  10, 175,  52, 217, },
{110,  55, 151,  96, 120,  65, 162, 107, },
{ 28, 193,  14, 179,  38, 203,  24, 189, },
{138,  83, 124,  69, 148,  93, 134,  79, },
{  7, 172,  48, 213,   3, 168,  45, 210, },
};
#elif 1
// tries to correct a gamma of 1.5
DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
{  0, 143,  18, 200,   2, 156,  25, 215, },
{ 78,  28, 125,  64,  89,  36, 138,  74, },
{ 10, 180,   3, 161,  16, 195,   8, 175, },
{109,  51,  93,  38, 121,  60, 105,  47, },
{  1, 152,  23, 210,   0, 147,  20, 205, },
{ 85,  33, 134,  71,  81,  30, 130,  67, },
{ 14, 190,   6, 171,  12, 185,   5, 166, },
{117,  57, 101,  44, 113,  54,  97,  41, },
};
#elif 1
// tries to correct a gamma of 2.0
DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
{  0, 124,   8, 193,   0, 140,  12, 213, },
{ 55,  14, 104,  42,  66,  19, 119,  52, },
{  3, 168,   1, 145,   6, 187,   3, 162, },
{ 86,  31,  70,  21,  99,  39,  82,  28, },
{  0, 134,  11, 206,   0, 129,   9, 200, },
{ 62,  17, 114,  48,  58,  16, 109,  45, },
{  5, 181,   2, 157,   4, 175,   1, 151, },
{ 95,  36,  78,  26,  90,  34,  74,  24, },
};
#else
// tries to correct a gamma of 2.5
DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
{  0, 107,   3, 187,   0, 125,   6, 212, },
{ 39,   7,  86,  28,  49,  11, 102,  36, },
{  1, 158,   0, 131,   3, 180,   1, 151, },
{ 68,  19,  52,  12,  81,  25,  64,  17, },
{  0, 119,   5, 203,   0, 113,   4, 195, },
{ 45,   9,  96,  33,  42,   8,  91,  30, },
{  2, 172,   1, 144,   2, 165,   0, 137, },
{ 77,  23,  60,  15,  72,  21,  56,  14, },
};
#endif

DECLARE_ALIGNED(8, const uint8_t, dither_8x8_128)[8][8] = {
{  36,  68,  60,  92,  34,  66,  58,  90,},
{ 100,   4, 124,  28,  98,   2, 122,  26,},
{  52,  84,  44,  76,  50,  82,  42,  74,},
{ 116,  20, 108,  12, 114,  18, 106,  10,},
{  32,  64,  56,  88,  38,  70,  62,  94,},
{  96,   0, 120,  24, 102,   6, 126,  30,},
{  48,  80,  40,  72,  54,  86,  46,  78,},
{ 112,  16, 104,   8, 118,  22, 110,  14,},
};

DECLARE_ALIGNED(8, const uint8_t, ff_sws_pb_64)[8] =
{ 64, 64, 64, 64, 64, 64, 64, 64 };

DECLARE_ALIGNED(8, const uint8_t, dithers)[8][8][8]={
{
  { 0, 1, 0, 1, 0, 1, 0, 1,},
  { 1, 0, 1, 0, 1, 0, 1, 0,},
  { 0, 1, 0, 1, 0, 1, 0, 1,},
  { 1, 0, 1, 0, 1, 0, 1, 0,},
  { 0, 1, 0, 1, 0, 1, 0, 1,},
  { 1, 0, 1, 0, 1, 0, 1, 0,},
  { 0, 1, 0, 1, 0, 1, 0, 1,},
  { 1, 0, 1, 0, 1, 0, 1, 0,},
},{
  { 1, 2, 1, 2, 1, 2, 1, 2,},
  { 3, 0, 3, 0, 3, 0, 3, 0,},
  { 1, 2, 1, 2, 1, 2, 1, 2,},
  { 3, 0, 3, 0, 3, 0, 3, 0,},
  { 1, 2, 1, 2, 1, 2, 1, 2,},
  { 3, 0, 3, 0, 3, 0, 3, 0,},
  { 1, 2, 1, 2, 1, 2, 1, 2,},
  { 3, 0, 3, 0, 3, 0, 3, 0,},
},{
  { 2, 4, 3, 5, 2, 4, 3, 5,},
  { 6, 0, 7, 1, 6, 0, 7, 1,},
  { 3, 5, 2, 4, 3, 5, 2, 4,},
  { 7, 1, 6, 0, 7, 1, 6, 0,},
  { 2, 4, 3, 5, 2, 4, 3, 5,},
  { 6, 0, 7, 1, 6, 0, 7, 1,},
  { 3, 5, 2, 4, 3, 5, 2, 4,},
  { 7, 1, 6, 0, 7, 1, 6, 0,},
},{
  {  4,  8,  7, 11,  4,  8,  7, 11,},
  { 12,  0, 15,  3, 12,  0, 15,  3,},
  {  6, 10,  5,  9,  6, 10,  5,  9,},
  { 14,  2, 13,  1, 14,  2, 13,  1,},
  {  4,  8,  7, 11,  4,  8,  7, 11,},
  { 12,  0, 15,  3, 12,  0, 15,  3,},
  {  6, 10,  5,  9,  6, 10,  5,  9,},
  { 14,  2, 13,  1, 14,  2, 13,  1,},
},{
  {  9, 17, 15, 23,  8, 16, 14, 22,},
  { 25,  1, 31,  7, 24,  0, 30,  6,},
  { 13, 21, 11, 19, 12, 20, 10, 18,},
  { 29,  5, 27,  3, 28,  4, 26,  2,},
  {  8, 16, 14, 22,  9, 17, 15, 23,},
  { 24,  0, 30,  6, 25,  1, 31,  7,},
  { 12, 20, 10, 18, 13, 21, 11, 19,},
  { 28,  4, 26,  2, 29,  5, 27,  3,},
},{
  { 18, 34, 30, 46, 17, 33, 29, 45,},
  { 50,  2, 62, 14, 49,  1, 61, 13,},
  { 26, 42, 22, 38, 25, 41, 21, 37,},
  { 58, 10, 54,  6, 57,  9, 53,  5,},
  { 16, 32, 28, 44, 19, 35, 31, 47,},
  { 48,  0, 60, 12, 51,  3, 63, 15,},
  { 24, 40, 20, 36, 27, 43, 23, 39,},
  { 56,  8, 52,  4, 59, 11, 55,  7,},
},{
  { 18, 34, 30, 46, 17, 33, 29, 45,},
  { 50,  2, 62, 14, 49,  1, 61, 13,},
  { 26, 42, 22, 38, 25, 41, 21, 37,},
  { 58, 10, 54,  6, 57,  9, 53,  5,},
  { 16, 32, 28, 44, 19, 35, 31, 47,},
  { 48,  0, 60, 12, 51,  3, 63, 15,},
  { 24, 40, 20, 36, 27, 43, 23, 39,},
  { 56,  8, 52,  4, 59, 11, 55,  7,},
},{
  {  36,  68,  60,  92,  34,  66,  58,  90,},
  { 100,   4, 124,  28,  98,   2, 122,  26,},
  {  52,  84,  44,  76,  50,  82,  42,  74,},
  { 116,  20, 108,  12, 114,  18, 106,  10,},
  {  32,  64,  56,  88,  38,  70,  62,  94,},
  {  96,   0, 120,  24, 102,   6, 126,  30,},
  {  48,  80,  40,  72,  54,  86,  46,  78,},
  { 112,  16, 104,   8, 118,  22, 110,  14,},
}};

static const uint8_t flat64[8]={64,64,64,64,64,64,64,64};

const uint16_t dither_scale[15][16]={
{    2,    3,    3,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,    5,},
{    2,    3,    7,    7,   13,   13,   25,   25,   25,   25,   25,   25,   25,   25,   25,   25,},
{    3,    3,    4,   15,   15,   29,   57,   57,   57,  113,  113,  113,  113,  113,  113,  113,},
{    3,    4,    4,    5,   31,   31,   61,  121,  241,  241,  241,  241,  481,  481,  481,  481,},
{    3,    4,    5,    5,    6,   63,   63,  125,  249,  497,  993,  993,  993,  993,  993, 1985,},
{    3,    5,    6,    6,    6,    7,  127,  127,  253,  505, 1009, 2017, 4033, 4033, 4033, 4033,},
{    3,    5,    6,    7,    7,    7,    8,  255,  255,  509, 1017, 2033, 4065, 8129,16257,16257,},
{    3,    5,    6,    8,    8,    8,    8,    9,  511,  511, 1021, 2041, 4081, 8161,16321,32641,},
{    3,    5,    7,    8,    9,    9,    9,    9,   10, 1023, 1023, 2045, 4089, 8177,16353,32705,},
{    3,    5,    7,    8,   10,   10,   10,   10,   10,   11, 2047, 2047, 4093, 8185,16369,32737,},
{    3,    5,    7,    8,   10,   11,   11,   11,   11,   11,   12, 4095, 4095, 8189,16377,32753,},
{    3,    5,    7,    9,   10,   12,   12,   12,   12,   12,   12,   13, 8191, 8191,16381,32761,},
{    3,    5,    7,    9,   10,   12,   13,   13,   13,   13,   13,   13,   14,16383,16383,32765,},
{    3,    5,    7,    9,   10,   12,   14,   14,   14,   14,   14,   14,   14,   15,32767,32767,},
{    3,    5,    7,    9,   11,   12,   14,   15,   15,   15,   15,   15,   15,   15,   16,65535,},
};

static av_always_inline void
yuv2yuvX16_c_template(const int16_t *lumFilter, const int32_t **lumSrc,
                      int lumFilterSize, const int16_t *chrFilter,
                      const int32_t **chrUSrc, const int32_t **chrVSrc,
                      int chrFilterSize, const int32_t **alpSrc,
                      uint16_t *dest[4], int dstW, int chrDstW,
                      int big_endian, int output_bits)
{
    //FIXME Optimize (just quickly written not optimized..)
    int i;
    int dword = output_bits == 16;
    uint16_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2],
             *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL;
    int shift = 11 + 4*dword + 16 - output_bits - 1;

#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uint16(val >> shift)); \
    } else { \
        AV_WL16(pos, av_clip_uint16(val >> shift)); \
    }
    for (i = 0; i < dstW; i++) {
        int val = 1 << (26-output_bits + 4*dword - 1);
        int j;
        for (j = 0; j < lumFilterSize; j++)
            val += ((dword ? lumSrc[j][i] : ((int16_t**)lumSrc)[j][i]) * lumFilter[j]) >> 1;
        output_pixel(&yDest[i], val);
    }

    if (uDest) {
        for (i = 0; i < chrDstW; i++) {
            int u = 1 << (26-output_bits + 4*dword - 1);
            int v = 1 << (26-output_bits + 4*dword - 1);
            int j;
            for (j = 0; j < chrFilterSize; j++) {
                u += ((dword ? chrUSrc[j][i] : ((int16_t**)chrUSrc)[j][i]) * chrFilter[j]) >> 1;
                v += ((dword ? chrVSrc[j][i] : ((int16_t**)chrVSrc)[j][i]) * chrFilter[j]) >> 1;
            }
            output_pixel(&uDest[i], u);
            output_pixel(&vDest[i], v);
        }
    }

    if (CONFIG_SWSCALE_ALPHA && aDest) {
        for (i = 0; i < dstW; i++) {
            int val = 1 << (26-output_bits + 4*dword - 1);
            int j;
            for (j = 0; j < lumFilterSize; j++)
                val += ((dword ? alpSrc[j][i] : ((int16_t**)alpSrc)[j][i]) * lumFilter[j]) >> 1;
            output_pixel(&aDest[i], val);
        }
    }
#undef output_pixel
}
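
/* Note on the arithmetic above: the accumulator bias
 * 1 << (26 - output_bits + 4*dword - 1) equals 1 << (shift - 1), i.e. a
 * round-to-nearest term for the final >> shift, and the extra >> 1 applied
 * to every product keeps int32_t samples times int16_t coefficients from
 * overflowing the 32-bit accumulator. The 9/10-bit template below uses the
 * same bias-equals-half-shift scheme without the per-product shift, since
 * 16-bit samples times 12-bit coefficients leave enough headroom in 32 bits. */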

static av_always_inline void
yuv2yuvX10_c_template(const int16_t *lumFilter, const int16_t **lumSrc,
                      int lumFilterSize, const int16_t *chrFilter,
                      const int16_t **chrUSrc, const int16_t **chrVSrc,
                      int chrFilterSize, const int16_t **alpSrc,
                      uint16_t *dest[4], int dstW, int chrDstW,
                      int big_endian, int output_bits)
{
    //FIXME Optimize (just quickly written not optimized..)
    int i;
    uint16_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2],
             *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL;
    int shift = 11 + 16 - output_bits;

#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    }
    for (i = 0; i < dstW; i++) {
        int val = 1 << (26-output_bits);
        int j;
        for (j = 0; j < lumFilterSize; j++)
            val += lumSrc[j][i] * lumFilter[j];
        output_pixel(&yDest[i], val);
    }

    if (uDest) {
        for (i = 0; i < chrDstW; i++) {
            int u = 1 << (26-output_bits);
            int v = 1 << (26-output_bits);
            int j;
            for (j = 0; j < chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];
            }
            output_pixel(&uDest[i], u);
            output_pixel(&vDest[i], v);
        }
    }

    if (CONFIG_SWSCALE_ALPHA && aDest) {
        for (i = 0; i < dstW; i++) {
            int val = 1 << (26-output_bits);
            int j;
            for (j = 0; j < lumFilterSize; j++)
                val += alpSrc[j][i] * lumFilter[j];
            output_pixel(&aDest[i], val);
        }
    }
#undef output_pixel
}

#define yuv2NBPS(bits, BE_LE, is_be, yuv2yuvX_template_fn, typeX_t) \
static void yuv2yuvX ## bits ## BE_LE ## _c(SwsContext *c, const int16_t *lumFilter, \
                        const int16_t **_lumSrc, int lumFilterSize, \
                        const int16_t *chrFilter, const int16_t **_chrUSrc, \
                        const int16_t **_chrVSrc, \
                        int chrFilterSize, const int16_t **_alpSrc, \
                        uint8_t *_dest[4], int dstW, int chrDstW) \
{ \
    const typeX_t **lumSrc  = (const typeX_t **) _lumSrc, \
                  **chrUSrc = (const typeX_t **) _chrUSrc, \
                  **chrVSrc = (const typeX_t **) _chrVSrc, \
                  **alpSrc  = (const typeX_t **) _alpSrc; \
    yuv2yuvX_template_fn(lumFilter, lumSrc, lumFilterSize, \
                         chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                         alpSrc, (uint16_t **) _dest, \
                         dstW, chrDstW, is_be, bits); \
}
yuv2NBPS( 9, BE, 1, yuv2yuvX10_c_template, int16_t);
yuv2NBPS( 9, LE, 0, yuv2yuvX10_c_template, int16_t);
yuv2NBPS(10, BE, 1, yuv2yuvX10_c_template, int16_t);
yuv2NBPS(10, LE, 0, yuv2yuvX10_c_template, int16_t);
yuv2NBPS(16, BE, 1, yuv2yuvX16_c_template, int32_t);
yuv2NBPS(16, LE, 0, yuv2yuvX16_c_template, int32_t);

static void yuv2yuvX_c(SwsContext *c, const int16_t *lumFilter,
                       const int16_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int16_t **chrUSrc,
                       const int16_t **chrVSrc,
                       int chrFilterSize, const int16_t **alpSrc,
                       uint8_t *dest[4], int dstW, int chrDstW)
{
    uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2],
            *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL;
    int i;
    const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8;

    //FIXME Optimize (just quickly written not optimized..)
    for (i=0; i<dstW; i++) {
        int val = lumDither[i & 7] << 12;
        int j;
        for (j=0; j<lumFilterSize; j++)
            val += lumSrc[j][i] * lumFilter[j];

        yDest[i] = av_clip_uint8(val >> 19);
    }

    if (uDest)
        for (i=0; i<chrDstW; i++) {
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;
            int j;
            for (j=0; j<chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];
            }

            uDest[i] = av_clip_uint8(u >> 19);
            vDest[i] = av_clip_uint8(v >> 19);
        }

    if (CONFIG_SWSCALE_ALPHA && aDest)
        for (i=0; i<dstW; i++) {
            int val = lumDither[i & 7] << 12;
            int j;
            for (j=0; j<lumFilterSize; j++)
                val += alpSrc[j][i] * lumFilter[j];

            aDest[i] = av_clip_uint8(val >> 19);
        }
}
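
/* Fixed-point layout used by these vertical scalers: the horizontal scaler
 * delivers 15-bit intermediates (8-bit samples scaled by 1 << 7, as the
 * plain >> 7 in yuv2yuv1_c below confirms), and the vertical filter
 * coefficients are 12-bit, presumably summing to 1 << 12 for unity gain,
 * so the >> 19 (15 + 12 - 19 = 8) returns the result to 8 bits. The dither
 * byte is injected at bit 12; with dither amplitudes below 128 (cf.
 * dither_8x8_128 above) it lands entirely in the fractional part and only
 * perturbs the per-pixel rounding. */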

static void yuv2yuv1_c(SwsContext *c, const int16_t *lumSrc,
                       const int16_t *chrUSrc, const int16_t *chrVSrc,
                       const int16_t *alpSrc,
                       uint8_t *dest[4], int dstW, int chrDstW)
{
    uint8_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2],
            *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL;
    int i;
    const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8;

    for (i=0; i<dstW; i++) {
        int val = (lumSrc[i] + lumDither[i & 7]) >> 7;
        yDest[i] = av_clip_uint8(val);
    }

    if (uDest)
        for (i=0; i<chrDstW; i++) {
            int u = (chrUSrc[i] + chrDither[i & 7])       >> 7;
            int v = (chrVSrc[i] + chrDither[(i + 3) & 7]) >> 7;
            uDest[i] = av_clip_uint8(u);
            vDest[i] = av_clip_uint8(v);
        }

    if (CONFIG_SWSCALE_ALPHA && aDest)
        for (i=0; i<dstW; i++) {
            int val = (alpSrc[i] + lumDither[i & 7]) >> 7;
            aDest[i] = av_clip_uint8(val);
        }
}

static void yuv2nv12X_c(SwsContext *c, const int16_t *lumFilter,
                        const int16_t **lumSrc, int lumFilterSize,
                        const int16_t *chrFilter, const int16_t **chrUSrc,
                        const int16_t **chrVSrc, int chrFilterSize,
                        const int16_t **alpSrc, uint8_t *dest[4],
                        int dstW, int chrDstW)
{
    uint8_t *yDest = dest[0], *uDest = dest[1];
    enum PixelFormat dstFormat = c->dstFormat;
    const uint8_t *lumDither = c->lumDither8, *chrDither = c->chrDither8;

    //FIXME Optimize (just quickly written not optimized..)
    int i;
    for (i=0; i<dstW; i++) {
        int val = lumDither[i & 7] << 12;
        int j;
        for (j=0; j<lumFilterSize; j++)
            val += lumSrc[j][i] * lumFilter[j];

        yDest[i] = av_clip_uint8(val >> 19);
    }

    if (!uDest)
        return;

    if (dstFormat == PIX_FMT_NV12)
        for (i=0; i<chrDstW; i++) {
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;
            int j;
            for (j=0; j<chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];
            }

            uDest[2*i]   = av_clip_uint8(u >> 19);
            uDest[2*i+1] = av_clip_uint8(v >> 19);
        }
    else
        for (i=0; i<chrDstW; i++) {
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;
            int j;
            for (j=0; j<chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];
            }

            uDest[2*i]   = av_clip_uint8(v >> 19);
            uDest[2*i+1] = av_clip_uint8(u >> 19);
        }
}
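
/* NV12 stores the interleaved chroma plane as U,V pairs; the else branch
 * above covers NV21, which is identical except that V precedes U. */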

#define output_pixel(pos, val) \
    if (target == PIX_FMT_GRAY16BE) { \
        AV_WB16(pos, val); \
    } else { \
        AV_WL16(pos, val); \
    }

static av_always_inline void
yuv2gray16_X_c_template(SwsContext *c, const int16_t *lumFilter,
                        const int32_t **lumSrc, int lumFilterSize,
                        const int16_t *chrFilter, const int32_t **chrUSrc,
                        const int32_t **chrVSrc, int chrFilterSize,
                        const int32_t **alpSrc, uint16_t *dest, int dstW,
                        int y, enum PixelFormat target)
{
    int i;

    for (i = 0; i < (dstW >> 1); i++) {
        int j;
        int Y1 = 1 << 14;
        int Y2 = 1 << 14;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
        }
        Y1 >>= 15;
        Y2 >>= 15;
        if ((Y1 | Y2) & 0x10000) {
            Y1 = av_clip_uint16(Y1);
            Y2 = av_clip_uint16(Y2);
        }
        output_pixel(&dest[i * 2 + 0], Y1);
        output_pixel(&dest[i * 2 + 1], Y2);
    }
}

static av_always_inline void
yuv2gray16_2_c_template(SwsContext *c, const int32_t *buf[2],
                        const int32_t *ubuf[2], const int32_t *vbuf[2],
                        const int32_t *abuf[2], uint16_t *dest, int dstW,
                        int yalpha, int uvalpha, int y,
                        enum PixelFormat target)
{
    int yalpha1 = 4095 - yalpha;
    int i;
    const int32_t *buf0 = buf[0], *buf1 = buf[1];

    for (i = 0; i < (dstW >> 1); i++) {
        int Y1 = (buf0[i * 2    ] * yalpha1 + buf1[i * 2    ] * yalpha) >> 15;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 15;

        output_pixel(&dest[i * 2 + 0], Y1);
        output_pixel(&dest[i * 2 + 1], Y2);
    }
}

static av_always_inline void
yuv2gray16_1_c_template(SwsContext *c, const int32_t *buf0,
                        const int32_t *ubuf[2], const int32_t *vbuf[2],
                        const int32_t *abuf0, uint16_t *dest, int dstW,
                        int uvalpha, int y, enum PixelFormat target)
{
    int i;

    for (i = 0; i < (dstW >> 1); i++) {
        int Y1 = (buf0[i * 2    ] + 4) >> 3;
        int Y2 = (buf0[i * 2 + 1] + 4) >> 3;

        output_pixel(&dest[i * 2 + 0], Y1);
        output_pixel(&dest[i * 2 + 1], Y2);
    }
}

#undef output_pixel

#define YUV2PACKED16WRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                        const int16_t **_lumSrc, int lumFilterSize, \
                        const int16_t *chrFilter, const int16_t **_chrUSrc, \
                        const int16_t **_chrVSrc, int chrFilterSize, \
                        const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
                        int y) \
{ \
    const int32_t **lumSrc  = (const int32_t **) _lumSrc, \
                  **chrUSrc = (const int32_t **) _chrUSrc, \
                  **chrVSrc = (const int32_t **) _chrVSrc, \
                  **alpSrc  = (const int32_t **) _alpSrc; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \
} \
\
static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
                        int yalpha, int uvalpha, int y) \
{ \
    const int32_t **buf  = (const int32_t **) _buf, \
                  **ubuf = (const int32_t **) _ubuf, \
                  **vbuf = (const int32_t **) _vbuf, \
                  **abuf = (const int32_t **) _abuf; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \
} \
\
static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf0, uint8_t *_dest, int dstW, \
                        int uvalpha, int y) \
{ \
    const int32_t  *buf0  = (const int32_t *)  _buf0, \
                  **ubuf  = (const int32_t **) _ubuf, \
                  **vbuf  = (const int32_t **) _vbuf, \
                   *abuf0 = (const int32_t *)  _abuf0; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt); \
}

YUV2PACKED16WRAPPER(yuv2gray16,, LE, PIX_FMT_GRAY16LE);
YUV2PACKED16WRAPPER(yuv2gray16,, BE, PIX_FMT_GRAY16BE);
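
/* Every packed-format writer in this file comes in three flavors, generated
 * by the wrapper macros: _X_c runs the full multi-tap vertical filter,
 * _2_c blends exactly two rows with 12-bit weights (yalpha/uvalpha in
 * 0..4095), and _1_c emits a single row, averaging two chroma rows only
 * when uvalpha indicates the chroma phase falls between them. */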

#define output_pixel(pos, acc) \
    if (target == PIX_FMT_MONOBLACK) { \
        pos = acc; \
    } else { \
        pos = ~acc; \
    }

static av_always_inline void
yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter,
                      const int16_t **lumSrc, int lumFilterSize,
                      const int16_t *chrFilter, const int16_t **chrUSrc,
                      const int16_t **chrVSrc, int chrFilterSize,
                      const int16_t **alpSrc, uint8_t *dest, int dstW,
                      int y, enum PixelFormat target)
{
    const uint8_t * const d128 = dither_8x8_220[y & 7];
    uint8_t *g = c->table_gU[128] + c->table_gV[128];
    int i;
    int acc = 0;

    for (i = 0; i < dstW - 1; i += 2) {
        int j;
        int Y1 = 1 << 18;
        int Y2 = 1 << 18;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i]   * lumFilter[j];
            Y2 += lumSrc[j][i+1] * lumFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        if ((Y1 | Y2) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
        }
        acc += acc + g[Y1 + d128[(i + 0) & 7]];
        acc += acc + g[Y2 + d128[(i + 1) & 7]];
        if ((i & 7) == 6) {
            output_pixel(*dest++, acc);
        }
    }
}
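
/* acc += acc + bit is acc = (acc << 1) | bit: pixels are packed into the
 * byte MSB-first, and the byte is flushed every 8 pixels; the test
 * (i & 7) == 6 fires on every fourth pair since i advances by 2. g points
 * into the grayscale slice of the yuv2rgb tables (neutral chroma,
 * U = V = 128), so one load folds luma thresholding and dithering together. */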

static av_always_inline void
yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2],
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf[2], uint8_t *dest, int dstW,
                      int yalpha, int uvalpha, int y,
                      enum PixelFormat target)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1];
    const uint8_t * const d128 = dither_8x8_220[y & 7];
    uint8_t *g = c->table_gU[128] + c->table_gV[128];
    int yalpha1 = 4095 - yalpha;
    int i;

    for (i = 0; i < dstW - 7; i += 8) {
        int acc =    g[((buf0[i    ] * yalpha1 + buf1[i    ] * yalpha) >> 19) + d128[0]];
        acc += acc + g[((buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19) + d128[1]];
        acc += acc + g[((buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19) + d128[2]];
        acc += acc + g[((buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19) + d128[3]];
        acc += acc + g[((buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19) + d128[4]];
        acc += acc + g[((buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19) + d128[5]];
        acc += acc + g[((buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19) + d128[6]];
        acc += acc + g[((buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19) + d128[7]];
        output_pixel(*dest++, acc);
    }
}

static av_always_inline void
yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0,
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf0, uint8_t *dest, int dstW,
                      int uvalpha, int y, enum PixelFormat target)
{
    const uint8_t * const d128 = dither_8x8_220[y & 7];
    uint8_t *g = c->table_gU[128] + c->table_gV[128];
    int i;

    for (i = 0; i < dstW - 7; i += 8) {
        int acc =    g[(buf0[i    ] >> 7) + d128[0]];
        acc += acc + g[(buf0[i + 1] >> 7) + d128[1]];
        acc += acc + g[(buf0[i + 2] >> 7) + d128[2]];
        acc += acc + g[(buf0[i + 3] >> 7) + d128[3]];
        acc += acc + g[(buf0[i + 4] >> 7) + d128[4]];
        acc += acc + g[(buf0[i + 5] >> 7) + d128[5]];
        acc += acc + g[(buf0[i + 6] >> 7) + d128[6]];
        acc += acc + g[(buf0[i + 7] >> 7) + d128[7]];
        output_pixel(*dest++, acc);
    }
}

#undef output_pixel

#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \
} \
\
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \
} \
\
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
                                  abuf0, dest, dstW, uvalpha, \
                                  y, fmt); \
}

YUV2PACKEDWRAPPER(yuv2mono,, white, PIX_FMT_MONOWHITE);
YUV2PACKEDWRAPPER(yuv2mono,, black, PIX_FMT_MONOBLACK);

#define output_pixels(pos, Y1, U, Y2, V) \
    if (target == PIX_FMT_YUYV422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = U; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = V; \
    } else { \
        dest[pos + 0] = U; \
        dest[pos + 1] = Y1; \
        dest[pos + 2] = V; \
        dest[pos + 3] = Y2; \
    }
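
/* Byte order per two-pixel group: YUYV422 is Y0 U Y1 V, UYVY422 is
 * U Y0 V Y1; the same four samples, offset by one byte. */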

static av_always_inline void
yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
                     const int16_t **lumSrc, int lumFilterSize,
                     const int16_t *chrFilter, const int16_t **chrUSrc,
                     const int16_t **chrVSrc, int chrFilterSize,
                     const int16_t **alpSrc, uint8_t *dest, int dstW,
                     int y, enum PixelFormat target)
{
    int i;

    for (i = 0; i < (dstW >> 1); i++) {
        int j;
        int Y1 = 1 << 18;
        int Y2 = 1 << 18;
        int U  = 1 << 18;
        int V  = 1 << 18;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        U  >>= 19;
        V  >>= 19;
        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);
        }
        output_pixels(4*i, Y1, U, Y2, V);
    }
}

static av_always_inline void
yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf[2], uint8_t *dest, int dstW,
                     int yalpha, int uvalpha, int y,
                     enum PixelFormat target)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int  yalpha1 = 4095 - yalpha;
    int uvalpha1 = 4095 - uvalpha;
    int i;

    for (i = 0; i < (dstW >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1  + buf1[i * 2]     * yalpha)  >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1  + buf1[i * 2 + 1] * yalpha)  >> 19;
        int U  = (ubuf0[i]        * uvalpha1 + ubuf1[i]        * uvalpha) >> 19;
        int V  = (vbuf0[i]        * uvalpha1 + vbuf1[i]        * uvalpha) >> 19;

        output_pixels(i * 4, Y1, U, Y2, V);
    }
}

static av_always_inline void
yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf0, uint8_t *dest, int dstW,
                     int uvalpha, int y, enum PixelFormat target)
{
    const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < (dstW >> 1); i++) {
            int Y1 = buf0[i * 2]     >> 7;
            int Y2 = buf0[i * 2 + 1] >> 7;
            int U  = ubuf1[i]        >> 7;
            int V  = vbuf1[i]        >> 7;

            output_pixels(i * 4, Y1, U, Y2, V);
        }
    } else {
        for (i = 0; i < (dstW >> 1); i++) {
            int Y1 =  buf0[i * 2]          >> 7;
            int Y2 =  buf0[i * 2 + 1]      >> 7;
            int U  = (ubuf0[i] + ubuf1[i]) >> 8;
            int V  = (vbuf0[i] + vbuf1[i]) >> 8;

            output_pixels(i * 4, Y1, U, Y2, V);
        }
    }
}

#undef output_pixels

YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, PIX_FMT_YUYV422);
YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, PIX_FMT_UYVY422);

#define R_B ((target == PIX_FMT_RGB48LE || target == PIX_FMT_RGB48BE) ? R : B)
#define B_R ((target == PIX_FMT_RGB48LE || target == PIX_FMT_RGB48BE) ? B : R)
#define output_pixel(pos, val) \
    if (isBE(target)) { \
        AV_WB16(pos, val); \
    } else { \
        AV_WL16(pos, val); \
    }
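
/* R_B and B_R swap the first and last channel so the one template body below
 * serves both RGB48 and BGR48, while output_pixel selects the byte order
 * from isBE(target). */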

static av_always_inline void
yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter,
                       const int32_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int32_t **chrUSrc,
                       const int32_t **chrVSrc, int chrFilterSize,
                       const int32_t **alpSrc, uint16_t *dest, int dstW,
                       int y, enum PixelFormat target)
{
    int i;

    for (i = 0; i < (dstW >> 1); i++) {
        int j;
        int Y1 = 0;
        int Y2 = 0;
        int U  = -128 << 23; // 19
        int V  = -128 << 23;
        int R, G, B;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }

        // 8bit: 12+15=27; 16-bit: 12+19=31
        Y1 >>= 14; // 10
        Y2 >>= 14;
        U  >>= 14;
        V  >>= 14;

        // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit
        Y1 -= c->yuv2rgb_y_offset;
        Y2 -= c->yuv2rgb_y_offset;
        Y1 *= c->yuv2rgb_y_coeff;
        Y2 *= c->yuv2rgb_y_coeff;
        Y1 += 1 << 13; // 21
        Y2 += 1 << 13;
        // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

        // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit
        output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
        output_pixel(&dest[1], av_clip_uintp2(  G + Y1, 30) >> 14);
        output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
        output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
        output_pixel(&dest[4], av_clip_uintp2(  G + Y2, 30) >> 14);
        output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
        dest += 6;
    }
}

static av_always_inline void
yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2],
                       const int32_t *ubuf[2], const int32_t *vbuf[2],
                       const int32_t *abuf[2], uint16_t *dest, int dstW,
                       int yalpha, int uvalpha, int y,
                       enum PixelFormat target)
{
    const int32_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int  yalpha1 = 4095 - yalpha;
    int uvalpha1 = 4095 - uvalpha;
    int i;

    for (i = 0; i < (dstW >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1 + buf1[i * 2]     * yalpha) >> 14;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
        int U  = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14;
        int V  = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14;
        int R, G, B;

        Y1 -= c->yuv2rgb_y_offset;
        Y2 -= c->yuv2rgb_y_offset;
        Y1 *= c->yuv2rgb_y_coeff;
        Y2 *= c->yuv2rgb_y_coeff;
        Y1 += 1 << 13;
        Y2 += 1 << 13;

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

        output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
        output_pixel(&dest[1], av_clip_uintp2(  G + Y1, 30) >> 14);
        output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
        output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
        output_pixel(&dest[4], av_clip_uintp2(  G + Y2, 30) >> 14);
        output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
        dest += 6;
    }
}

static av_always_inline void
yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0,
                       const int32_t *ubuf[2], const int32_t *vbuf[2],
                       const int32_t *abuf0, uint16_t *dest, int dstW,
                       int uvalpha, int y, enum PixelFormat target)
{
    const int32_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < (dstW >> 1); i++) {
            int Y1 = (buf0[i * 2]    ) >> 2;
            int Y2 = (buf0[i * 2 + 1]) >> 2;
            int U  = (ubuf0[i] + (-128 << 11)) >> 2;
            int V  = (vbuf0[i] + (-128 << 11)) >> 2;
            int R, G, B;

            Y1 -= c->yuv2rgb_y_offset;
            Y2 -= c->yuv2rgb_y_offset;
            Y1 *= c->yuv2rgb_y_coeff;
            Y2 *= c->yuv2rgb_y_coeff;
            Y1 += 1 << 13;
            Y2 += 1 << 13;

            R = V * c->yuv2rgb_v2r_coeff;
            G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
            B = U * c->yuv2rgb_u2b_coeff;

            output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
            output_pixel(&dest[1], av_clip_uintp2(  G + Y1, 30) >> 14);
            output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
            output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
            output_pixel(&dest[4], av_clip_uintp2(  G + Y2, 30) >> 14);
            output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
            dest += 6;
        }
    } else {
        for (i = 0; i < (dstW >> 1); i++) {
            int Y1 = (buf0[i * 2]    ) >> 2;
            int Y2 = (buf0[i * 2 + 1]) >> 2;
            int U  = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3;
            int V  = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3;
            int R, G, B;

            Y1 -= c->yuv2rgb_y_offset;
            Y2 -= c->yuv2rgb_y_offset;
            Y1 *= c->yuv2rgb_y_coeff;
            Y2 *= c->yuv2rgb_y_coeff;
            Y1 += 1 << 13;
            Y2 += 1 << 13;

            R = V * c->yuv2rgb_v2r_coeff;
            G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
            B = U * c->yuv2rgb_u2b_coeff;

            output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
            output_pixel(&dest[1], av_clip_uintp2(  G + Y1, 30) >> 14);
            output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
            output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
            output_pixel(&dest[4], av_clip_uintp2(  G + Y2, 30) >> 14);
            output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
            dest += 6;
        }
    }
}

#undef output_pixel
#undef r_b
#undef b_r

YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48be, PIX_FMT_RGB48BE);
YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48le, PIX_FMT_RGB48LE);
YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48be, PIX_FMT_BGR48BE);
YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48le, PIX_FMT_BGR48LE);

static av_always_inline void
yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2,
              int U, int V, int A1, int A2,
              const void *_r, const void *_g, const void *_b, int y,
              enum PixelFormat target, int hasAlpha)
{
    if (target == PIX_FMT_ARGB || target == PIX_FMT_RGBA ||
        target == PIX_FMT_ABGR || target == PIX_FMT_BGRA) {
        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;

#if CONFIG_SMALL
        int sh = hasAlpha ? ((target == PIX_FMT_RGB32_1 || target == PIX_FMT_BGR32_1) ? 0 : 24) : 0;

        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
#else
        if (hasAlpha) {
            int sh = (target == PIX_FMT_RGB32_1 || target == PIX_FMT_BGR32_1) ? 0 : 24;

            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
        } else {
            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
        }
#endif
    } else if (target == PIX_FMT_RGB24 || target == PIX_FMT_BGR24) {
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;

#define r_b ((target == PIX_FMT_RGB24) ? r : b)
#define b_r ((target == PIX_FMT_RGB24) ? b : r)
        dest[i * 6 + 0] = r_b[Y1];
        dest[i * 6 + 1] =   g[Y1];
        dest[i * 6 + 2] = b_r[Y1];
        dest[i * 6 + 3] = r_b[Y2];
        dest[i * 6 + 4] =   g[Y2];
        dest[i * 6 + 5] = b_r[Y2];
#undef r_b
#undef b_r
    } else if (target == PIX_FMT_RGB565 || target == PIX_FMT_BGR565 ||
               target == PIX_FMT_RGB555 || target == PIX_FMT_BGR555 ||
               target == PIX_FMT_RGB444 || target == PIX_FMT_BGR444) {
        uint16_t *dest = (uint16_t *) _dest;
        const uint16_t *r = (const uint16_t *) _r;
        const uint16_t *g = (const uint16_t *) _g;
        const uint16_t *b = (const uint16_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == PIX_FMT_RGB565 || target == PIX_FMT_BGR565) {
            dr1 = dither_2x2_8[ y & 1     ][0];
            dg1 = dither_2x2_4[ y & 1     ][0];
            db1 = dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = dither_2x2_8[ y & 1     ][1];
            dg2 = dither_2x2_4[ y & 1     ][1];
            db2 = dither_2x2_8[(y & 1) ^ 1][1];
        } else if (target == PIX_FMT_RGB555 || target == PIX_FMT_BGR555) {
            dr1 = dither_2x2_8[ y & 1     ][0];
            dg1 = dither_2x2_8[ y & 1     ][1];
            db1 = dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = dither_2x2_8[ y & 1     ][1];
            dg2 = dither_2x2_8[ y & 1     ][0];
            db2 = dither_2x2_8[(y & 1) ^ 1][1];
        } else {
            dr1 = dither_4x4_16[ y & 3     ][0];
            dg1 = dither_4x4_16[ y & 3     ][1];
            db1 = dither_4x4_16[(y & 3) ^ 3][0];
            dr2 = dither_4x4_16[ y & 3     ][1];
            dg2 = dither_4x4_16[ y & 3     ][0];
            db2 = dither_4x4_16[(y & 3) ^ 3][1];
        }

        dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
        dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
    } else /* 8/4-bit */ {
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == PIX_FMT_RGB8 || target == PIX_FMT_BGR8) {
            const uint8_t * const d64 = dither_8x8_73[y & 7];
            const uint8_t * const d32 = dither_8x8_32[y & 7];
            dr1 = dg1 = d32[(i * 2 + 0) & 7];
            db1 =       d64[(i * 2 + 0) & 7];
            dr2 = dg2 = d32[(i * 2 + 1) & 7];
            db2 =       d64[(i * 2 + 1) & 7];
        } else {
            const uint8_t * const d64  = dither_8x8_73 [y & 7];
            const uint8_t * const d128 = dither_8x8_220[y & 7];
            dr1 = db1 = d128[(i * 2 + 0) & 7];
            dg1 =        d64[(i * 2 + 0) & 7];
            dr2 = db2 = d128[(i * 2 + 1) & 7];
            dg2 =        d64[(i * 2 + 1) & 7];
        }

        if (target == PIX_FMT_RGB4 || target == PIX_FMT_BGR4) {
            dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
                      ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
        } else {
            dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
            dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
        }
    }
}
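
/* The r/g/b arguments are per-component lookup tables built at context
 * initialization; each entry appears to carry the component value already
 * shifted into its position within the packed pixel, so a pixel is
 * assembled with three table loads and two additions, plus the per-position
 * dither offsets added to the table index for the low-depth formats. */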
static av_always_inline void
yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
                     const int16_t **lumSrc, int lumFilterSize,
                     const int16_t *chrFilter, const int16_t **chrUSrc,
                     const int16_t **chrVSrc, int chrFilterSize,
                     const int16_t **alpSrc, uint8_t *dest, int dstW,
                     int y, enum PixelFormat target, int hasAlpha)
{
    int i;

    for (i = 0; i < (dstW >> 1); i++) {
        int j;
        int Y1 = 1 << 18;
        int Y2 = 1 << 18;
        int U  = 1 << 18;
        int V  = 1 << 18;
        int av_unused A1, A2;
        const void *r, *g, *b;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y1 >>= 19;
        Y2 >>= 19;
        U  >>= 19;
        V  >>= 19;
        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);
        }
        if (hasAlpha) {
            A1 = 1 << 18;
            A2 = 1 << 18;
            for (j = 0; j < lumFilterSize; j++) {
                A1 += alpSrc[j][i * 2    ] * lumFilter[j];
                A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
            }
            A1 >>= 19;
            A2 >>= 19;
            if ((A1 | A2) & 0x100) {
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);
            }
        }

        /* FIXME fix tables so that clipping is not needed and then use _NOCLIP */
        r = c->table_rV[V];
        g = (c->table_gU[U] + c->table_gV[V]);
        b = c->table_bU[U];

        yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                      r, g, b, y, target, hasAlpha);
    }
}
static av_always_inline void
yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf[2], uint8_t *dest, int dstW,
                     int yalpha, int uvalpha, int y,
                     enum PixelFormat target, int hasAlpha)
{
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int  yalpha1 = 4095 - yalpha;
    int uvalpha1 = 4095 - uvalpha;
    int i;

    for (i = 0; i < (dstW >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1  + buf1[i * 2]     * yalpha)  >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1  + buf1[i * 2 + 1] * yalpha)  >> 19;
        int U  = (ubuf0[i]        * uvalpha1 + ubuf1[i]        * uvalpha) >> 19;
        int V  = (vbuf0[i]        * uvalpha1 + vbuf1[i]        * uvalpha) >> 19;
        int A1, A2;
        const void *r = c->table_rV[V],
                   *g = (c->table_gU[U] + c->table_gV[V]),
                   *b = c->table_bU[U];

        if (hasAlpha) {
            A1 = (abuf0[i * 2    ] * yalpha1 + abuf1[i * 2    ] * yalpha) >> 19;
            A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
        }

        yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                      r, g, b, y, target, hasAlpha);
    }
}
static av_always_inline void
yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf0, uint8_t *dest, int dstW,
                     int uvalpha, int y, enum PixelFormat target,
                     int hasAlpha)
{
    const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int i;

    if (uvalpha < 2048) {
        for (i = 0; i < (dstW >> 1); i++) {
            int Y1 = buf0[i * 2]     >> 7;
            int Y2 = buf0[i * 2 + 1] >> 7;
            int U  = ubuf1[i]        >> 7;
            int V  = vbuf1[i]        >> 7;
            int A1, A2;
            const void *r = c->table_rV[V],
                       *g = (c->table_gU[U] + c->table_gV[V]),
                       *b = c->table_bU[U];

            if (hasAlpha) {
                A1 = abuf0[i * 2    ] >> 7;
                A2 = abuf0[i * 2 + 1] >> 7;
            }

            yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);
        }
    } else {
        for (i = 0; i < (dstW >> 1); i++) {
            int Y1 =  buf0[i * 2]          >> 7;
            int Y2 =  buf0[i * 2 + 1]      >> 7;
            int U  = (ubuf0[i] + ubuf1[i]) >> 8;
            int V  = (vbuf0[i] + vbuf1[i]) >> 8;
            int A1, A2;
            const void *r = c->table_rV[V],
                       *g = (c->table_gU[U] + c->table_gV[V]),
                       *b = c->table_bU[U];

            if (hasAlpha) {
                A1 = abuf0[i * 2    ] >> 7;
                A2 = abuf0[i * 2 + 1] >> 7;
            }

            yuv2rgb_write(dest, i, Y1, Y2, U, V, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);
        }
    }
}
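/* Editor's note on the wrapper naming used below: for every output
 * format three entry points are generated from the templates above,
 * with the suffixes _X_c (arbitrary vertical filter), _2_c (two input
 * lines blended with yalpha/uvalpha) and _1_c (a single input line,
 * i.e. no vertical scaling). The fmt and hasAlpha arguments are baked
 * in at macro-expansion time, so the always-inlined templates can drop
 * the unused paths entirely. */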
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt, hasAlpha); \
}

#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
} \
\
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha); \
}
#if CONFIG_SMALL
YUV2RGBWRAPPER(yuv2rgb,,  32_1,  PIX_FMT_RGB32_1,   CONFIG_SWSCALE_ALPHA && c->alpPixBuf);
YUV2RGBWRAPPER(yuv2rgb,,  32,    PIX_FMT_RGB32,     CONFIG_SWSCALE_ALPHA && c->alpPixBuf);
#else
#if CONFIG_SWSCALE_ALPHA
YUV2RGBWRAPPER(yuv2rgb,, a32_1,  PIX_FMT_RGB32_1,   1);
YUV2RGBWRAPPER(yuv2rgb,, a32,    PIX_FMT_RGB32,     1);
#endif
YUV2RGBWRAPPER(yuv2rgb,, x32_1,  PIX_FMT_RGB32_1,   0);
YUV2RGBWRAPPER(yuv2rgb,, x32,    PIX_FMT_RGB32,     0);
#endif
YUV2RGBWRAPPER(yuv2, rgb, rgb24, PIX_FMT_RGB24,     0);
YUV2RGBWRAPPER(yuv2, rgb, bgr24, PIX_FMT_BGR24,     0);
YUV2RGBWRAPPER(yuv2rgb,,  16,    PIX_FMT_RGB565,    0);
YUV2RGBWRAPPER(yuv2rgb,,  15,    PIX_FMT_RGB555,    0);
YUV2RGBWRAPPER(yuv2rgb,,  12,    PIX_FMT_RGB444,    0);
YUV2RGBWRAPPER(yuv2rgb,,   8,    PIX_FMT_RGB8,      0);
YUV2RGBWRAPPER(yuv2rgb,,   4,    PIX_FMT_RGB4,      0);
YUV2RGBWRAPPER(yuv2rgb,,   4b,   PIX_FMT_RGB4_BYTE, 0);
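/* The _full_ variants below produce full-chroma-resolution output: no
 * U/V sharing between horizontal pixel pairs and no lookup tables;
 * R, G and B are computed per pixel from the context's yuv2rgb_*
 * coefficients at ~30-bit precision and written out with a >> 22.
 * (Editor's summary of the code that follows.) */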
static av_always_inline void
yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
                          const int16_t **lumSrc, int lumFilterSize,
                          const int16_t *chrFilter, const int16_t **chrUSrc,
                          const int16_t **chrVSrc, int chrFilterSize,
                          const int16_t **alpSrc, uint8_t *dest,
                          int dstW, int y, enum PixelFormat target, int hasAlpha)
{
    int i;
    int step = (target == PIX_FMT_RGB24 || target == PIX_FMT_BGR24) ? 3 : 4;

    for (i = 0; i < dstW; i++) {
        int j;
        int Y = 1 << 9;
        int U = (1 << 9) - (128 << 19);
        int V = (1 << 9) - (128 << 19);
        int av_unused A;
        int R, G, B;

        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y >>= 10;
        U >>= 10;
        V >>= 10;
        if (hasAlpha) {
            A = 1 << 18;
            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * lumFilter[j];
            }
            A >>= 19;
            if (A & 0x100)
                A = av_clip_uint8(A);
        }
        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;
        Y += 1 << 21;
        R = Y + V * c->yuv2rgb_v2r_coeff;
        G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = Y +                            U * c->yuv2rgb_u2b_coeff;
        if ((R | G | B) & 0xC0000000) {
            R = av_clip_uintp2(R, 30);
            G = av_clip_uintp2(G, 30);
            B = av_clip_uintp2(B, 30);
        }

        switch (target) {
        case PIX_FMT_ARGB:
            dest[0] = hasAlpha ? A : 255;
            dest[1] = R >> 22;
            dest[2] = G >> 22;
            dest[3] = B >> 22;
            break;
        case PIX_FMT_RGB24:
            dest[0] = R >> 22;
            dest[1] = G >> 22;
            dest[2] = B >> 22;
            break;
        case PIX_FMT_RGBA:
            dest[0] = R >> 22;
            dest[1] = G >> 22;
            dest[2] = B >> 22;
            dest[3] = hasAlpha ? A : 255;
            break;
        case PIX_FMT_ABGR:
            dest[0] = hasAlpha ? A : 255;
            dest[1] = B >> 22;
            dest[2] = G >> 22;
            dest[3] = R >> 22;
            break;
        case PIX_FMT_BGR24:
            dest[0] = B >> 22;
            dest[1] = G >> 22;
            dest[2] = R >> 22;
            break;
        case PIX_FMT_BGRA:
            dest[0] = B >> 22;
            dest[1] = G >> 22;
            dest[2] = R >> 22;
            dest[3] = hasAlpha ? A : 255;
            break;
        }
        dest += step;
    }
}
#if CONFIG_SMALL
YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf);
YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->alpPixBuf);
YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf);
YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->alpPixBuf);
#else
#if CONFIG_SWSCALE_ALPHA
YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, PIX_FMT_BGRA, 1);
YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, PIX_FMT_ABGR, 1);
YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, PIX_FMT_RGBA, 1);
YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, PIX_FMT_ARGB, 1);
#endif
YUV2RGBWRAPPERX(yuv2, rgb_full, bgrx32_full, PIX_FMT_BGRA, 0);
YUV2RGBWRAPPERX(yuv2, rgb_full, xbgr32_full, PIX_FMT_ABGR, 0);
YUV2RGBWRAPPERX(yuv2, rgb_full, rgbx32_full, PIX_FMT_RGBA, 0);
YUV2RGBWRAPPERX(yuv2, rgb_full, xrgb32_full, PIX_FMT_ARGB, 0);
#endif
YUV2RGBWRAPPERX(yuv2, rgb_full, bgr24_full,  PIX_FMT_BGR24, 0);
YUV2RGBWRAPPERX(yuv2, rgb_full, rgb24_full,  PIX_FMT_RGB24, 0);
static av_always_inline void fillPlane(uint8_t *plane, int stride,
                                       int width, int height,
                                       int y, uint8_t val)
{
    int i;
    uint8_t *ptr = plane + stride * y;

    for (i = 0; i < height; i++) {
        memset(ptr, val, width);
        ptr += stride;
    }
}
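/* 16-bit-per-component RGB input. The r/b macros below swap the roles
 * of the first and third component for the BGR48 variants, so one
 * template body handles both component orders; input_pixel() picks a
 * big- or little-endian 16-bit load depending on the source format. */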
#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))

#define r ((origin == PIX_FMT_BGR48BE || origin == PIX_FMT_BGR48LE) ? b_r : r_b)
#define b ((origin == PIX_FMT_BGR48BE || origin == PIX_FMT_BGR48LE) ? r_b : b_r)

static av_always_inline void
rgb48ToY_c_template(uint16_t *dst, const uint16_t *src, int width,
                    enum PixelFormat origin)
{
    int i;
    for (i = 0; i < width; i++) {
        unsigned int r_b = input_pixel(&src[i * 3 + 0]);
        unsigned int g   = input_pixel(&src[i * 3 + 1]);
        unsigned int b_r = input_pixel(&src[i * 3 + 2]);

        dst[i] = (RY * r + GY * g + BY * b + (0x2001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

static av_always_inline void
rgb48ToUV_c_template(uint16_t *dstU, uint16_t *dstV,
                     const uint16_t *src1, const uint16_t *src2,
                     int width, enum PixelFormat origin)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r_b = input_pixel(&src1[i * 3 + 0]);
        int g   = input_pixel(&src1[i * 3 + 1]);
        int b_r = input_pixel(&src1[i * 3 + 2]);

        dstU[i] = (RU * r + GU * g + BU * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

static av_always_inline void
rgb48ToUV_half_c_template(uint16_t *dstU, uint16_t *dstV,
                          const uint16_t *src1, const uint16_t *src2,
                          int width, enum PixelFormat origin)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r_b = (input_pixel(&src1[6 * i + 0]) + input_pixel(&src1[6 * i + 3]) + 1) >> 1;
        int g   = (input_pixel(&src1[6 * i + 1]) + input_pixel(&src1[6 * i + 4]) + 1) >> 1;
        int b_r = (input_pixel(&src1[6 * i + 2]) + input_pixel(&src1[6 * i + 5]) + 1) >> 1;

        dstU[i] = (RU * r + GU * g + BU * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (RV * r + GV * g + BV * b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

#undef r
#undef b
#undef input_pixel
#define rgb48funcs(pattern, BE_LE, origin) \
static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused0, const uint8_t *unused1, \
                                            int width, uint32_t *unused) \
{ \
    const uint16_t *src = (const uint16_t *) _src; \
    uint16_t *dst = (uint16_t *) _dst; \
    rgb48ToY_c_template(dst, src, width, origin); \
} \
\
static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU, uint8_t *_dstV, \
                                             const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
                                             int width, uint32_t *unused) \
{ \
    const uint16_t *src1 = (const uint16_t *) _src1, \
                   *src2 = (const uint16_t *) _src2; \
    uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
    rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin); \
} \
\
static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, \
                                                  const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
                                                  int width, uint32_t *unused) \
{ \
    const uint16_t *src1 = (const uint16_t *) _src1, \
                   *src2 = (const uint16_t *) _src2; \
    uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
    rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin); \
}

rgb48funcs(rgb, LE, PIX_FMT_RGB48LE);
rgb48funcs(rgb, BE, PIX_FMT_RGB48BE);
rgb48funcs(bgr, LE, PIX_FMT_BGR48LE);
rgb48funcs(bgr, BE, PIX_FMT_BGR48BE);
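/* Packed 15/16/32-bit RGB input. The templates below are fully
 * parameterized: shp pre-shifts the loaded pixel (for formats with an
 * alpha/padding byte in the low bits), maskr/maskg/maskb with
 * shr/shg/shb extract the components, and rsh/gsh/bsh scale the
 * RGB2YUV coefficients so every format reaches the same S-bit
 * intermediate precision. Worked example (editor's note): for
 * PIX_FMT_RGB565LE in the wrapper table further down, the red field is
 * left in place (maskr = 0xF800, shr = 0), so it already carries a
 * 2^11 scale and pairs with RY << 0, while blue's raw 5 bits get
 * BY << 11; each term ends up 2^8 times an 8-bit component, hence
 * S = RGB2YUV_SHIFT + 8. */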
#define input_pixel(i) ((origin == PIX_FMT_RGBA || origin == PIX_FMT_BGRA || \
                         origin == PIX_FMT_ARGB || origin == PIX_FMT_ABGR) ? AV_RN32A(&src[(i) * 4]) : \
                        (isBE(origin) ? AV_RB16(&src[(i) * 2]) : AV_RL16(&src[(i) * 2])))

static av_always_inline void
rgb16_32ToY_c_template(int16_t *dst, const uint8_t *src,
                       int width, enum PixelFormat origin,
                       int shr, int shg, int shb, int shp,
                       int maskr, int maskg, int maskb,
                       int rsh, int gsh, int bsh, int S)
{
    const int ry = RY << rsh, gy = GY << gsh, by = BY << bsh,
              rnd = (32 << ((S) - 1)) + (1 << (S - 7));
    int i;

    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b = (px & maskb) >> shb;
        int g = (px & maskg) >> shg;
        int r = (px & maskr) >> shr;

        dst[i] = (ry * r + gy * g + by * b + rnd) >> ((S) - 6);
    }
}

static av_always_inline void
rgb16_32ToUV_c_template(int16_t *dstU, int16_t *dstV,
                        const uint8_t *src, int width,
                        enum PixelFormat origin,
                        int shr, int shg, int shb, int shp,
                        int maskr, int maskg, int maskb,
                        int rsh, int gsh, int bsh, int S)
{
    const int ru = RU << rsh, gu = GU << gsh, bu = BU << bsh,
              rv = RV << rsh, gv = GV << gsh, bv = BV << bsh,
              rnd = (256 << ((S) - 1)) + (1 << (S - 7));
    int i;

    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b = (px & maskb) >> shb;
        int g = (px & maskg) >> shg;
        int r = (px & maskr) >> shr;

        dstU[i] = (ru * r + gu * g + bu * b + rnd) >> ((S) - 6);
        dstV[i] = (rv * r + gv * g + bv * b + rnd) >> ((S) - 6);
    }
}
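/* The _half variant below averages two horizontally adjacent pixels
 * before converting to YUV. Summing the two pixel words directly would
 * let one component's carry spill into the next field, so the trick is:
 *     g  = (px0 & maskgx) + (px1 & maskgx)   // green sum, isolated
 *     rb = px0 + px1 - g                     // exact sum of r and b fields
 * The masks are widened with mask |= mask << 1 because each field now
 * holds a two-pixel sum needing one extra bit. (Editor's note.) */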
static av_always_inline void
rgb16_32ToUV_half_c_template(int16_t *dstU, int16_t *dstV,
                             const uint8_t *src, int width,
                             enum PixelFormat origin,
                             int shr, int shg, int shb, int shp,
                             int maskr, int maskg, int maskb,
                             int rsh, int gsh, int bsh, int S)
{
    const int ru = RU << rsh, gu = GU << gsh, bu = BU << bsh,
              rv = RV << rsh, gv = GV << gsh, bv = BV << bsh,
              rnd = (256U << (S)) + (1 << (S - 6)), maskgx = ~(maskr | maskb);
    int i;

    maskr |= maskr << 1;
    maskb |= maskb << 1;
    maskg |= maskg << 1;
    for (i = 0; i < width; i++) {
        int px0 = input_pixel(2 * i + 0) >> shp;
        int px1 = input_pixel(2 * i + 1) >> shp;
        int b, r, g = (px0 & maskgx) + (px1 & maskgx);
        int rb = px0 + px1 - g;

        b = (rb & maskb) >> shb;
        if (shp || origin == PIX_FMT_BGR565LE || origin == PIX_FMT_BGR565BE ||
            origin == PIX_FMT_RGB565LE || origin == PIX_FMT_RGB565BE) {
            g >>= shg;
        } else {
            g = (g & maskg) >> shg;
        }
        r = (rb & maskr) >> shr;

        dstU[i] = (ru * r + gu * g + bu * b + (unsigned) rnd) >> ((S) - 6 + 1);
        dstV[i] = (rv * r + gv * g + bv * b + (unsigned) rnd) >> ((S) - 6 + 1);
    }
}

#undef input_pixel
#define rgb16_32_wrapper(fmt, name, shr, shg, shb, shp, maskr, \
                         maskg, maskb, rsh, gsh, bsh, S) \
static void name ## ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, \
                          int width, uint32_t *unused) \
{ \
    rgb16_32ToY_c_template((int16_t *) dst, src, width, fmt, \
                           shr, shg, shb, shp, \
                           maskr, maskg, maskb, rsh, gsh, bsh, S); \
} \
\
static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \
                           const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \
                           int width, uint32_t *unused) \
{ \
    rgb16_32ToUV_c_template((int16_t *) dstU, (int16_t *) dstV, src, width, fmt, \
                            shr, shg, shb, shp, \
                            maskr, maskg, maskb, rsh, gsh, bsh, S); \
} \
\
static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV, \
                                const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \
                                int width, uint32_t *unused) \
{ \
    rgb16_32ToUV_half_c_template((int16_t *) dstU, (int16_t *) dstV, src, width, fmt, \
                                 shr, shg, shb, shp, \
                                 maskr, maskg, maskb, rsh, gsh, bsh, S); \
}

rgb16_32_wrapper(PIX_FMT_BGR32,    bgr32,  16, 0,  0, 0, 0xFF0000, 0xFF00,   0x00FF,  8, 0,  8, RGB2YUV_SHIFT + 8);
rgb16_32_wrapper(PIX_FMT_BGR32_1,  bgr321, 16, 0,  0, 8, 0xFF0000, 0xFF00,   0x00FF,  8, 0,  8, RGB2YUV_SHIFT + 8);
rgb16_32_wrapper(PIX_FMT_RGB32,    rgb32,   0, 0, 16, 0,   0x00FF, 0xFF00, 0xFF0000,  8, 0,  8, RGB2YUV_SHIFT + 8);
rgb16_32_wrapper(PIX_FMT_RGB32_1,  rgb321,  0, 0, 16, 8,   0x00FF, 0xFF00, 0xFF0000,  8, 0,  8, RGB2YUV_SHIFT + 8);
rgb16_32_wrapper(PIX_FMT_BGR565LE, bgr16le, 0, 0,  0, 0,   0x001F, 0x07E0,   0xF800, 11, 5,  0, RGB2YUV_SHIFT + 8);
rgb16_32_wrapper(PIX_FMT_BGR555LE, bgr15le, 0, 0,  0, 0,   0x001F, 0x03E0,   0x7C00, 10, 5,  0, RGB2YUV_SHIFT + 7);
rgb16_32_wrapper(PIX_FMT_RGB565LE, rgb16le, 0, 0,  0, 0,   0xF800, 0x07E0,   0x001F,  0, 5, 11, RGB2YUV_SHIFT + 8);
rgb16_32_wrapper(PIX_FMT_RGB555LE, rgb15le, 0, 0,  0, 0,   0x7C00, 0x03E0,   0x001F,  0, 5, 10, RGB2YUV_SHIFT + 7);
rgb16_32_wrapper(PIX_FMT_BGR565BE, bgr16be, 0, 0,  0, 0,   0x001F, 0x07E0,   0xF800, 11, 5,  0, RGB2YUV_SHIFT + 8);
rgb16_32_wrapper(PIX_FMT_BGR555BE, bgr15be, 0, 0,  0, 0,   0x001F, 0x03E0,   0x7C00, 10, 5,  0, RGB2YUV_SHIFT + 7);
rgb16_32_wrapper(PIX_FMT_RGB565BE, rgb16be, 0, 0,  0, 0,   0xF800, 0x07E0,   0x001F,  0, 5, 11, RGB2YUV_SHIFT + 8);
rgb16_32_wrapper(PIX_FMT_RGB555BE, rgb15be, 0, 0,  0, 0,   0x7C00, 0x03E0,   0x001F,  0, 5, 10, RGB2YUV_SHIFT + 7);
static void gbr24pToY_c(uint16_t *dst, const uint8_t *gsrc, const uint8_t *bsrc, const uint8_t *rsrc,
                        int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        unsigned int g = gsrc[i];
        unsigned int b = bsrc[i];
        unsigned int r = rsrc[i];

        dst[i] = (RY * r + GY * g + BY * b + (0x801 << (RGB2YUV_SHIFT - 7))) >> (RGB2YUV_SHIFT - 6);
    }
}

static void gbr24pToUV_c(uint16_t *dstU, uint16_t *dstV,
                         const uint8_t *gsrc, const uint8_t *bsrc, const uint8_t *rsrc,
                         int width, enum PixelFormat origin)
{
    int i;
    for (i = 0; i < width; i++) {
        unsigned int g = gsrc[i];
        unsigned int b = bsrc[i];
        unsigned int r = rsrc[i];

        dstU[i] = (RU * r + GU * g + BU * b + (0x4001 << (RGB2YUV_SHIFT - 7))) >> (RGB2YUV_SHIFT - 6);
        dstV[i] = (RV * r + GV * g + BV * b + (0x4001 << (RGB2YUV_SHIFT - 7))) >> (RGB2YUV_SHIFT - 6);
    }
}

static void gbr24pToUV_half_c(uint16_t *dstU, uint16_t *dstV,
                              const uint8_t *gsrc, const uint8_t *bsrc, const uint8_t *rsrc,
                              int width, enum PixelFormat origin)
{
    int i;
    for (i = 0; i < width; i++) {
        unsigned int g = gsrc[2 * i] + gsrc[2 * i + 1];
        unsigned int b = bsrc[2 * i] + bsrc[2 * i + 1];
        unsigned int r = rsrc[2 * i] + rsrc[2 * i + 1];

        dstU[i] = (RU * r + GU * g + BU * b + (0x4001 << (RGB2YUV_SHIFT - 6))) >> (RGB2YUV_SHIFT - 6 + 1);
        dstV[i] = (RV * r + GV * g + BV * b + (0x4001 << (RGB2YUV_SHIFT - 6))) >> (RGB2YUV_SHIFT - 6 + 1);
    }
}
static void abgrToA_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dst[i] = src[4 * i] << 6;
    }
}

static void rgbaToA_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dst[i] = src[4 * i + 3] << 6;
    }
}
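/* Paletted input: as the shifts below show, each pal[] entry is
 * expected to pack a pre-converted pixel as (A << 24) | (V << 16) |
 * (U << 8) | Y, with every component expanded to the 15-bit internal
 * range via << 6. (Editor's note, inferred from the extraction code;
 * the palette itself is filled elsewhere in the file.) */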
static void palToA_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
{
    int i;
    for (i = 0; i < width; i++) {
        int d = src[i];

        dst[i] = (pal[d] >> 24) << 6;
    }
}

static void palToY_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
{
    int i;
    for (i = 0; i < width; i++) {
        int d = src[i];

        dst[i] = (pal[d] & 0xFF) << 6;
    }
}

static void palToUV_c(int16_t *dstU, int16_t *dstV,
                      const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                      int width, uint32_t *pal)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int p = pal[src1[i]];

        dstU[i] = (uint8_t) (p >>  8) << 6;
        dstV[i] = (uint8_t) (p >> 16) << 6;
    }
}
static void monowhite2Y_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int i, j;
    for (i = 0; i < width / 8; i++) {
        int d = ~src[i];
        for (j = 0; j < 8; j++)
            dst[8 * i + j] = ((d >> (7 - j)) & 1) * 16383;
    }
    if (width & 7) {
        int d = ~src[i];
        for (j = 0; j < (width & 7); j++)
            dst[8 * i + j] = ((d >> (7 - j)) & 1) * 16383;
    }
}

static void monoblack2Y_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int i, j;
    for (i = 0; i < width / 8; i++) {
        int d = src[i];
        for (j = 0; j < 8; j++)
            dst[8 * i + j] = ((d >> (7 - j)) & 1) * 16383;
    }
    if (width & 7) {
        int d = src[i];
        for (j = 0; j < (width & 7); j++)
            dst[8 * i + j] = ((d >> (7 - j)) & 1) * 16383;
    }
}
//FIXME yuy2* can read up to 7 samples too much

static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i];
}

static void yuy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 1];
        dstV[i] = src1[4 * i + 3];
    }
    assert(src1 == src2);
}

static void bswap16Y_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int i;
    const uint16_t *src = (const uint16_t *) _src;
    uint16_t *dst = (uint16_t *) _dst;
    for (i = 0; i < width; i++) {
        dst[i] = av_bswap16(src[i]);
    }
}

static void bswap16UV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *_src1,
                        const uint8_t *_src2, int width, uint32_t *unused)
{
    int i;
    const uint16_t *src1 = (const uint16_t *) _src1,
                   *src2 = (const uint16_t *) _src2;
    uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = av_bswap16(src1[i]);
        dstV[i] = av_bswap16(src2[i]);
    }
}
/* This is almost identical to the previous, and exists only because
 * yuy2ToY/UV(dst, src + 1, ...) would have 100% unaligned accesses. */
static void uyvyToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i + 1];
}

static void uyvyToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 0];
        dstV[i] = src1[4 * i + 2];
    }
    assert(src1 == src2);
}
static av_always_inline void nvXXtoUV_c(uint8_t *dst1, uint8_t *dst2,
                                        const uint8_t *src, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dst1[i] = src[2 * i + 0];
        dst2[i] = src[2 * i + 1];
    }
}

static void nv12ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstU, dstV, src1, width);
}

static void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstV, dstU, src1, width);
}
#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))

static void bgr24ToY_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2,
                       int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int r = src[i * 3 + 2];

        dst[i] = ((RY * r + GY * g + BY * b + (32 << (RGB2YUV_SHIFT - 1)) + (1 << (RGB2YUV_SHIFT - 7))) >> (RGB2YUV_SHIFT - 6));
    }
}

static void bgr24ToUV_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int r = src1[3 * i + 2];

        dstU[i] = (RU * r + GU * g + BU * b + (256 << (RGB2YUV_SHIFT - 1)) + (1 << (RGB2YUV_SHIFT - 7))) >> (RGB2YUV_SHIFT - 6);
        dstV[i] = (RV * r + GV * g + BV * b + (256 << (RGB2YUV_SHIFT - 1)) + (1 << (RGB2YUV_SHIFT - 7))) >> (RGB2YUV_SHIFT - 6);
    }
    assert(src1 == src2);
}

static void bgr24ToUV_half_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int b = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int r = src1[6 * i + 2] + src1[6 * i + 5];

        dstU[i] = (RU * r + GU * g + BU * b + (256 << RGB2YUV_SHIFT) + (1 << (RGB2YUV_SHIFT - 6))) >> (RGB2YUV_SHIFT - 5);
        dstV[i] = (RV * r + GV * g + BV * b + (256 << RGB2YUV_SHIFT) + (1 << (RGB2YUV_SHIFT - 6))) >> (RGB2YUV_SHIFT - 5);
    }
    assert(src1 == src2);
}

static void rgb24ToY_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                       uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        int r = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int b = src[i * 3 + 2];

        dst[i] = ((RY * r + GY * g + BY * b + (32 << (RGB2YUV_SHIFT - 1)) + (1 << (RGB2YUV_SHIFT - 7))) >> (RGB2YUV_SHIFT - 6));
    }
}

static void rgb24ToUV_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int b = src1[3 * i + 2];

        dstU[i] = (RU * r + GU * g + BU * b + (256 << (RGB2YUV_SHIFT - 1)) + (1 << (RGB2YUV_SHIFT - 7))) >> (RGB2YUV_SHIFT - 6);
        dstV[i] = (RV * r + GV * g + BV * b + (256 << (RGB2YUV_SHIFT - 1)) + (1 << (RGB2YUV_SHIFT - 7))) >> (RGB2YUV_SHIFT - 6);
    }
}

static void rgb24ToUV_half_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    assert(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int b = src1[6 * i + 2] + src1[6 * i + 5];

        dstU[i] = (RU * r + GU * g + BU * b + (256 << RGB2YUV_SHIFT) + (1 << (RGB2YUV_SHIFT - 6))) >> (RGB2YUV_SHIFT - 5);
        dstV[i] = (RV * r + GV * g + BV * b + (256 << RGB2YUV_SHIFT) + (1 << (RGB2YUV_SHIFT - 6))) >> (RGB2YUV_SHIFT - 5);
    }
}
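/* Horizontal scaling. Each output sample is a small FIR dot product:
 *
 *     dst[i] = sum_{j = 0 .. filterSize-1} src[filterPos[i] + j] * filter[filterSize * i + j]
 *
 * with 14-bit filter coefficients, shifted down to the 15-bit (or
 * 19-bit, for >8-bit pipelines) intermediate format. The hScale16To*
 * variants fold the source bit depth into that shift, which is why
 * `sh` depends on depth_minus1. (Editor's summary of the functions
 * below.) */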
static void hScale16To19_c(SwsContext *c, int16_t *_dst, int dstW, const uint8_t *_src,
                           const int16_t *filter,
                           const int16_t *filterPos, int filterSize)
{
    int i;
    int32_t *dst = (int32_t *) _dst;
    const uint16_t *src = (const uint16_t *) _src;
    int bits = av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1;
    int sh = bits - 4;

    if ((isAnyRGB(c->srcFormat) || c->srcFormat == PIX_FMT_PAL8) && av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1 < 15)
        sh = 9;

    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val = 0;

        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        // filter=14 bit, input=16 bit, output=30 bit, >> 11 makes 19 bit
        dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
    }
}

static void hScale16To15_c(SwsContext *c, int16_t *dst, int dstW, const uint8_t *_src,
                           const int16_t *filter,
                           const int16_t *filterPos, int filterSize)
{
    int i;
    const uint16_t *src = (const uint16_t *) _src;
    int sh = av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1;

    if (sh < 15)
        sh = isAnyRGB(c->srcFormat) || c->srcFormat == PIX_FMT_PAL8 ? 13 : av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1;

    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val = 0;

        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        // filter=14 bit, input=16 bit, output=30 bit, >> 15 makes 15 bit
        dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
    }
}
// bilinear / bicubic scaling
static void hScale8To15_c(SwsContext *c, int16_t *dst, int dstW, const uint8_t *src,
                          const int16_t *filter, const int16_t *filterPos,
                          int filterSize)
{
    int i;
    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val = 0;
        for (j = 0; j < filterSize; j++) {
            val += ((int) src[srcPos + j]) * filter[filterSize * i + j];
        }
        dst[i] = FFMIN(val >> 7, (1 << 15) - 1); // the cubic equation does overflow ...
    }
}

static void hScale8To19_c(SwsContext *c, int16_t *_dst, int dstW, const uint8_t *src,
                          const int16_t *filter, const int16_t *filterPos,
                          int filterSize)
{
    int i;
    int32_t *dst = (int32_t *) _dst;
    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val = 0;
        for (j = 0; j < filterSize; j++) {
            val += ((int) src[srcPos + j]) * filter[filterSize * i + j];
        }
        dst[i] = FFMIN(val >> 3, (1 << 19) - 1); // the cubic equation does overflow ...
    }
}
//FIXME all pal and rgb srcFormats could do this conversion as well
//FIXME all scalers more complex than bilinear could do half of this transform
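/* Editor's note on the magic constants below: the internal samples are
 * 8-bit values scaled by 128 (<< 7), so full-range expansion for luma
 * is dst = (src - 16*128) * 255/219, approximated here as
 * (src * 19077 - 39057361) >> 14 (19077/16384 ~= 255/219, and
 * 2048 * 19077 - 39057361 ~= 0, mapping black to 0). The chroma
 * version re-centres around 128*128 with slope 4663/4096 ~= 255/224.
 * The FFMIN() guards keep the 32-bit products from overflowing on
 * out-of-range input. */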
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = (FFMIN(dstU[i], 30775) * 4663 - 9289992) >> 12; // -264
        dstV[i] = (FFMIN(dstV[i], 30775) * 4663 - 9289992) >> 12; // -264
    }
}

static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = (dstU[i] * 1799 + 4081085) >> 11; // 1469
        dstV[i] = (dstV[i] * 1799 + 4081085) >> 11; // 1469
    }
}

static void lumRangeToJpeg_c(int16_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = (FFMIN(dst[i], 30189) * 19077 - 39057361) >> 14;
}

static void lumRangeFromJpeg_c(int16_t *dst, int width)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = (dst[i] * 14071 + 33561947) >> 14;
}

static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width)
{
    int i;
    int32_t *dstU = (int32_t *) _dstU;
    int32_t *dstV = (int32_t *) _dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = (FFMIN(dstU[i], 30775 << 4) * 4663 - (9289992 << 4)) >> 12; // -264
        dstV[i] = (FFMIN(dstV[i], 30775 << 4) * 4663 - (9289992 << 4)) >> 12; // -264
    }
}

static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width)
{
    int i;
    int32_t *dstU = (int32_t *) _dstU;
    int32_t *dstV = (int32_t *) _dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = (dstU[i] * 1799 + (4081085 << 4)) >> 11; // 1469
        dstV[i] = (dstV[i] * 1799 + (4081085 << 4)) >> 11; // 1469
    }
}

static void lumRangeToJpeg16_c(int16_t *_dst, int width)
{
    int i;
    int32_t *dst = (int32_t *) _dst;
    for (i = 0; i < width; i++)
        dst[i] = (FFMIN(dst[i], 30189 << 4) * 4769 - (39057361 << 2)) >> 12;
}

static void lumRangeFromJpeg16_c(int16_t *_dst, int width)
{
    int i;
    int32_t *dst = (int32_t *) _dst;
    for (i = 0; i < width; i++)
        dst[i] = (dst[i] * (14071 / 4) + (33561947 << 4) / 4) >> 12;
}
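/* Fast bilinear horizontal scaling: xpos walks the source in 16.16
 * fixed point (xInc is roughly (srcW << 16) / dstW; the exact setup
 * lives elsewhere in the file), xx is the integer sample position and
 * xalpha the top 7 fraction bits, so
 *     dst[i] = (src[xx] << 7) + (src[xx + 1] - src[xx]) * xalpha
 * yields the usual 15-bit intermediate. The trailing loop re-clamps
 * the last outputs to the final source sample so src[xx + 1] never
 * reads past the end. (Editor's summary.) */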
static void hyscale_fast_c(SwsContext *c, int16_t *dst, int dstWidth,
                           const uint8_t *src, int srcW, int xInc)
{
    int i;
    unsigned int xpos = 0;
    for (i = 0; i < dstWidth; i++) {
        register unsigned int xx = xpos >> 16;
        register unsigned int xalpha = (xpos & 0xFFFF) >> 9;
        dst[i] = (src[xx] << 7) + (src[xx + 1] - src[xx]) * xalpha;
        xpos += xInc;
    }
    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--)
        dst[i] = src[srcW - 1] * 128;
}

// *** horizontal scale Y line to temp buffer
static av_always_inline void hyscale(SwsContext *c, int16_t *dst, int dstWidth,
                                     const uint8_t *src, const uint8_t *src2, const uint8_t *src3,
                                     int srcW, int xInc,
                                     const int16_t *hLumFilter,
                                     const int16_t *hLumFilterPos, int hLumFilterSize,
                                     uint8_t *formatConvBuffer,
                                     uint32_t *pal, int isAlpha)
{
    void (*toYV12)(uint8_t *, const uint8_t *, const uint8_t *, const uint8_t *, int, uint32_t *) = isAlpha ? c->alpToYV12 : c->lumToYV12;
    void (*convertRange)(int16_t *, int) = isAlpha ? NULL : c->lumConvertRange;

    if (toYV12) {
        toYV12(formatConvBuffer, src, src2, src3, srcW, pal);
        src = formatConvBuffer;
    }

    if (!c->hyscale_fast) {
        c->hyScale(c, dst, dstWidth, src, hLumFilter, hLumFilterPos, hLumFilterSize);
    } else { // fast bilinear upscale / crap downscale
        c->hyscale_fast(c, dst, dstWidth, src, srcW, xInc);
    }

    if (convertRange)
        convertRange(dst, dstWidth);
}
static void hcscale_fast_c(SwsContext *c, int16_t *dst1, int16_t *dst2,
                           int dstWidth, const uint8_t *src1,
                           const uint8_t *src2, int srcW, int xInc)
{
    int i;
    unsigned int xpos = 0;
    for (i = 0; i < dstWidth; i++) {
        register unsigned int xx = xpos >> 16;
        register unsigned int xalpha = (xpos & 0xFFFF) >> 9;
        dst1[i] = (src1[xx] * (xalpha ^ 127) + src1[xx + 1] * xalpha);
        dst2[i] = (src2[xx] * (xalpha ^ 127) + src2[xx + 1] * xalpha);
        xpos += xInc;
    }
    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--) {
        dst1[i] = src1[srcW - 1] * 128;
        dst2[i] = src2[srcW - 1] * 128;
    }
}

static av_always_inline void hcscale(SwsContext *c, int16_t *dst1, int16_t *dst2, int dstWidth,
                                     const uint8_t *src0, const uint8_t *src1, const uint8_t *src2,
                                     int srcW, int xInc, const int16_t *hChrFilter,
                                     const int16_t *hChrFilterPos, int hChrFilterSize,
                                     uint8_t *formatConvBuffer, uint32_t *pal)
{
    if (c->chrToYV12) {
        uint8_t *buf2 = formatConvBuffer + FFALIGN(srcW * 2 + 78, 16);
        c->chrToYV12(formatConvBuffer, buf2, src0, src1, src2, srcW, pal);
        src1 = formatConvBuffer;
        src2 = buf2;
    }

    if (!c->hcscale_fast) {
        c->hcScale(c, dst1, dstWidth, src1, hChrFilter, hChrFilterPos, hChrFilterSize);
        c->hcScale(c, dst2, dstWidth, src2, hChrFilter, hChrFilterPos, hChrFilterSize);
    } else { // fast bilinear upscale / crap downscale
        c->hcscale_fast(c, dst1, dst2, dstWidth, src1, src2, srcW, xInc);
    }

    if (c->chrConvertRange)
        c->chrConvertRange(dst1, dst2, dstWidth);
}
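/* Output-function dispatch. yuv2packed1 handles the vertically
 * unscaled case (one input line), yuv2packed2 the common two-line
 * bilinear blend, and yuv2packedX the general N-tap vertical filter;
 * the planar yuv2yuv1/yuv2yuvX pair is split the same way. swScale()
 * below picks between them per output line. */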
static av_always_inline void
find_c_packed_planar_out_funcs(SwsContext *c,
                               yuv2planar1_fn *yuv2yuv1, yuv2planarX_fn *yuv2yuvX,
                               yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2,
                               yuv2packedX_fn *yuv2packedX)
{
    enum PixelFormat dstFormat = c->dstFormat;

    if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21) {
        *yuv2yuvX = yuv2nv12X_c;
    } else if (is16BPS(dstFormat)) {
        *yuv2yuvX = isBE(dstFormat) ? yuv2yuvX16BE_c : yuv2yuvX16LE_c;
    } else if (is9_OR_10BPS(dstFormat)) {
        if (av_pix_fmt_descriptors[dstFormat].comp[0].depth_minus1 == 8) {
            *yuv2yuvX = isBE(dstFormat) ? yuv2yuvX9BE_c : yuv2yuvX9LE_c;
        } else {
            *yuv2yuvX = isBE(dstFormat) ? yuv2yuvX10BE_c : yuv2yuvX10LE_c;
        }
    } else {
        *yuv2yuv1 = yuv2yuv1_c;
        *yuv2yuvX = yuv2yuvX_c;
    }

    if (c->flags & SWS_FULL_CHR_H_INT) {
        switch (dstFormat) {
        case PIX_FMT_RGBA:
#if CONFIG_SMALL
            *yuv2packedX = yuv2rgba32_full_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packedX = yuv2rgba32_full_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2rgbx32_full_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case PIX_FMT_ARGB:
#if CONFIG_SMALL
            *yuv2packedX = yuv2argb32_full_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packedX = yuv2argb32_full_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2xrgb32_full_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case PIX_FMT_BGRA:
#if CONFIG_SMALL
            *yuv2packedX = yuv2bgra32_full_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packedX = yuv2bgra32_full_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2bgrx32_full_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case PIX_FMT_ABGR:
#if CONFIG_SMALL
            *yuv2packedX = yuv2abgr32_full_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packedX = yuv2abgr32_full_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packedX = yuv2xbgr32_full_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case PIX_FMT_RGB24:
            *yuv2packedX = yuv2rgb24_full_X_c;
            break;
        case PIX_FMT_BGR24:
            *yuv2packedX = yuv2bgr24_full_X_c;
            break;
        }
        if (!*yuv2packedX)
            goto YUV_PACKED;
    } else {
YUV_PACKED:
        switch (dstFormat) {
        case PIX_FMT_GRAY16BE:
            *yuv2packed1 = yuv2gray16BE_1_c;
            *yuv2packed2 = yuv2gray16BE_2_c;
            *yuv2packedX = yuv2gray16BE_X_c;
            break;
        case PIX_FMT_GRAY16LE:
            *yuv2packed1 = yuv2gray16LE_1_c;
            *yuv2packed2 = yuv2gray16LE_2_c;
            *yuv2packedX = yuv2gray16LE_X_c;
            break;
        case PIX_FMT_MONOWHITE:
            *yuv2packed1 = yuv2monowhite_1_c;
            *yuv2packed2 = yuv2monowhite_2_c;
            *yuv2packedX = yuv2monowhite_X_c;
            break;
        case PIX_FMT_MONOBLACK:
            *yuv2packed1 = yuv2monoblack_1_c;
            *yuv2packed2 = yuv2monoblack_2_c;
            *yuv2packedX = yuv2monoblack_X_c;
            break;
        case PIX_FMT_YUYV422:
            *yuv2packed1 = yuv2yuyv422_1_c;
            *yuv2packed2 = yuv2yuyv422_2_c;
            *yuv2packedX = yuv2yuyv422_X_c;
            break;
        case PIX_FMT_UYVY422:
            *yuv2packed1 = yuv2uyvy422_1_c;
            *yuv2packed2 = yuv2uyvy422_2_c;
            *yuv2packedX = yuv2uyvy422_X_c;
            break;
        case PIX_FMT_RGB48LE:
            *yuv2packed1 = yuv2rgb48le_1_c;
            *yuv2packed2 = yuv2rgb48le_2_c;
            *yuv2packedX = yuv2rgb48le_X_c;
            break;
        case PIX_FMT_RGB48BE:
            *yuv2packed1 = yuv2rgb48be_1_c;
            *yuv2packed2 = yuv2rgb48be_2_c;
            *yuv2packedX = yuv2rgb48be_X_c;
            break;
        case PIX_FMT_BGR48LE:
            *yuv2packed1 = yuv2bgr48le_1_c;
            *yuv2packed2 = yuv2bgr48le_2_c;
            *yuv2packedX = yuv2bgr48le_X_c;
            break;
        case PIX_FMT_BGR48BE:
            *yuv2packed1 = yuv2bgr48be_1_c;
            *yuv2packed2 = yuv2bgr48be_2_c;
            *yuv2packedX = yuv2bgr48be_X_c;
            break;
        case PIX_FMT_RGB32:
        case PIX_FMT_BGR32:
#if CONFIG_SMALL
            *yuv2packed1 = yuv2rgb32_1_c;
            *yuv2packed2 = yuv2rgb32_2_c;
            *yuv2packedX = yuv2rgb32_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packed1 = yuv2rgba32_1_c;
                *yuv2packed2 = yuv2rgba32_2_c;
                *yuv2packedX = yuv2rgba32_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packed1 = yuv2rgbx32_1_c;
                *yuv2packed2 = yuv2rgbx32_2_c;
                *yuv2packedX = yuv2rgbx32_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case PIX_FMT_RGB32_1:
        case PIX_FMT_BGR32_1:
#if CONFIG_SMALL
            *yuv2packed1 = yuv2rgb32_1_1_c;
            *yuv2packed2 = yuv2rgb32_1_2_c;
            *yuv2packedX = yuv2rgb32_1_X_c;
#else
#if CONFIG_SWSCALE_ALPHA
            if (c->alpPixBuf) {
                *yuv2packed1 = yuv2rgba32_1_1_c;
                *yuv2packed2 = yuv2rgba32_1_2_c;
                *yuv2packedX = yuv2rgba32_1_X_c;
            } else
#endif /* CONFIG_SWSCALE_ALPHA */
            {
                *yuv2packed1 = yuv2rgbx32_1_1_c;
                *yuv2packed2 = yuv2rgbx32_1_2_c;
                *yuv2packedX = yuv2rgbx32_1_X_c;
            }
#endif /* !CONFIG_SMALL */
            break;
        case PIX_FMT_RGB24:
            *yuv2packed1 = yuv2rgb24_1_c;
            *yuv2packed2 = yuv2rgb24_2_c;
            *yuv2packedX = yuv2rgb24_X_c;
            break;
        case PIX_FMT_BGR24:
            *yuv2packed1 = yuv2bgr24_1_c;
            *yuv2packed2 = yuv2bgr24_2_c;
            *yuv2packedX = yuv2bgr24_X_c;
            break;
        case PIX_FMT_RGB565LE:
        case PIX_FMT_RGB565BE:
        case PIX_FMT_BGR565LE:
        case PIX_FMT_BGR565BE:
            *yuv2packed1 = yuv2rgb16_1_c;
            *yuv2packed2 = yuv2rgb16_2_c;
            *yuv2packedX = yuv2rgb16_X_c;
            break;
        case PIX_FMT_RGB555LE:
        case PIX_FMT_RGB555BE:
        case PIX_FMT_BGR555LE:
        case PIX_FMT_BGR555BE:
            *yuv2packed1 = yuv2rgb15_1_c;
            *yuv2packed2 = yuv2rgb15_2_c;
            *yuv2packedX = yuv2rgb15_X_c;
            break;
        case PIX_FMT_RGB444LE:
        case PIX_FMT_RGB444BE:
        case PIX_FMT_BGR444LE:
        case PIX_FMT_BGR444BE:
            *yuv2packed1 = yuv2rgb12_1_c;
            *yuv2packed2 = yuv2rgb12_2_c;
            *yuv2packedX = yuv2rgb12_X_c;
            break;
        case PIX_FMT_RGB8:
        case PIX_FMT_BGR8:
            *yuv2packed1 = yuv2rgb8_1_c;
            *yuv2packed2 = yuv2rgb8_2_c;
            *yuv2packedX = yuv2rgb8_X_c;
            break;
        case PIX_FMT_RGB4:
        case PIX_FMT_BGR4:
            *yuv2packed1 = yuv2rgb4_1_c;
            *yuv2packed2 = yuv2rgb4_2_c;
            *yuv2packedX = yuv2rgb4_X_c;
            break;
        case PIX_FMT_RGB4_BYTE:
        case PIX_FMT_BGR4_BYTE:
            *yuv2packed1 = yuv2rgb4b_1_c;
            *yuv2packed2 = yuv2rgb4b_2_c;
            *yuv2packedX = yuv2rgb4b_X_c;
            break;
        }
    }
}
#define DEBUG_SWSCALE_BUFFERS 0
#define DEBUG_BUFFERS(...) if (DEBUG_SWSCALE_BUFFERS) av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
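/* The core scaling loop. For each output line, swScale() first makes
 * sure every source line the vertical filter needs has been
 * horizontally scaled into the lumPixBuf/chrUPixBuf/chrVPixBuf ring
 * buffers (lastInLumBuf/lastInChrBuf track the newest buffered source
 * line, lumBufIndex/chrBufIndex the ring position), then runs one of
 * the yuv2* output functions on a window of vLumFilterSize /
 * vChrFilterSize buffered lines. If the current slice does not contain
 * enough input lines, the state is saved in the context and the loop
 * resumes on the next slice. (Editor's summary.) */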
  2315. static int swScale(SwsContext *c, const uint8_t* src[],
  2316. int srcStride[], int srcSliceY,
  2317. int srcSliceH, uint8_t* dst[], int dstStride[])
  2318. {
  2319. /* load a few things into local vars to make the code more readable? and faster */
  2320. const int srcW= c->srcW;
  2321. const int dstW= c->dstW;
  2322. const int dstH= c->dstH;
  2323. const int chrDstW= c->chrDstW;
  2324. const int chrSrcW= c->chrSrcW;
  2325. const int lumXInc= c->lumXInc;
  2326. const int chrXInc= c->chrXInc;
  2327. const enum PixelFormat dstFormat= c->dstFormat;
  2328. const int flags= c->flags;
  2329. int16_t *vLumFilterPos= c->vLumFilterPos;
  2330. int16_t *vChrFilterPos= c->vChrFilterPos;
  2331. int16_t *hLumFilterPos= c->hLumFilterPos;
  2332. int16_t *hChrFilterPos= c->hChrFilterPos;
  2333. int16_t *vLumFilter= c->vLumFilter;
  2334. int16_t *vChrFilter= c->vChrFilter;
  2335. int16_t *hLumFilter= c->hLumFilter;
  2336. int16_t *hChrFilter= c->hChrFilter;
  2337. int32_t *lumMmxFilter= c->lumMmxFilter;
  2338. int32_t *chrMmxFilter= c->chrMmxFilter;
  2339. int32_t av_unused *alpMmxFilter= c->alpMmxFilter;
  2340. const int vLumFilterSize= c->vLumFilterSize;
  2341. const int vChrFilterSize= c->vChrFilterSize;
  2342. const int hLumFilterSize= c->hLumFilterSize;
  2343. const int hChrFilterSize= c->hChrFilterSize;
  2344. int16_t **lumPixBuf= c->lumPixBuf;
  2345. int16_t **chrUPixBuf= c->chrUPixBuf;
  2346. int16_t **chrVPixBuf= c->chrVPixBuf;
  2347. int16_t **alpPixBuf= c->alpPixBuf;
  2348. const int vLumBufSize= c->vLumBufSize;
  2349. const int vChrBufSize= c->vChrBufSize;
  2350. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2351. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2352. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
  2353. int lastDstY;
  2354. uint32_t *pal=c->pal_yuv;
  2355. int should_dither= isNBPS(c->srcFormat) || is16BPS(c->srcFormat);
  2356. yuv2planar1_fn yuv2yuv1 = c->yuv2yuv1;
  2357. yuv2planarX_fn yuv2yuvX = c->yuv2yuvX;
  2358. yuv2packed1_fn yuv2packed1 = c->yuv2packed1;
  2359. yuv2packed2_fn yuv2packed2 = c->yuv2packed2;
  2360. yuv2packedX_fn yuv2packedX = c->yuv2packedX;
  2361. /* vars which will change and which we need to store back in the context */
  2362. int dstY= c->dstY;
  2363. int lumBufIndex= c->lumBufIndex;
  2364. int chrBufIndex= c->chrBufIndex;
  2365. int lastInLumBuf= c->lastInLumBuf;
  2366. int lastInChrBuf= c->lastInChrBuf;
  2367. if (isPacked(c->srcFormat)) {
  2368. src[0]=
  2369. src[1]=
  2370. src[2]=
  2371. src[3]= src[0];
  2372. srcStride[0]=
  2373. srcStride[1]=
  2374. srcStride[2]=
  2375. srcStride[3]= srcStride[0];
  2376. }
  2377. srcStride[1]<<= c->vChrDrop;
  2378. srcStride[2]<<= c->vChrDrop;
  2379. DEBUG_BUFFERS("swScale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
  2380. src[0], srcStride[0], src[1], srcStride[1], src[2], srcStride[2], src[3], srcStride[3],
  2381. dst[0], dstStride[0], dst[1], dstStride[1], dst[2], dstStride[2], dst[3], dstStride[3]);
  2382. DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
  2383. srcSliceY, srcSliceH, dstY, dstH);
  2384. DEBUG_BUFFERS("vLumFilterSize: %d vLumBufSize: %d vChrFilterSize: %d vChrBufSize: %d\n",
  2385. vLumFilterSize, vLumBufSize, vChrFilterSize, vChrBufSize);
  2386. if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0 || dstStride[3]%8 != 0) {
  2387. static int warnedAlready=0; //FIXME move this into the context perhaps
  2388. if (flags & SWS_PRINT_INFO && !warnedAlready) {
  2389. av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
  2390. " ->cannot do aligned memory accesses anymore\n");
  2391. warnedAlready=1;
  2392. }
  2393. }
  2394. /* Note the user might start scaling the picture in the middle so this
  2395. will not get executed. This is not really intended but works
  2396. currently, so people might do it. */
  2397. if (srcSliceY ==0) {
  2398. lumBufIndex=-1;
  2399. chrBufIndex=-1;
  2400. dstY=0;
  2401. lastInLumBuf= -1;
  2402. lastInChrBuf= -1;
  2403. }
  2404. if (!should_dither) {
  2405. c->chrDither8 = c->lumDither8 = ff_sws_pb_64;
  2406. }
  2407. lastDstY= dstY;
  2408. for (;dstY < dstH; dstY++) {
  2409. const int chrDstY= dstY>>c->chrDstVSubSample;
  2410. uint8_t *dest[4] = {
  2411. dst[0] + dstStride[0] * dstY,
  2412. dst[1] + dstStride[1] * chrDstY,
  2413. dst[2] + dstStride[2] * chrDstY,
  2414. (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? dst[3] + dstStride[3] * dstY : NULL,
  2415. };
  2416. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2417. const int firstLumSrcY2= vLumFilterPos[FFMIN(dstY | ((1<<c->chrDstVSubSample) - 1), dstH-1)];
  2418. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2419. int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2420. int lastLumSrcY2=firstLumSrcY2+ vLumFilterSize -1; // Last line needed as input
  2421. int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2422. int enough_lines;
  2423. //handle holes (FAST_BILINEAR & weird filters)
  2424. if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2425. if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2426. assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
  2427. assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);
  2428. DEBUG_BUFFERS("dstY: %d\n", dstY);
  2429. DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
  2430. firstLumSrcY, lastLumSrcY, lastInLumBuf);
  2431. DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
  2432. firstChrSrcY, lastChrSrcY, lastInChrBuf);
  2433. // Do we have enough lines in this slice to output the dstY line
  2434. enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample);
  2435. if (!enough_lines) {
  2436. lastLumSrcY = srcSliceY + srcSliceH - 1;
  2437. lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
  2438. DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
  2439. lastLumSrcY, lastChrSrcY);
  2440. }
  2441. //Do horizontal scaling
  2442. while(lastInLumBuf < lastLumSrcY) {
  2443. const uint8_t *src1= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2444. const uint8_t *src2= src[1]+(lastInLumBuf + 1 - srcSliceY)*srcStride[1];
  2445. const uint8_t *src3= src[2]+(lastInLumBuf + 1 - srcSliceY)*srcStride[2];
  2446. const uint8_t *src4= src[3]+(lastInLumBuf + 1 - srcSliceY)*srcStride[3];
  2447. lumBufIndex++;
  2448. assert(lumBufIndex < 2*vLumBufSize);
  2449. assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
  2450. assert(lastInLumBuf + 1 - srcSliceY >= 0);
  2451. hyscale(c, lumPixBuf[ lumBufIndex ], dstW, src1, src2, src3, srcW, lumXInc,
  2452. hLumFilter, hLumFilterPos, hLumFilterSize,
  2453. formatConvBuffer,
  2454. pal, 0);
  2455. if (CONFIG_SWSCALE_ALPHA && alpPixBuf)
  2456. hyscale(c, alpPixBuf[ lumBufIndex ], dstW, src4, NULL, NULL, srcW,
  2457. lumXInc, hLumFilter, hLumFilterPos, hLumFilterSize,
  2458. formatConvBuffer,
  2459. pal, 1);
  2460. lastInLumBuf++;
  2461. DEBUG_BUFFERS("\t\tlumBufIndex %d: lastInLumBuf: %d\n",
  2462. lumBufIndex, lastInLumBuf);
  2463. }
  2464. while(lastInChrBuf < lastChrSrcY) {
  2465. const uint8_t *src0= src[0]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[0];
  2466. const uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2467. const uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2468. chrBufIndex++;
  2469. assert(chrBufIndex < 2*vChrBufSize);
  2470. assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
  2471. assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
  2472. //FIXME replace parameters through context struct (some at least)
  2473. if (c->needs_hcscale)
  2474. hcscale(c, chrUPixBuf[chrBufIndex], chrVPixBuf[chrBufIndex],
  2475. chrDstW, src0, src1, src2, chrSrcW, chrXInc,
  2476. hChrFilter, hChrFilterPos, hChrFilterSize,
  2477. formatConvBuffer, pal);
  2478. lastInChrBuf++;
  2479. DEBUG_BUFFERS("\t\tchrBufIndex %d: lastInChrBuf: %d\n",
  2480. chrBufIndex, lastInChrBuf);
  2481. }
  2482. //wrap buf index around to stay inside the ring buffer
  2483. if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
  2484. if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
  2485. if (!enough_lines)
  2486. break; //we can't output a dstY line so let's try with the next slice
  2487. #if HAVE_MMX
  2488. updateMMXDitherTables(c, dstY, lumBufIndex, chrBufIndex, lastInLumBuf, lastInChrBuf);
  2489. #endif
  2490. if (should_dither) {
  2491. c->chrDither8 = dither_8x8_128[chrDstY & 7];
  2492. c->lumDither8 = dither_8x8_128[dstY & 7];
  2493. }
  2494. if (dstY >= dstH-2) {
  2495. // hmm looks like we can't use MMX here without overwriting this array's tail
  2496. find_c_packed_planar_out_funcs(c, &yuv2yuv1, &yuv2yuvX,
  2497. &yuv2packed1, &yuv2packed2,
  2498. &yuv2packedX);
  2499. }
  2500. {
  2501. const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2502. const int16_t **chrUSrcPtr= (const int16_t **) chrUPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2503. const int16_t **chrVSrcPtr= (const int16_t **) chrVPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2504. const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
  2505. if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12 like
  2506. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2507. if ((dstY&chrSkipMask) || isGray(dstFormat))
  2508. dest[1] = dest[2] = NULL; //FIXME split functions in lumi / chromi
  2509. if (c->yuv2yuv1 && vLumFilterSize == 1 && vChrFilterSize == 1) { // unscaled YV12
  2510. const int16_t *alpBuf= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? alpSrcPtr[0] : NULL;
  2511. yuv2yuv1(c, lumSrcPtr[0], chrUSrcPtr[0], chrVSrcPtr[0], alpBuf,
  2512. dest, dstW, chrDstW);
  2513. } else { //General YV12
  2514. yuv2yuvX(c, vLumFilter + dstY * vLumFilterSize,
  2515. lumSrcPtr, vLumFilterSize,
  2516. vChrFilter + chrDstY * vChrFilterSize,
  2517. chrUSrcPtr, chrVSrcPtr, vChrFilterSize,
  2518. alpSrcPtr, dest, dstW, chrDstW);
  2519. }
  2520. } else {
  2521. assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2522. assert(chrUSrcPtr + vChrFilterSize - 1 < chrUPixBuf + vChrBufSize*2);
  2523. if (c->yuv2packed1 && vLumFilterSize == 1 && vChrFilterSize == 2) { //unscaled RGB
  2524. int chrAlpha = vChrFilter[2 * dstY + 1];
  2525. yuv2packed1(c, *lumSrcPtr, chrUSrcPtr, chrVSrcPtr,
  2526. alpPixBuf ? *alpSrcPtr : NULL,
  2527. dest[0], dstW, chrAlpha, dstY);
  2528. } else if (c->yuv2packed2 && vLumFilterSize == 2 && vChrFilterSize == 2) { //bilinear upscale RGB
  2529. int lumAlpha = vLumFilter[2 * dstY + 1];
  2530. int chrAlpha = vChrFilter[2 * dstY + 1];
                    lumMmxFilter[2] =
                    lumMmxFilter[3] = vLumFilter[2 * dstY   ] * 0x10001;
                    chrMmxFilter[2] =
                    chrMmxFilter[3] = vChrFilter[2 * chrDstY] * 0x10001;
                    yuv2packed2(c, lumSrcPtr, chrUSrcPtr, chrVSrcPtr,
                                alpPixBuf ? alpSrcPtr : NULL,
                                dest[0], dstW, lumAlpha, chrAlpha, dstY);
                } else { //general RGB
                    yuv2packedX(c, vLumFilter + dstY * vLumFilterSize,
                                lumSrcPtr, vLumFilterSize,
                                vChrFilter + dstY * vChrFilterSize,
                                chrUSrcPtr, chrVSrcPtr, vChrFilterSize,
                                alpSrcPtr, dest[0], dstW, dstY);
                }
            }
        }
    }
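
    /* The destination has an alpha plane but the source provided none:
     * fill it with fully opaque (255) for the lines written in this call. */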
    if ((dstFormat == PIX_FMT_YUVA420P) && !alpPixBuf)
        fillPlane(dst[3], dstStride[3], dstW, dstY - lastDstY, lastDstY, 255);
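
    /* The MMX2 output code uses non-temporal stores, so flush the
     * write-combining buffers before handing the frame back. */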
#if HAVE_MMX2
    if (av_get_cpu_flags() & AV_CPU_FLAG_MMX2)
        __asm__ volatile ("sfence" ::: "memory");
#endif
    emms_c();

    /* store changed local vars back in the context */
    c->dstY         = dstY;
    c->lumBufIndex  = lumBufIndex;
    c->chrBufIndex  = chrBufIndex;
    c->lastInLumBuf = lastInLumBuf;
    c->lastInChrBuf = lastInChrBuf;

    return dstY - lastDstY;
}

static av_cold void sws_init_swScale_c(SwsContext *c)
{
    enum PixelFormat srcFormat = c->srcFormat;

    find_c_packed_planar_out_funcs(c, &c->yuv2yuv1, &c->yuv2yuvX,
                                   &c->yuv2packed1, &c->yuv2packed2,
                                   &c->yuv2packedX);
    c->chrToYV12 = NULL;
    switch (srcFormat) {
    case PIX_FMT_YUYV422  : c->chrToYV12 = yuy2ToUV_c; break;
    case PIX_FMT_UYVY422  : c->chrToYV12 = uyvyToUV_c; break;
    case PIX_FMT_NV12     : c->chrToYV12 = nv12ToUV_c; break;
    case PIX_FMT_NV21     : c->chrToYV12 = nv21ToUV_c; break;
    case PIX_FMT_RGB8     :
    case PIX_FMT_BGR8     :
    case PIX_FMT_PAL8     :
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_RGB4_BYTE: c->chrToYV12 = palToUV_c;  break;
#if HAVE_BIGENDIAN
    case PIX_FMT_YUV444P9LE :
    case PIX_FMT_YUV420P9LE :
    case PIX_FMT_YUV422P10LE:
    case PIX_FMT_YUV420P10LE:
    case PIX_FMT_YUV444P10LE:
    case PIX_FMT_YUV420P16LE:
    case PIX_FMT_YUV422P16LE:
    case PIX_FMT_YUV444P16LE: c->chrToYV12 = bswap16UV_c; break;
#else
    case PIX_FMT_YUV444P9BE :
    case PIX_FMT_YUV420P9BE :
    case PIX_FMT_YUV444P10BE:
    case PIX_FMT_YUV422P10BE:
    case PIX_FMT_YUV420P10BE:
    case PIX_FMT_YUV420P16BE:
    case PIX_FMT_YUV422P16BE:
    case PIX_FMT_YUV444P16BE: c->chrToYV12 = bswap16UV_c; break;
#endif
    }
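
    /* RGB inputs get different converters depending on whether the chroma
     * planes are horizontally subsampled; the _half_ variants average two
     * adjacent source pixels per output chroma sample. */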
    if (c->chrSrcHSubSample) {
        switch (srcFormat) {
        case PIX_FMT_RGB48BE : c->chrToYV12 = rgb48BEToUV_half_c; break;
        case PIX_FMT_RGB48LE : c->chrToYV12 = rgb48LEToUV_half_c; break;
        case PIX_FMT_BGR48BE : c->chrToYV12 = bgr48BEToUV_half_c; break;
        case PIX_FMT_BGR48LE : c->chrToYV12 = bgr48LEToUV_half_c; break;
        case PIX_FMT_RGB32   : c->chrToYV12 = bgr32ToUV_half_c;   break;
        case PIX_FMT_RGB32_1 : c->chrToYV12 = bgr321ToUV_half_c;  break;
        case PIX_FMT_BGR24   : c->chrToYV12 = bgr24ToUV_half_c;   break;
        case PIX_FMT_BGR565LE: c->chrToYV12 = bgr16leToUV_half_c; break;
        case PIX_FMT_BGR565BE: c->chrToYV12 = bgr16beToUV_half_c; break;
        case PIX_FMT_BGR555LE: c->chrToYV12 = bgr15leToUV_half_c; break;
        case PIX_FMT_BGR555BE: c->chrToYV12 = bgr15beToUV_half_c; break;
        case PIX_FMT_BGR32   : c->chrToYV12 = rgb32ToUV_half_c;   break;
        case PIX_FMT_BGR32_1 : c->chrToYV12 = rgb321ToUV_half_c;  break;
        case PIX_FMT_RGB24   : c->chrToYV12 = rgb24ToUV_half_c;   break;
        case PIX_FMT_RGB565LE: c->chrToYV12 = rgb16leToUV_half_c; break;
        case PIX_FMT_RGB565BE: c->chrToYV12 = rgb16beToUV_half_c; break;
        case PIX_FMT_RGB555LE: c->chrToYV12 = rgb15leToUV_half_c; break;
        case PIX_FMT_RGB555BE: c->chrToYV12 = rgb15beToUV_half_c; break;
        case PIX_FMT_GBR24P  : c->chrToYV12 = gbr24pToUV_half_c;  break;
        }
    } else {
        switch (srcFormat) {
        case PIX_FMT_RGB48BE : c->chrToYV12 = rgb48BEToUV_c; break;
        case PIX_FMT_RGB48LE : c->chrToYV12 = rgb48LEToUV_c; break;
        case PIX_FMT_BGR48BE : c->chrToYV12 = bgr48BEToUV_c; break;
        case PIX_FMT_BGR48LE : c->chrToYV12 = bgr48LEToUV_c; break;
        case PIX_FMT_RGB32   : c->chrToYV12 = bgr32ToUV_c;   break;
        case PIX_FMT_RGB32_1 : c->chrToYV12 = bgr321ToUV_c;  break;
        case PIX_FMT_BGR24   : c->chrToYV12 = bgr24ToUV_c;   break;
        case PIX_FMT_BGR565LE: c->chrToYV12 = bgr16leToUV_c; break;
        case PIX_FMT_BGR565BE: c->chrToYV12 = bgr16beToUV_c; break;
        case PIX_FMT_BGR555LE: c->chrToYV12 = bgr15leToUV_c; break;
        case PIX_FMT_BGR555BE: c->chrToYV12 = bgr15beToUV_c; break;
        case PIX_FMT_BGR32   : c->chrToYV12 = rgb32ToUV_c;   break;
        case PIX_FMT_BGR32_1 : c->chrToYV12 = rgb321ToUV_c;  break;
        case PIX_FMT_RGB24   : c->chrToYV12 = rgb24ToUV_c;   break;
        case PIX_FMT_RGB565LE: c->chrToYV12 = rgb16leToUV_c; break;
        case PIX_FMT_RGB565BE: c->chrToYV12 = rgb16beToUV_c; break;
        case PIX_FMT_RGB555LE: c->chrToYV12 = rgb15leToUV_c; break;
        case PIX_FMT_RGB555BE: c->chrToYV12 = rgb15beToUV_c; break;
        case PIX_FMT_GBR24P  : c->chrToYV12 = gbr24pToUV_c;  break;
        }
    }
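
    /* Select the luma converter, and the alpha converter if an alpha
     * plane is being processed. */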
    c->lumToYV12 = NULL;
    c->alpToYV12 = NULL;
    switch (srcFormat) {
#if HAVE_BIGENDIAN
    case PIX_FMT_YUV444P9LE :
    case PIX_FMT_YUV420P9LE :
    case PIX_FMT_YUV422P10LE:
    case PIX_FMT_YUV420P10LE:
    case PIX_FMT_YUV444P10LE:
    case PIX_FMT_YUV420P16LE:
    case PIX_FMT_YUV422P16LE:
    case PIX_FMT_YUV444P16LE:
    case PIX_FMT_GRAY16LE   : c->lumToYV12 = bswap16Y_c; break;
#else
    case PIX_FMT_YUV444P9BE :
    case PIX_FMT_YUV420P9BE :
    case PIX_FMT_YUV444P10BE:
    case PIX_FMT_YUV422P10BE:
    case PIX_FMT_YUV420P10BE:
    case PIX_FMT_YUV420P16BE:
    case PIX_FMT_YUV422P16BE:
    case PIX_FMT_YUV444P16BE:
    case PIX_FMT_GRAY16BE   : c->lumToYV12 = bswap16Y_c; break;
#endif
    case PIX_FMT_YUYV422  :
    case PIX_FMT_Y400A    : c->lumToYV12 = yuy2ToY_c;    break;
    case PIX_FMT_UYVY422  : c->lumToYV12 = uyvyToY_c;    break;
    case PIX_FMT_BGR24    : c->lumToYV12 = bgr24ToY_c;   break;
    case PIX_FMT_BGR565LE : c->lumToYV12 = bgr16leToY_c; break;
    case PIX_FMT_BGR565BE : c->lumToYV12 = bgr16beToY_c; break;
    case PIX_FMT_BGR555LE : c->lumToYV12 = bgr15leToY_c; break;
    case PIX_FMT_BGR555BE : c->lumToYV12 = bgr15beToY_c; break;
    case PIX_FMT_RGB24    : c->lumToYV12 = rgb24ToY_c;   break;
    case PIX_FMT_RGB565LE : c->lumToYV12 = rgb16leToY_c; break;
    case PIX_FMT_RGB565BE : c->lumToYV12 = rgb16beToY_c; break;
    case PIX_FMT_RGB555LE : c->lumToYV12 = rgb15leToY_c; break;
    case PIX_FMT_RGB555BE : c->lumToYV12 = rgb15beToY_c; break;
    case PIX_FMT_RGB8     :
    case PIX_FMT_BGR8     :
    case PIX_FMT_PAL8     :
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_RGB4_BYTE: c->lumToYV12 = palToY_c;      break;
    case PIX_FMT_MONOBLACK: c->lumToYV12 = monoblack2Y_c; break;
    case PIX_FMT_MONOWHITE: c->lumToYV12 = monowhite2Y_c; break;
    case PIX_FMT_RGB32    : c->lumToYV12 = bgr32ToY_c;   break;
    case PIX_FMT_RGB32_1  : c->lumToYV12 = bgr321ToY_c;  break;
    case PIX_FMT_BGR32    : c->lumToYV12 = rgb32ToY_c;   break;
    case PIX_FMT_BGR32_1  : c->lumToYV12 = rgb321ToY_c;  break;
    case PIX_FMT_RGB48BE  : c->lumToYV12 = rgb48BEToY_c; break;
    case PIX_FMT_RGB48LE  : c->lumToYV12 = rgb48LEToY_c; break;
    case PIX_FMT_BGR48BE  : c->lumToYV12 = bgr48BEToY_c; break;
    case PIX_FMT_BGR48LE  : c->lumToYV12 = bgr48LEToY_c; break;
    case PIX_FMT_GBR24P   : c->lumToYV12 = gbr24pToY_c;  break;
    }
    if (c->alpPixBuf) {
        switch (srcFormat) {
        case PIX_FMT_BGRA :
        case PIX_FMT_RGBA : c->alpToYV12 = rgbaToA_c; break;
        case PIX_FMT_ABGR :
        case PIX_FMT_ARGB : c->alpToYV12 = abgrToA_c; break;
        case PIX_FMT_Y400A: c->alpToYV12 = uyvyToY_c; break;
        case PIX_FMT_PAL8 : c->alpToYV12 = palToA_c;  break;
        }
    }
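
    /* Choose the horizontal scaler by bit depth: 8-bit input is scaled
     * into 15-bit intermediates for up-to-10-bit output and into 19-bit
     * intermediates otherwise; 16-bit input is handled analogously. */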
    if (c->srcBpc == 8) {
        if (c->dstBpc <= 10) {
            c->hyScale = c->hcScale = hScale8To15_c;
            if (c->flags & SWS_FAST_BILINEAR) {
                c->hyscale_fast = hyscale_fast_c;
                c->hcscale_fast = hcscale_fast_c;
            }
        } else {
            c->hyScale = c->hcScale = hScale8To19_c;
        }
    } else {
        c->hyScale = c->hcScale = c->dstBpc > 10 ? hScale16To19_c : hScale16To15_c;
    }
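
    /* Full (JPEG) <-> limited (MPEG) range conversion on the intermediate
     * planes; RGB destinations are skipped here, since range is handled
     * during the YUV->RGB conversion itself. */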
    if (c->srcRange != c->dstRange && !isAnyRGB(c->dstFormat)) {
        if (c->dstBpc <= 10) {
            if (c->srcRange) {
                c->lumConvertRange = lumRangeFromJpeg_c;
                c->chrConvertRange = chrRangeFromJpeg_c;
            } else {
                c->lumConvertRange = lumRangeToJpeg_c;
                c->chrConvertRange = chrRangeToJpeg_c;
            }
        } else {
            if (c->srcRange) {
                c->lumConvertRange = lumRangeFromJpeg16_c;
                c->chrConvertRange = chrRangeFromJpeg16_c;
            } else {
                c->lumConvertRange = lumRangeToJpeg16_c;
                c->chrConvertRange = chrRangeToJpeg16_c;
            }
        }
    }
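
    /* Gray and monochrome formats carry no chroma, so horizontal chroma
     * scaling can be skipped entirely. */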
    if (!(isGray(srcFormat) || isGray(c->dstFormat) ||
          srcFormat == PIX_FMT_MONOBLACK || srcFormat == PIX_FMT_MONOWHITE))
        c->needs_hcscale = 1;
}

SwsFunc ff_getSwsFunc(SwsContext *c)
{
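    /* Install the C defaults first; the platform-specific initializers
     * then override individual function pointers where faster code
     * exists. */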
    sws_init_swScale_c(c);

    if (HAVE_MMX)
        ff_sws_init_swScale_mmx(c);
    if (HAVE_ALTIVEC)
        ff_sws_init_swScale_altivec(c);

    return swScale;
}