jsimd_neon.S

  1. /*
  2. * Armv8 Neon optimizations for libjpeg-turbo
  3. *
  4. * Copyright (C) 2009-2011, Nokia Corporation and/or its subsidiary(-ies).
  5. * All Rights Reserved.
  6. * Author: Siarhei Siamashka <siarhei.siamashka@nokia.com>
  7. * Copyright (C) 2013-2014, Linaro Limited. All Rights Reserved.
  8. * Author: Ragesh Radhakrishnan <ragesh.r@linaro.org>
  9. * Copyright (C) 2014-2016, 2020, D. R. Commander. All Rights Reserved.
  10. * Copyright (C) 2015-2016, 2018, Matthieu Darbois. All Rights Reserved.
  11. * Copyright (C) 2016, Siarhei Siamashka. All Rights Reserved.
  12. *
  13. * This software is provided 'as-is', without any express or implied
  14. * warranty. In no event will the authors be held liable for any damages
  15. * arising from the use of this software.
  16. *
  17. * Permission is granted to anyone to use this software for any purpose,
  18. * including commercial applications, and to alter it and redistribute it
  19. * freely, subject to the following restrictions:
  20. *
  21. * 1. The origin of this software must not be misrepresented; you must not
  22. * claim that you wrote the original software. If you use this software
  23. * in a product, an acknowledgment in the product documentation would be
  24. * appreciated but is not required.
  25. * 2. Altered source versions must be plainly marked as such, and must not be
  26. * misrepresented as being the original software.
  27. * 3. This notice may not be removed or altered from any source distribution.
  28. */
  29. #if defined(__linux__) && defined(__ELF__)
  30. .section .note.GNU-stack, "", %progbits /* mark stack as non-executable */
  31. #endif
  32. #if defined(__APPLE__)
  33. .section __DATA, __const
  34. #elif defined(_WIN32)
  35. .section .rdata
  36. #else
  37. .section .rodata, "a", %progbits
  38. #endif
  39. /* Constants for jsimd_idct_islow_neon() */
  40. #define F_0_298 2446 /* FIX(0.298631336) */
  41. #define F_0_390 3196 /* FIX(0.390180644) */
  42. #define F_0_541 4433 /* FIX(0.541196100) */
  43. #define F_0_765 6270 /* FIX(0.765366865) */
  44. #define F_0_899 7373 /* FIX(0.899976223) */
  45. #define F_1_175 9633 /* FIX(1.175875602) */
  46. #define F_1_501 12299 /* FIX(1.501321110) */
  47. #define F_1_847 15137 /* FIX(1.847759065) */
  48. #define F_1_961 16069 /* FIX(1.961570560) */
  49. #define F_2_053 16819 /* FIX(2.053119869) */
  50. #define F_2_562 20995 /* FIX(2.562915447) */
  51. #define F_3_072 25172 /* FIX(3.072711026) */
  52. .balign 16
  53. Ljsimd_idct_islow_neon_consts:
  54. .short F_0_298
  55. .short -F_0_390
  56. .short F_0_541
  57. .short F_0_765
  58. .short -F_0_899
  59. .short F_1_175
  60. .short F_1_501
  61. .short -F_1_847
  62. .short -F_1_961
  63. .short F_2_053
  64. .short -F_2_562
  65. .short F_3_072
  66. .short 0 /* padding */
  67. .short 0
  68. .short 0
  69. .short 0
  70. #undef F_0_298
  71. #undef F_0_390
  72. #undef F_0_541
  73. #undef F_0_765
  74. #undef F_0_899
  75. #undef F_1_175
  76. #undef F_1_501
  77. #undef F_1_847
  78. #undef F_1_961
  79. #undef F_2_053
  80. #undef F_2_562
  81. #undef F_3_072
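/* The F_* values above are the islow IDCT constants in CONST_BITS = 13
 * fixed point (CONST_BITS is #defined further below for
 * jsimd_idct_islow_neon()), i.e. the same scaling used by jidctint.c.
 * A minimal C sketch of that assumed FIX() scaling:
 *
 *   #include <assert.h>
 *   #define CONST_BITS  13
 *   #define FIX(x)  ((int)((x) * (1 << CONST_BITS) + 0.5))
 *   int main(void) {
 *     assert(FIX(0.298631336) == 2446);    /- F_0_298 -/
 *     assert(FIX(1.847759065) == 15137);   /- F_1_847 -/
 *     assert(FIX(3.072711026) == 25172);   /- F_3_072 -/
 *     return 0;
 *   }
 *
 * (The /- -/ markers stand in for C comments so that this note remains a
 * single assembler comment.)
 */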
  82. /* Constants for jsimd_ycc_*_neon() */
  83. .balign 16
  84. Ljsimd_ycc_rgb_neon_consts:
  85. .short 0, 0, 0, 0
  86. .short 22971, -11277, -23401, 29033
  87. .short -128, -128, -128, -128
  88. .short -128, -128, -128, -128
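/* The row of multipliers above (22971, -11277, -23401, 29033) drives the
 * YCbCr->RGB math in do_yuv_to_rgb_stage1/stage2 below ("multiply by 22971",
 * etc.), with rounding right shifts of 14 bits for the R/B terms and 15 bits
 * for the G terms.  A scalar C sketch of the per-pixel computation
 * (illustrative helper names; assumes the standard JPEG/BT.601 conversion):
 *
 *   static unsigned char clamp255(int x) {
 *     return (unsigned char)(x < 0 ? 0 : x > 255 ? 255 : x);
 *   }
 *   static void ycc_to_rgb_pixel(int y, int cb, int cr, unsigned char rgb[3]) {
 *     int u = cb - 128, v = cr - 128;
 *     rgb[0] = clamp255(y + (( 22971 * v + (1 << 13)) >> 14));
 *     rgb[1] = clamp255(y + ((-11277 * u - 23401 * v + (1 << 14)) >> 15));
 *     rgb[2] = clamp255(y + (( 29033 * u + (1 << 13)) >> 14));
 *   }
 */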
  89. /* Constants for jsimd_*_ycc_neon() */
  90. .balign 16
  91. Ljsimd_rgb_ycc_neon_consts:
  92. .short 19595, 38470, 7471, 11059
  93. .short 21709, 32768, 27439, 5329
  94. .short 32767, 128, 32767, 128
  95. .short 32767, 128, 32767, 128
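/* The first eight values above match the forward (RGB->YCbCr) coefficients of
 * jccolor.c scaled by 2^16, i.e. (int)(x * 65536 + 0.5) for x = 0.29900,
 * 0.58700, 0.11400, 0.16874, 0.33126, 0.50000 (= 32768), 0.41869 and 0.08131,
 * corresponding to:
 *
 *   Y  =  0.29900 R + 0.58700 G + 0.11400 B
 *   Cb = -0.16874 R - 0.33126 G + 0.50000 B + 128
 *   Cr =  0.50000 R - 0.41869 G - 0.08131 B + 128
 *
 * The trailing 32767/128 pairs presumably supply the rounding constant and the
 * +128 chroma offset used by the jsimd_*_ycc_neon() code later in this file.
 */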
  96. /* Constants for jsimd_fdct_islow_neon() */
  97. #define F_0_298 2446 /* FIX(0.298631336) */
  98. #define F_0_390 3196 /* FIX(0.390180644) */
  99. #define F_0_541 4433 /* FIX(0.541196100) */
  100. #define F_0_765 6270 /* FIX(0.765366865) */
  101. #define F_0_899 7373 /* FIX(0.899976223) */
  102. #define F_1_175 9633 /* FIX(1.175875602) */
  103. #define F_1_501 12299 /* FIX(1.501321110) */
  104. #define F_1_847 15137 /* FIX(1.847759065) */
  105. #define F_1_961 16069 /* FIX(1.961570560) */
  106. #define F_2_053 16819 /* FIX(2.053119869) */
  107. #define F_2_562 20995 /* FIX(2.562915447) */
  108. #define F_3_072 25172 /* FIX(3.072711026) */
  109. .balign 16
  110. Ljsimd_fdct_islow_neon_consts:
  111. .short F_0_298
  112. .short -F_0_390
  113. .short F_0_541
  114. .short F_0_765
  115. .short -F_0_899
  116. .short F_1_175
  117. .short F_1_501
  118. .short -F_1_847
  119. .short -F_1_961
  120. .short F_2_053
  121. .short -F_2_562
  122. .short F_3_072
  123. .short 0 /* padding */
  124. .short 0
  125. .short 0
  126. .short 0
  127. #undef F_0_298
  128. #undef F_0_390
  129. #undef F_0_541
  130. #undef F_0_765
  131. #undef F_0_899
  132. #undef F_1_175
  133. #undef F_1_501
  134. #undef F_1_847
  135. #undef F_1_961
  136. #undef F_2_053
  137. #undef F_2_562
  138. #undef F_3_072
  139. /* Constants for jsimd_huff_encode_one_block_neon() */
  140. .balign 16
  141. Ljsimd_huff_encode_one_block_neon_consts:
  142. .byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, \
  143. 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
  144. .byte 0, 1, 2, 3, 16, 17, 32, 33, \
  145. 18, 19, 4, 5, 6, 7, 20, 21 /* L0 => L3 : 4 lines OK */
  146. .byte 34, 35, 48, 49, 255, 255, 50, 51, \
  147. 36, 37, 22, 23, 8, 9, 10, 11 /* L0 => L3 : 4 lines OK */
  148. .byte 8, 9, 22, 23, 36, 37, 50, 51, \
  149. 255, 255, 255, 255, 255, 255, 52, 53 /* L1 => L4 : 4 lines OK */
  150. .byte 54, 55, 40, 41, 26, 27, 12, 13, \
  151. 14, 15, 28, 29, 42, 43, 56, 57 /* L0 => L3 : 4 lines OK */
  152. .byte 6, 7, 20, 21, 34, 35, 48, 49, \
  153. 50, 51, 36, 37, 22, 23, 8, 9 /* L4 => L7 : 4 lines OK */
  154. .byte 42, 43, 28, 29, 14, 15, 30, 31, \
  155. 44, 45, 58, 59, 255, 255, 255, 255 /* L1 => L4 : 4 lines OK */
  156. .byte 255, 255, 255, 255, 56, 57, 42, 43, \
  157. 28, 29, 14, 15, 30, 31, 44, 45 /* L3 => L6 : 4 lines OK */
  158. .byte 26, 27, 40, 41, 42, 43, 28, 29, \
  159. 14, 15, 30, 31, 44, 45, 46, 47 /* L5 => L7 : 3 lines OK */
  160. .byte 255, 255, 255, 255, 0, 1, 255, 255, \
  161. 255, 255, 255, 255, 255, 255, 255, 255 /* L4 : 1 line OK */
  162. .byte 255, 255, 255, 255, 255, 255, 255, 255, \
  163. 0, 1, 16, 17, 2, 3, 255, 255 /* L5 => L6 : 2 lines OK */
  164. .byte 255, 255, 255, 255, 255, 255, 255, 255, \
  165. 255, 255, 255, 255, 8, 9, 22, 23 /* L5 => L6 : 2 lines OK */
  166. .byte 4, 5, 6, 7, 255, 255, 255, 255, \
  167. 255, 255, 255, 255, 255, 255, 255, 255 /* L7 : 1 line OK */
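/* Notes on the tables above: the leading 0x01..0x80 row is a repeating set of
 * per-lane bit masks, and the remaining rows are byte indices for TBL lookups.
 * On AArch64, a TBL index that is out of range (here 0xFF) yields a zero byte
 * in that lane, which is presumably how these tables blank the positions that
 * a given shuffle does not cover.
 */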
  168. .text
  169. /*****************************************************************************/
  170. /* Supplementary macro for setting function attributes */
  171. .macro asm_function fname
  172. #ifdef __APPLE__
  173. .private_extern _\fname
  174. .globl _\fname
  175. _\fname:
  176. #else
  177. .global \fname
  178. #ifdef __ELF__
  179. .hidden \fname
  180. .type \fname, %function
  181. #endif
  182. \fname:
  183. #endif
  184. .endm
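/* For reference, "asm_function jsimd_idct_islow_neon" expands to the
 * following on ELF targets:
 *
 *   .global jsimd_idct_islow_neon
 *   .hidden jsimd_idct_islow_neon
 *   .type   jsimd_idct_islow_neon, %function
 *   jsimd_idct_islow_neon:
 *
 * On Apple (Mach-O) targets the symbol gets a leading underscore and is
 * declared with .private_extern/.globl instead.
 */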
  185. /* Get symbol location */
  186. .macro get_symbol_loc reg, symbol
  187. #ifdef __APPLE__
  188. adrp \reg, \symbol@PAGE
  189. add \reg, \reg, \symbol@PAGEOFF
  190. #else
  191. adrp \reg, \symbol
  192. add \reg, \reg, :lo12:\symbol
  193. #endif
  194. .endm
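/* get_symbol_loc materializes a PC-relative address in two instructions:
 * ADRP computes the 4 KiB page containing the symbol and ADD supplies the
 * low 12 bits (:lo12: on ELF, @PAGEOFF on Mach-O).  For example,
 * "get_symbol_loc x15, Ljsimd_idct_islow_neon_consts" expands on non-Apple
 * targets to:
 *
 *   adrp x15, Ljsimd_idct_islow_neon_consts
 *   add  x15, x15, :lo12:Ljsimd_idct_islow_neon_consts
 */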
  195. .macro transpose_8x8 l0, l1, l2, l3, l4, l5, l6, l7, t0, t1, t2, t3
  196. trn1 \t0\().8h, \l0\().8h, \l1\().8h
  197. trn1 \t1\().8h, \l2\().8h, \l3\().8h
  198. trn1 \t2\().8h, \l4\().8h, \l5\().8h
  199. trn1 \t3\().8h, \l6\().8h, \l7\().8h
  200. trn2 \l1\().8h, \l0\().8h, \l1\().8h
  201. trn2 \l3\().8h, \l2\().8h, \l3\().8h
  202. trn2 \l5\().8h, \l4\().8h, \l5\().8h
  203. trn2 \l7\().8h, \l6\().8h, \l7\().8h
  204. trn1 \l4\().4s, \t2\().4s, \t3\().4s
  205. trn2 \t3\().4s, \t2\().4s, \t3\().4s
  206. trn1 \t2\().4s, \t0\().4s, \t1\().4s
  207. trn2 \l2\().4s, \t0\().4s, \t1\().4s
  208. trn1 \t0\().4s, \l1\().4s, \l3\().4s
  209. trn2 \l3\().4s, \l1\().4s, \l3\().4s
  210. trn2 \t1\().4s, \l5\().4s, \l7\().4s
  211. trn1 \l5\().4s, \l5\().4s, \l7\().4s
  212. trn2 \l6\().2d, \l2\().2d, \t3\().2d
  213. trn1 \l0\().2d, \t2\().2d, \l4\().2d
  214. trn1 \l1\().2d, \t0\().2d, \l5\().2d
  215. trn2 \l7\().2d, \l3\().2d, \t1\().2d
  216. trn1 \l2\().2d, \l2\().2d, \t3\().2d
  217. trn2 \l4\().2d, \t2\().2d, \l4\().2d
  218. trn1 \l3\().2d, \l3\().2d, \t1\().2d
  219. trn2 \l5\().2d, \t0\().2d, \l5\().2d
  220. .endm
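/* The trn1/trn2 sequence above appears to implement a full 8x8 transpose of
 * the eight .8h rows held in l0..l7 (t0..t3 are scratch).  A scalar C
 * reference of the intended result (sketch only; the helper name is
 * illustrative):
 *
 *   #include <stdint.h>
 *   static void transpose_8x8_ref(int16_t m[8][8]) {
 *     for (int i = 0; i < 8; i++)
 *       for (int j = i + 1; j < 8; j++) {
 *         int16_t tmp = m[i][j];  m[i][j] = m[j][i];  m[j][i] = tmp;
 *       }
 *   }
 */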
  221. #define CENTERJSAMPLE 128
  222. /*****************************************************************************/
  223. /*
  224. * Perform dequantization and inverse DCT on one block of coefficients.
  225. *
  226. * GLOBAL(void)
  227. * jsimd_idct_islow_neon(void *dct_table, JCOEFPTR coef_block,
  228. * JSAMPARRAY output_buf, JDIMENSION output_col)
  229. */
  230. #define CONST_BITS 13
  231. #define PASS1_BITS 2
  232. #define XFIX_P_0_298 v0.h[0]
  233. #define XFIX_N_0_390 v0.h[1]
  234. #define XFIX_P_0_541 v0.h[2]
  235. #define XFIX_P_0_765 v0.h[3]
  236. #define XFIX_N_0_899 v0.h[4]
  237. #define XFIX_P_1_175 v0.h[5]
  238. #define XFIX_P_1_501 v0.h[6]
  239. #define XFIX_N_1_847 v0.h[7]
  240. #define XFIX_N_1_961 v1.h[0]
  241. #define XFIX_P_2_053 v1.h[1]
  242. #define XFIX_N_2_562 v1.h[2]
  243. #define XFIX_P_3_072 v1.h[3]
  244. asm_function jsimd_idct_islow_neon
  245. DCT_TABLE .req x0
  246. COEF_BLOCK .req x1
  247. OUTPUT_BUF .req x2
  248. OUTPUT_COL .req x3
  249. TMP1 .req x0
  250. TMP2 .req x1
  251. TMP3 .req x9
  252. TMP4 .req x10
  253. TMP5 .req x11
  254. TMP6 .req x12
  255. TMP7 .req x13
  256. TMP8 .req x14
  257. /* OUTPUT_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
  258. guarantee that the upper (unused) 32 bits of x3 are valid. This
  259. instruction ensures that those bits are set to zero. */
  260. uxtw x3, w3
  261. sub sp, sp, #64
  262. get_symbol_loc x15, Ljsimd_idct_islow_neon_consts
  263. mov x10, sp
  264. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x10], #32
  265. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x10], #32
  266. ld1 {v0.8h, v1.8h}, [x15]
  267. ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [COEF_BLOCK], #64
  268. ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [DCT_TABLE], #64
  269. ld1 {v6.8h, v7.8h, v8.8h, v9.8h}, [COEF_BLOCK], #64
  270. ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [DCT_TABLE], #64
  271. cmeq v16.8h, v3.8h, #0
  272. cmeq v26.8h, v4.8h, #0
  273. cmeq v27.8h, v5.8h, #0
  274. cmeq v28.8h, v6.8h, #0
  275. cmeq v29.8h, v7.8h, #0
  276. cmeq v30.8h, v8.8h, #0
  277. cmeq v31.8h, v9.8h, #0
  278. and v10.16b, v16.16b, v26.16b
  279. and v11.16b, v27.16b, v28.16b
  280. and v12.16b, v29.16b, v30.16b
  281. and v13.16b, v31.16b, v10.16b
  282. and v14.16b, v11.16b, v12.16b
  283. mul v2.8h, v2.8h, v18.8h
  284. and v15.16b, v13.16b, v14.16b
  285. shl v10.8h, v2.8h, #(PASS1_BITS)
  286. sqxtn v16.8b, v15.8h
  287. mov TMP1, v16.d[0]
  288. mvn TMP2, TMP1
  289. cbnz TMP2, 2f
  290. /* case all AC coeffs are zeros */
  291. dup v2.2d, v10.d[0]
  292. dup v6.2d, v10.d[1]
  293. mov v3.16b, v2.16b
  294. mov v7.16b, v6.16b
  295. mov v4.16b, v2.16b
  296. mov v8.16b, v6.16b
  297. mov v5.16b, v2.16b
  298. mov v9.16b, v6.16b
  299. 1:
  300. /* for this transpose, we should organise data like this:
  301. * 00, 01, 02, 03, 40, 41, 42, 43
  302. * 10, 11, 12, 13, 50, 51, 52, 53
  303. * 20, 21, 22, 23, 60, 61, 62, 63
  304. * 30, 31, 32, 33, 70, 71, 72, 73
  305. * 04, 05, 06, 07, 44, 45, 46, 47
  306. * 14, 15, 16, 17, 54, 55, 56, 57
  307. * 24, 25, 26, 27, 64, 65, 66, 67
  308. * 34, 35, 36, 37, 74, 75, 76, 77
  309. */
  310. trn1 v28.8h, v2.8h, v3.8h
  311. trn1 v29.8h, v4.8h, v5.8h
  312. trn1 v30.8h, v6.8h, v7.8h
  313. trn1 v31.8h, v8.8h, v9.8h
  314. trn2 v16.8h, v2.8h, v3.8h
  315. trn2 v17.8h, v4.8h, v5.8h
  316. trn2 v18.8h, v6.8h, v7.8h
  317. trn2 v19.8h, v8.8h, v9.8h
  318. trn1 v2.4s, v28.4s, v29.4s
  319. trn1 v6.4s, v30.4s, v31.4s
  320. trn1 v3.4s, v16.4s, v17.4s
  321. trn1 v7.4s, v18.4s, v19.4s
  322. trn2 v4.4s, v28.4s, v29.4s
  323. trn2 v8.4s, v30.4s, v31.4s
  324. trn2 v5.4s, v16.4s, v17.4s
  325. trn2 v9.4s, v18.4s, v19.4s
  326. /* Even part: reverse the even part of the forward DCT. */
  327. add v18.8h, v4.8h, v8.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
  328. add v22.8h, v2.8h, v6.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
  329. smull2 v19.4s, v18.8h, XFIX_P_0_541 /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
  330. sub v26.8h, v2.8h, v6.8h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
  331. smull v18.4s, v18.4h, XFIX_P_0_541 /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
  332. sshll2 v23.4s, v22.8h, #(CONST_BITS) /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
  333. mov v21.16b, v19.16b /* tmp3 = z1 */
  334. mov v20.16b, v18.16b /* tmp3 = z1 */
  335. smlal2 v19.4s, v8.8h, XFIX_N_1_847 /* tmp2h tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
  336. smlal v18.4s, v8.4h, XFIX_N_1_847 /* tmp2l tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
  337. sshll2 v27.4s, v26.8h, #(CONST_BITS) /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
  338. smlal2 v21.4s, v4.8h, XFIX_P_0_765 /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
  339. smlal v20.4s, v4.4h, XFIX_P_0_765 /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
  340. sshll v22.4s, v22.4h, #(CONST_BITS) /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
  341. sshll v26.4s, v26.4h, #(CONST_BITS) /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
  342. add v2.4s, v22.4s, v20.4s /* tmp10l tmp10 = tmp0 + tmp3; */
  343. sub v6.4s, v22.4s, v20.4s /* tmp13l tmp13 = tmp0 - tmp3; */
  344. add v8.4s, v26.4s, v18.4s /* tmp11l tmp11 = tmp1 + tmp2; */
  345. sub v4.4s, v26.4s, v18.4s /* tmp12l tmp12 = tmp1 - tmp2; */
  346. add v28.4s, v23.4s, v21.4s /* tmp10h tmp10 = tmp0 + tmp3; */
  347. sub v31.4s, v23.4s, v21.4s /* tmp13h tmp13 = tmp0 - tmp3; */
  348. add v29.4s, v27.4s, v19.4s /* tmp11h tmp11 = tmp1 + tmp2; */
  349. sub v30.4s, v27.4s, v19.4s /* tmp12h tmp12 = tmp1 - tmp2; */
  350. /* Odd part per figure 8; the matrix is unitary and hence its
  351. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  352. */
  353. add v22.8h, v9.8h, v5.8h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
  354. add v24.8h, v7.8h, v3.8h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
  355. add v18.8h, v9.8h, v3.8h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
  356. add v20.8h, v7.8h, v5.8h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
  357. add v26.8h, v22.8h, v24.8h /* z5 = z3 + z4 */
  358. smull2 v11.4s, v9.8h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
  359. smull2 v13.4s, v7.8h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
  360. smull2 v15.4s, v5.8h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
  361. smull2 v17.4s, v3.8h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
  362. smull2 v27.4s, v26.8h, XFIX_P_1_175 /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
  363. smull2 v23.4s, v22.8h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
  364. smull2 v25.4s, v24.8h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
  365. smull2 v19.4s, v18.8h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
  366. smull2 v21.4s, v20.8h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
  367. smull v10.4s, v9.4h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
  368. smull v12.4s, v7.4h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
  369. smull v14.4s, v5.4h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
  370. smull v16.4s, v3.4h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
  371. smull v26.4s, v26.4h, XFIX_P_1_175 /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
  372. smull v22.4s, v22.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
  373. smull v24.4s, v24.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
  374. smull v18.4s, v18.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
  375. smull v20.4s, v20.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
  376. add v23.4s, v23.4s, v27.4s /* z3 += z5 */
  377. add v22.4s, v22.4s, v26.4s /* z3 += z5 */
  378. add v25.4s, v25.4s, v27.4s /* z4 += z5 */
  379. add v24.4s, v24.4s, v26.4s /* z4 += z5 */
  380. add v11.4s, v11.4s, v19.4s /* tmp0 += z1 */
  381. add v10.4s, v10.4s, v18.4s /* tmp0 += z1 */
  382. add v13.4s, v13.4s, v21.4s /* tmp1 += z2 */
  383. add v12.4s, v12.4s, v20.4s /* tmp1 += z2 */
  384. add v15.4s, v15.4s, v21.4s /* tmp2 += z2 */
  385. add v14.4s, v14.4s, v20.4s /* tmp2 += z2 */
  386. add v17.4s, v17.4s, v19.4s /* tmp3 += z1 */
  387. add v16.4s, v16.4s, v18.4s /* tmp3 += z1 */
  388. add v11.4s, v11.4s, v23.4s /* tmp0 += z3 */
  389. add v10.4s, v10.4s, v22.4s /* tmp0 += z3 */
  390. add v13.4s, v13.4s, v25.4s /* tmp1 += z4 */
  391. add v12.4s, v12.4s, v24.4s /* tmp1 += z4 */
  392. add v17.4s, v17.4s, v25.4s /* tmp3 += z4 */
  393. add v16.4s, v16.4s, v24.4s /* tmp3 += z4 */
  394. add v15.4s, v15.4s, v23.4s /* tmp2 += z3 */
  395. add v14.4s, v14.4s, v22.4s /* tmp2 += z3 */
  396. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  397. add v18.4s, v2.4s, v16.4s /* tmp10 + tmp3 */
  398. add v19.4s, v28.4s, v17.4s /* tmp10 + tmp3 */
  399. sub v20.4s, v2.4s, v16.4s /* tmp10 - tmp3 */
  400. sub v21.4s, v28.4s, v17.4s /* tmp10 - tmp3 */
  401. add v22.4s, v8.4s, v14.4s /* tmp11 + tmp2 */
  402. add v23.4s, v29.4s, v15.4s /* tmp11 + tmp2 */
  403. sub v24.4s, v8.4s, v14.4s /* tmp11 - tmp2 */
  404. sub v25.4s, v29.4s, v15.4s /* tmp11 - tmp2 */
  405. add v26.4s, v4.4s, v12.4s /* tmp12 + tmp1 */
  406. add v27.4s, v30.4s, v13.4s /* tmp12 + tmp1 */
  407. sub v28.4s, v4.4s, v12.4s /* tmp12 - tmp1 */
  408. sub v29.4s, v30.4s, v13.4s /* tmp12 - tmp1 */
  409. add v14.4s, v6.4s, v10.4s /* tmp13 + tmp0 */
  410. add v15.4s, v31.4s, v11.4s /* tmp13 + tmp0 */
  411. sub v16.4s, v6.4s, v10.4s /* tmp13 - tmp0 */
  412. sub v17.4s, v31.4s, v11.4s /* tmp13 - tmp0 */
  413. shrn v2.4h, v18.4s, #16 /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3) */
  414. shrn v9.4h, v20.4s, #16 /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3) */
  415. shrn v3.4h, v22.4s, #16 /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3) */
  416. shrn v8.4h, v24.4s, #16 /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3) */
  417. shrn v4.4h, v26.4s, #16 /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3) */
  418. shrn v7.4h, v28.4s, #16 /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3) */
  419. shrn v5.4h, v14.4s, #16 /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3) */
  420. shrn v6.4h, v16.4s, #16 /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3) */
  421. shrn2 v2.8h, v19.4s, #16 /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3) */
  422. shrn2 v9.8h, v21.4s, #16 /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3) */
  423. shrn2 v3.8h, v23.4s, #16 /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3) */
  424. shrn2 v8.8h, v25.4s, #16 /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3) */
  425. shrn2 v4.8h, v27.4s, #16 /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3) */
  426. shrn2 v7.8h, v29.4s, #16 /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3) */
  427. shrn2 v5.8h, v15.4s, #16 /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3) */
  428. shrn2 v6.8h, v17.4s, #16 /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3) */
  429. movi v0.16b, #(CENTERJSAMPLE)
  430. /* Prepare pointers (dual-issue with Neon instructions) */
  431. ldp TMP1, TMP2, [OUTPUT_BUF], 16
  432. sqrshrn v28.8b, v2.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
  433. ldp TMP3, TMP4, [OUTPUT_BUF], 16
  434. sqrshrn v29.8b, v3.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
  435. add TMP1, TMP1, OUTPUT_COL
  436. sqrshrn v30.8b, v4.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
  437. add TMP2, TMP2, OUTPUT_COL
  438. sqrshrn v31.8b, v5.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
  439. add TMP3, TMP3, OUTPUT_COL
  440. sqrshrn2 v28.16b, v6.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
  441. add TMP4, TMP4, OUTPUT_COL
  442. sqrshrn2 v29.16b, v7.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
  443. ldp TMP5, TMP6, [OUTPUT_BUF], 16
  444. sqrshrn2 v30.16b, v8.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
  445. ldp TMP7, TMP8, [OUTPUT_BUF], 16
  446. sqrshrn2 v31.16b, v9.8h, #(CONST_BITS + PASS1_BITS + 3 - 16)
  447. add TMP5, TMP5, OUTPUT_COL
  448. add v16.16b, v28.16b, v0.16b
  449. add TMP6, TMP6, OUTPUT_COL
  450. add v18.16b, v29.16b, v0.16b
  451. add TMP7, TMP7, OUTPUT_COL
  452. add v20.16b, v30.16b, v0.16b
  453. add TMP8, TMP8, OUTPUT_COL
  454. add v22.16b, v31.16b, v0.16b
  455. /* Transpose the final 8-bit samples */
  456. trn1 v28.16b, v16.16b, v18.16b
  457. trn1 v30.16b, v20.16b, v22.16b
  458. trn2 v29.16b, v16.16b, v18.16b
  459. trn2 v31.16b, v20.16b, v22.16b
  460. trn1 v16.8h, v28.8h, v30.8h
  461. trn2 v18.8h, v28.8h, v30.8h
  462. trn1 v20.8h, v29.8h, v31.8h
  463. trn2 v22.8h, v29.8h, v31.8h
  464. uzp1 v28.4s, v16.4s, v18.4s
  465. uzp2 v30.4s, v16.4s, v18.4s
  466. uzp1 v29.4s, v20.4s, v22.4s
  467. uzp2 v31.4s, v20.4s, v22.4s
  468. /* Store results to the output buffer */
  469. st1 {v28.d}[0], [TMP1]
  470. st1 {v29.d}[0], [TMP2]
  471. st1 {v28.d}[1], [TMP3]
  472. st1 {v29.d}[1], [TMP4]
  473. st1 {v30.d}[0], [TMP5]
  474. st1 {v31.d}[0], [TMP6]
  475. st1 {v30.d}[1], [TMP7]
  476. st1 {v31.d}[1], [TMP8]
  477. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], #32
  478. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], #32
  479. blr x30
  480. .balign 16
  481. 2:
  482. mul v3.8h, v3.8h, v19.8h
  483. mul v4.8h, v4.8h, v20.8h
  484. mul v5.8h, v5.8h, v21.8h
  485. add TMP4, xzr, TMP2, LSL #32
  486. mul v6.8h, v6.8h, v22.8h
  487. mul v7.8h, v7.8h, v23.8h
  488. adds TMP3, xzr, TMP2, LSR #32
  489. mul v8.8h, v8.8h, v24.8h
  490. mul v9.8h, v9.8h, v25.8h
  491. b.ne 3f
  492. /* Right AC coef is zero */
  493. dup v15.2d, v10.d[1]
  494. /* Even part: reverse the even part of the forward DCT. */
  495. add v18.4h, v4.4h, v8.4h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
  496. add v22.4h, v2.4h, v6.4h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
  497. sub v26.4h, v2.4h, v6.4h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
  498. smull v18.4s, v18.4h, XFIX_P_0_541 /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
  499. sshll v22.4s, v22.4h, #(CONST_BITS) /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
  500. mov v20.16b, v18.16b /* tmp3 = z1 */
  501. sshll v26.4s, v26.4h, #(CONST_BITS) /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
  502. smlal v18.4s, v8.4h, XFIX_N_1_847 /* tmp2l tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
  503. smlal v20.4s, v4.4h, XFIX_P_0_765 /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
  504. add v2.4s, v22.4s, v20.4s /* tmp10l tmp10 = tmp0 + tmp3; */
  505. sub v6.4s, v22.4s, v20.4s /* tmp13l tmp13 = tmp0 - tmp3; */
  506. add v8.4s, v26.4s, v18.4s /* tmp11l tmp11 = tmp1 + tmp2; */
  507. sub v4.4s, v26.4s, v18.4s /* tmp12l tmp12 = tmp1 - tmp2; */
  508. /* Odd part per figure 8; the matrix is unitary and hence its
  509. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  510. */
  511. add v22.4h, v9.4h, v5.4h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
  512. add v24.4h, v7.4h, v3.4h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
  513. add v18.4h, v9.4h, v3.4h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
  514. add v20.4h, v7.4h, v5.4h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
  515. add v26.4h, v22.4h, v24.4h /* z5 = z3 + z4 */
  516. smull v10.4s, v9.4h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
  517. smull v12.4s, v7.4h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
  518. smull v14.4s, v5.4h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
  519. smull v16.4s, v3.4h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
  520. smull v26.4s, v26.4h, XFIX_P_1_175 /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
  521. smull v22.4s, v22.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
  522. smull v24.4s, v24.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
  523. smull v18.4s, v18.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
  524. smull v20.4s, v20.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
  525. add v22.4s, v22.4s, v26.4s /* z3 += z5 */
  526. add v24.4s, v24.4s, v26.4s /* z4 += z5 */
  527. add v10.4s, v10.4s, v18.4s /* tmp0 += z1 */
  528. add v12.4s, v12.4s, v20.4s /* tmp1 += z2 */
  529. add v14.4s, v14.4s, v20.4s /* tmp2 += z2 */
  530. add v16.4s, v16.4s, v18.4s /* tmp3 += z1 */
  531. add v10.4s, v10.4s, v22.4s /* tmp0 += z3 */
  532. add v12.4s, v12.4s, v24.4s /* tmp1 += z4 */
  533. add v16.4s, v16.4s, v24.4s /* tmp3 += z4 */
  534. add v14.4s, v14.4s, v22.4s /* tmp2 += z3 */
  535. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  536. add v18.4s, v2.4s, v16.4s /* tmp10 + tmp3 */
  537. sub v20.4s, v2.4s, v16.4s /* tmp10 - tmp3 */
  538. add v22.4s, v8.4s, v14.4s /* tmp11 + tmp2 */
  539. sub v24.4s, v8.4s, v14.4s /* tmp11 - tmp2 */
  540. add v26.4s, v4.4s, v12.4s /* tmp12 + tmp1 */
  541. sub v28.4s, v4.4s, v12.4s /* tmp12 - tmp1 */
  542. add v14.4s, v6.4s, v10.4s /* tmp13 + tmp0 */
  543. sub v16.4s, v6.4s, v10.4s /* tmp13 - tmp0 */
  544. rshrn v2.4h, v18.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
  545. rshrn v3.4h, v22.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
  546. rshrn v4.4h, v26.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
  547. rshrn v5.4h, v14.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
  548. rshrn2 v2.8h, v16.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
  549. rshrn2 v3.8h, v28.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
  550. rshrn2 v4.8h, v24.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
  551. rshrn2 v5.8h, v20.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
  552. mov v6.16b, v15.16b
  553. mov v7.16b, v15.16b
  554. mov v8.16b, v15.16b
  555. mov v9.16b, v15.16b
  556. b 1b
  557. .balign 16
  558. 3:
  559. cbnz TMP4, 4f
  560. /* Left AC coef is zero */
  561. dup v14.2d, v10.d[0]
  562. /* Even part: reverse the even part of the forward DCT. */
  563. add v18.8h, v4.8h, v8.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
  564. add v22.8h, v2.8h, v6.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
  565. smull2 v19.4s, v18.8h, XFIX_P_0_541 /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
  566. sub v26.8h, v2.8h, v6.8h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
  567. sshll2 v23.4s, v22.8h, #(CONST_BITS) /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
  568. mov v21.16b, v19.16b /* tmp3 = z1 */
  569. smlal2 v19.4s, v8.8h, XFIX_N_1_847 /* tmp2h tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
  570. sshll2 v27.4s, v26.8h, #(CONST_BITS) /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
  571. smlal2 v21.4s, v4.8h, XFIX_P_0_765 /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
  572. add v28.4s, v23.4s, v21.4s /* tmp10h tmp10 = tmp0 + tmp3; */
  573. sub v31.4s, v23.4s, v21.4s /* tmp13h tmp13 = tmp0 - tmp3; */
  574. add v29.4s, v27.4s, v19.4s /* tmp11h tmp11 = tmp1 + tmp2; */
  575. sub v30.4s, v27.4s, v19.4s /* tmp12h tmp12 = tmp1 - tmp2; */
  576. /* Odd part per figure 8; the matrix is unitary and hence its
  577. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  578. */
  579. add v22.8h, v9.8h, v5.8h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
  580. add v24.8h, v7.8h, v3.8h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
  581. add v18.8h, v9.8h, v3.8h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
  582. add v20.8h, v7.8h, v5.8h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
  583. add v26.8h, v22.8h, v24.8h /* z5 = z3 + z4 */
  584. smull2 v11.4s, v9.8h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
  585. smull2 v13.4s, v7.8h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
  586. smull2 v15.4s, v5.8h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
  587. smull2 v17.4s, v3.8h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
  588. smull2 v27.4s, v26.8h, XFIX_P_1_175 /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
  589. smull2 v23.4s, v22.8h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
  590. smull2 v25.4s, v24.8h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
  591. smull2 v19.4s, v18.8h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
  592. smull2 v21.4s, v20.8h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
  593. add v23.4s, v23.4s, v27.4s /* z3 += z5 */
  594. add v22.4s, v22.4s, v26.4s /* z3 += z5 */
  595. add v25.4s, v25.4s, v27.4s /* z4 += z5 */
  596. add v24.4s, v24.4s, v26.4s /* z4 += z5 */
  597. add v11.4s, v11.4s, v19.4s /* tmp0 += z1 */
  598. add v13.4s, v13.4s, v21.4s /* tmp1 += z2 */
  599. add v15.4s, v15.4s, v21.4s /* tmp2 += z2 */
  600. add v17.4s, v17.4s, v19.4s /* tmp3 += z1 */
  601. add v11.4s, v11.4s, v23.4s /* tmp0 += z3 */
  602. add v13.4s, v13.4s, v25.4s /* tmp1 += z4 */
  603. add v17.4s, v17.4s, v25.4s /* tmp3 += z4 */
  604. add v15.4s, v15.4s, v23.4s /* tmp2 += z3 */
  605. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  606. add v19.4s, v28.4s, v17.4s /* tmp10 + tmp3 */
  607. sub v21.4s, v28.4s, v17.4s /* tmp10 - tmp3 */
  608. add v23.4s, v29.4s, v15.4s /* tmp11 + tmp2 */
  609. sub v25.4s, v29.4s, v15.4s /* tmp11 - tmp2 */
  610. add v27.4s, v30.4s, v13.4s /* tmp12 + tmp1 */
  611. sub v29.4s, v30.4s, v13.4s /* tmp12 - tmp1 */
  612. add v15.4s, v31.4s, v11.4s /* tmp13 + tmp0 */
  613. sub v17.4s, v31.4s, v11.4s /* tmp13 - tmp0 */
  614. mov v2.16b, v14.16b
  615. mov v3.16b, v14.16b
  616. mov v4.16b, v14.16b
  617. mov v5.16b, v14.16b
  618. rshrn v6.4h, v19.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
  619. rshrn v7.4h, v23.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
  620. rshrn v8.4h, v27.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
  621. rshrn v9.4h, v15.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
  622. rshrn2 v6.8h, v17.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
  623. rshrn2 v7.8h, v29.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
  624. rshrn2 v8.8h, v25.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
  625. rshrn2 v9.8h, v21.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
  626. b 1b
  627. .balign 16
  628. 4:
  629. /* "No" AC coef is zero */
  630. /* Even part: reverse the even part of the forward DCT. */
  631. add v18.8h, v4.8h, v8.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
  632. add v22.8h, v2.8h, v6.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
  633. smull2 v19.4s, v18.8h, XFIX_P_0_541 /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
  634. sub v26.8h, v2.8h, v6.8h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
  635. smull v18.4s, v18.4h, XFIX_P_0_541 /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
  636. sshll2 v23.4s, v22.8h, #(CONST_BITS) /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
  637. mov v21.16b, v19.16b /* tmp3 = z1 */
  638. mov v20.16b, v18.16b /* tmp3 = z1 */
  639. smlal2 v19.4s, v8.8h, XFIX_N_1_847 /* tmp2h tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
  640. smlal v18.4s, v8.4h, XFIX_N_1_847 /* tmp2l tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065); */
  641. sshll2 v27.4s, v26.8h, #(CONST_BITS) /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
  642. smlal2 v21.4s, v4.8h, XFIX_P_0_765 /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
  643. smlal v20.4s, v4.4h, XFIX_P_0_765 /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
  644. sshll v22.4s, v22.4h, #(CONST_BITS) /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
  645. sshll v26.4s, v26.4h, #(CONST_BITS) /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
  646. add v2.4s, v22.4s, v20.4s /* tmp10l tmp10 = tmp0 + tmp3; */
  647. sub v6.4s, v22.4s, v20.4s /* tmp13l tmp13 = tmp0 - tmp3; */
  648. add v8.4s, v26.4s, v18.4s /* tmp11l tmp11 = tmp1 + tmp2; */
  649. sub v4.4s, v26.4s, v18.4s /* tmp12l tmp12 = tmp1 - tmp2; */
  650. add v28.4s, v23.4s, v21.4s /* tmp10h tmp10 = tmp0 + tmp3; */
  651. sub v31.4s, v23.4s, v21.4s /* tmp13h tmp13 = tmp0 - tmp3; */
  652. add v29.4s, v27.4s, v19.4s /* tmp11h tmp11 = tmp1 + tmp2; */
  653. sub v30.4s, v27.4s, v19.4s /* tmp12h tmp12 = tmp1 - tmp2; */
  654. /* Odd part per figure 8; the matrix is unitary and hence its
  655. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  656. */
  657. add v22.8h, v9.8h, v5.8h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
  658. add v24.8h, v7.8h, v3.8h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
  659. add v18.8h, v9.8h, v3.8h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
  660. add v20.8h, v7.8h, v5.8h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
  661. add v26.8h, v22.8h, v24.8h /* z5 = z3 + z4 */
  662. smull2 v11.4s, v9.8h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
  663. smull2 v13.4s, v7.8h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
  664. smull2 v15.4s, v5.8h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
  665. smull2 v17.4s, v3.8h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
  666. smull2 v27.4s, v26.8h, XFIX_P_1_175 /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
  667. smull2 v23.4s, v22.8h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
  668. smull2 v25.4s, v24.8h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
  669. smull2 v19.4s, v18.8h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
  670. smull2 v21.4s, v20.8h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
  671. smull v10.4s, v9.4h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
  672. smull v12.4s, v7.4h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
  673. smull v14.4s, v5.4h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
  674. smull v16.4s, v3.4h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
  675. smull v26.4s, v26.4h, XFIX_P_1_175 /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
  676. smull v22.4s, v22.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560) */
  677. smull v24.4s, v24.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644) */
  678. smull v18.4s, v18.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223) */
  679. smull v20.4s, v20.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447) */
  680. add v23.4s, v23.4s, v27.4s /* z3 += z5 */
  681. add v22.4s, v22.4s, v26.4s /* z3 += z5 */
  682. add v25.4s, v25.4s, v27.4s /* z4 += z5 */
  683. add v24.4s, v24.4s, v26.4s /* z4 += z5 */
  684. add v11.4s, v11.4s, v19.4s /* tmp0 += z1 */
  685. add v10.4s, v10.4s, v18.4s /* tmp0 += z1 */
  686. add v13.4s, v13.4s, v21.4s /* tmp1 += z2 */
  687. add v12.4s, v12.4s, v20.4s /* tmp1 += z2 */
  688. add v15.4s, v15.4s, v21.4s /* tmp2 += z2 */
  689. add v14.4s, v14.4s, v20.4s /* tmp2 += z2 */
  690. add v17.4s, v17.4s, v19.4s /* tmp3 += z1 */
  691. add v16.4s, v16.4s, v18.4s /* tmp3 += z1 */
  692. add v11.4s, v11.4s, v23.4s /* tmp0 += z3 */
  693. add v10.4s, v10.4s, v22.4s /* tmp0 += z3 */
  694. add v13.4s, v13.4s, v25.4s /* tmp1 += z4 */
  695. add v12.4s, v12.4s, v24.4s /* tmp1 += z4 */
  696. add v17.4s, v17.4s, v25.4s /* tmp3 += z4 */
  697. add v16.4s, v16.4s, v24.4s /* tmp3 += z4 */
  698. add v15.4s, v15.4s, v23.4s /* tmp2 += z3 */
  699. add v14.4s, v14.4s, v22.4s /* tmp2 += z3 */
  700. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  701. add v18.4s, v2.4s, v16.4s /* tmp10 + tmp3 */
  702. add v19.4s, v28.4s, v17.4s /* tmp10 + tmp3 */
  703. sub v20.4s, v2.4s, v16.4s /* tmp10 - tmp3 */
  704. sub v21.4s, v28.4s, v17.4s /* tmp10 - tmp3 */
  705. add v22.4s, v8.4s, v14.4s /* tmp11 + tmp2 */
  706. add v23.4s, v29.4s, v15.4s /* tmp11 + tmp2 */
  707. sub v24.4s, v8.4s, v14.4s /* tmp11 - tmp2 */
  708. sub v25.4s, v29.4s, v15.4s /* tmp11 - tmp2 */
  709. add v26.4s, v4.4s, v12.4s /* tmp12 + tmp1 */
  710. add v27.4s, v30.4s, v13.4s /* tmp12 + tmp1 */
  711. sub v28.4s, v4.4s, v12.4s /* tmp12 - tmp1 */
  712. sub v29.4s, v30.4s, v13.4s /* tmp12 - tmp1 */
  713. add v14.4s, v6.4s, v10.4s /* tmp13 + tmp0 */
  714. add v15.4s, v31.4s, v11.4s /* tmp13 + tmp0 */
  715. sub v16.4s, v6.4s, v10.4s /* tmp13 - tmp0 */
  716. sub v17.4s, v31.4s, v11.4s /* tmp13 - tmp0 */
  717. rshrn v2.4h, v18.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
  718. rshrn v3.4h, v22.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
  719. rshrn v4.4h, v26.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
  720. rshrn v5.4h, v14.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
  721. rshrn v6.4h, v19.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
  722. rshrn v7.4h, v23.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
  723. rshrn v8.4h, v27.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
  724. rshrn v9.4h, v15.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
  725. rshrn2 v2.8h, v16.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
  726. rshrn2 v3.8h, v28.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
  727. rshrn2 v4.8h, v24.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
  728. rshrn2 v5.8h, v20.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
  729. rshrn2 v6.8h, v17.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
  730. rshrn2 v7.8h, v29.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
  731. rshrn2 v8.8h, v25.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
  732. rshrn2 v9.8h, v21.4s, #(CONST_BITS - PASS1_BITS) /* wsptr[DCTSIZE*7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
  733. b 1b
  734. .unreq DCT_TABLE
  735. .unreq COEF_BLOCK
  736. .unreq OUTPUT_BUF
  737. .unreq OUTPUT_COL
  738. .unreq TMP1
  739. .unreq TMP2
  740. .unreq TMP3
  741. .unreq TMP4
  742. .unreq TMP5
  743. .unreq TMP6
  744. .unreq TMP7
  745. .unreq TMP8
  746. #undef CENTERJSAMPLE
  747. #undef CONST_BITS
  748. #undef PASS1_BITS
  749. #undef XFIX_P_0_298
  750. #undef XFIX_N_0_390
  751. #undef XFIX_P_0_541
  752. #undef XFIX_P_0_765
  753. #undef XFIX_N_0_899
  754. #undef XFIX_P_1_175
  755. #undef XFIX_P_1_501
  756. #undef XFIX_N_1_847
  757. #undef XFIX_N_1_961
  758. #undef XFIX_P_2_053
  759. #undef XFIX_N_2_562
  760. #undef XFIX_P_3_072
  761. /*****************************************************************************/
  762. /*
  763. * jsimd_ycc_extrgb_convert_neon
  764. * jsimd_ycc_extbgr_convert_neon
  765. * jsimd_ycc_extrgbx_convert_neon
  766. * jsimd_ycc_extbgrx_convert_neon
  767. * jsimd_ycc_extxbgr_convert_neon
  768. * jsimd_ycc_extxrgb_convert_neon
  769. *
  770. * Colorspace conversion YCbCr -> RGB
  771. */
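/* The register aliases declared below (OUTPUT_WIDTH in w0, INPUT_BUF in x1,
 * INPUT_ROW in w2, OUTPUT_BUF in x3, NUM_ROWS in w4) correspond to an assumed
 * C prototype of the form (typedefs from the libjpeg headers):
 *
 *   GLOBAL(void)
 *   jsimd_ycc_extrgb_convert_neon(JDIMENSION output_width,
 *                                 JSAMPIMAGE input_buf, JDIMENSION input_row,
 *                                 JSAMPARRAY output_buf, int num_rows);
 */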
  772. .macro do_load size
  773. .if \size == 8
  774. ld1 {v4.8b}, [U], 8
  775. ld1 {v5.8b}, [V], 8
  776. ld1 {v0.8b}, [Y], 8
  777. prfm pldl1keep, [U, #64]
  778. prfm pldl1keep, [V, #64]
  779. prfm pldl1keep, [Y, #64]
  780. .elseif \size == 4
  781. ld1 {v4.b}[0], [U], 1
  782. ld1 {v4.b}[1], [U], 1
  783. ld1 {v4.b}[2], [U], 1
  784. ld1 {v4.b}[3], [U], 1
  785. ld1 {v5.b}[0], [V], 1
  786. ld1 {v5.b}[1], [V], 1
  787. ld1 {v5.b}[2], [V], 1
  788. ld1 {v5.b}[3], [V], 1
  789. ld1 {v0.b}[0], [Y], 1
  790. ld1 {v0.b}[1], [Y], 1
  791. ld1 {v0.b}[2], [Y], 1
  792. ld1 {v0.b}[3], [Y], 1
  793. .elseif \size == 2
  794. ld1 {v4.b}[4], [U], 1
  795. ld1 {v4.b}[5], [U], 1
  796. ld1 {v5.b}[4], [V], 1
  797. ld1 {v5.b}[5], [V], 1
  798. ld1 {v0.b}[4], [Y], 1
  799. ld1 {v0.b}[5], [Y], 1
  800. .elseif \size == 1
  801. ld1 {v4.b}[6], [U], 1
  802. ld1 {v5.b}[6], [V], 1
  803. ld1 {v0.b}[6], [Y], 1
  804. .else
  805. .error unsupported macroblock size
  806. .endif
  807. .endm
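/* Note that the 4/2/1 cases of do_load fill successive lanes of v0/v4/v5
 * rather than starting over at lane 0, so a trailing block of fewer than 8
 * pixels is assembled by combining them (e.g. 7 = 4 + 2 + 1), matching the
 * "tst N, #4 / #2 / #1" tail handling further below.
 */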
  808. .macro do_store bpp, size, fast_st3
  809. .if \bpp == 24
  810. .if \size == 8
  811. .if \fast_st3 == 1
  812. st3 {v10.8b, v11.8b, v12.8b}, [RGB], 24
  813. .else
  814. st1 {v10.b}[0], [RGB], #1
  815. st1 {v11.b}[0], [RGB], #1
  816. st1 {v12.b}[0], [RGB], #1
  817. st1 {v10.b}[1], [RGB], #1
  818. st1 {v11.b}[1], [RGB], #1
  819. st1 {v12.b}[1], [RGB], #1
  820. st1 {v10.b}[2], [RGB], #1
  821. st1 {v11.b}[2], [RGB], #1
  822. st1 {v12.b}[2], [RGB], #1
  823. st1 {v10.b}[3], [RGB], #1
  824. st1 {v11.b}[3], [RGB], #1
  825. st1 {v12.b}[3], [RGB], #1
  826. st1 {v10.b}[4], [RGB], #1
  827. st1 {v11.b}[4], [RGB], #1
  828. st1 {v12.b}[4], [RGB], #1
  829. st1 {v10.b}[5], [RGB], #1
  830. st1 {v11.b}[5], [RGB], #1
  831. st1 {v12.b}[5], [RGB], #1
  832. st1 {v10.b}[6], [RGB], #1
  833. st1 {v11.b}[6], [RGB], #1
  834. st1 {v12.b}[6], [RGB], #1
  835. st1 {v10.b}[7], [RGB], #1
  836. st1 {v11.b}[7], [RGB], #1
  837. st1 {v12.b}[7], [RGB], #1
  838. .endif
  839. .elseif \size == 4
  840. st3 {v10.b, v11.b, v12.b}[0], [RGB], 3
  841. st3 {v10.b, v11.b, v12.b}[1], [RGB], 3
  842. st3 {v10.b, v11.b, v12.b}[2], [RGB], 3
  843. st3 {v10.b, v11.b, v12.b}[3], [RGB], 3
  844. .elseif \size == 2
  845. st3 {v10.b, v11.b, v12.b}[4], [RGB], 3
  846. st3 {v10.b, v11.b, v12.b}[5], [RGB], 3
  847. .elseif \size == 1
  848. st3 {v10.b, v11.b, v12.b}[6], [RGB], 3
  849. .else
  850. .error unsupported macroblock size
  851. .endif
  852. .elseif \bpp == 32
  853. .if \size == 8
  854. st4 {v10.8b, v11.8b, v12.8b, v13.8b}, [RGB], 32
  855. .elseif \size == 4
  856. st4 {v10.b, v11.b, v12.b, v13.b}[0], [RGB], 4
  857. st4 {v10.b, v11.b, v12.b, v13.b}[1], [RGB], 4
  858. st4 {v10.b, v11.b, v12.b, v13.b}[2], [RGB], 4
  859. st4 {v10.b, v11.b, v12.b, v13.b}[3], [RGB], 4
  860. .elseif \size == 2
  861. st4 {v10.b, v11.b, v12.b, v13.b}[4], [RGB], 4
  862. st4 {v10.b, v11.b, v12.b, v13.b}[5], [RGB], 4
  863. .elseif \size == 1
  864. st4 {v10.b, v11.b, v12.b, v13.b}[6], [RGB], 4
  865. .else
  866. .error unsupported macroblock size
  867. .endif
  868. .elseif \bpp == 16
  869. .if \size == 8
  870. st1 {v25.8h}, [RGB], 16
  871. .elseif \size == 4
  872. st1 {v25.4h}, [RGB], 8
  873. .elseif \size == 2
  874. st1 {v25.h}[4], [RGB], 2
  875. st1 {v25.h}[5], [RGB], 2
  876. .elseif \size == 1
  877. st1 {v25.h}[6], [RGB], 2
  878. .else
  879. .error unsupported macroblock size
  880. .endif
  881. .else
  882. .error unsupported bpp
  883. .endif
  884. .endm
  885. .macro generate_jsimd_ycc_rgb_convert_neon colorid, bpp, r_offs, rsize, \
  886. g_offs, gsize, b_offs, bsize, \
  887. defsize, fast_st3
  888. /*
  889. * 2-stage pipelined YCbCr->RGB conversion
  890. */
  891. .macro do_yuv_to_rgb_stage1
  892. uaddw v6.8h, v2.8h, v4.8b /* q3 = u - 128 */
  893. uaddw v8.8h, v2.8h, v5.8b /* q2 = v - 128 */
  894. smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
  895. smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
  896. smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
  897. smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
  898. smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
  899. smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
  900. smull v28.4s, v6.4h, v1.h[3] /* multiply by 29033 */
  901. smull2 v30.4s, v6.8h, v1.h[3] /* multiply by 29033 */
  902. .endm
  903. .macro do_yuv_to_rgb_stage2
  904. rshrn v20.4h, v20.4s, #15
  905. rshrn2 v20.8h, v22.4s, #15
  906. rshrn v24.4h, v24.4s, #14
  907. rshrn2 v24.8h, v26.4s, #14
  908. rshrn v28.4h, v28.4s, #14
  909. rshrn2 v28.8h, v30.4s, #14
  910. uaddw v20.8h, v20.8h, v0.8b
  911. uaddw v24.8h, v24.8h, v0.8b
  912. uaddw v28.8h, v28.8h, v0.8b
  913. .if \bpp != 16
  914. sqxtun v1\g_offs\defsize, v20.8h
  915. sqxtun v1\r_offs\defsize, v24.8h
  916. sqxtun v1\b_offs\defsize, v28.8h
  917. .else
  918. sqshlu v21.8h, v20.8h, #8
  919. sqshlu v25.8h, v24.8h, #8
  920. sqshlu v29.8h, v28.8h, #8
  921. sri v25.8h, v21.8h, #5
  922. sri v25.8h, v29.8h, #11
  923. .endif
  924. .endm
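/* In the rgb565 (bpp == 16) path above, the sqshlu/sri sequence packs the
 * three channels into RGB565 (R:5, G:6, B:5).  A scalar C sketch of the
 * equivalent packing, assuming r, g and b are already clamped to 0..255
 * (illustrative helper name):
 *
 *   #include <stdint.h>
 *   static uint16_t pack_rgb565(unsigned r, unsigned g, unsigned b) {
 *     return (uint16_t)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
 *   }
 */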
  925. .macro do_yuv_to_rgb_stage2_store_load_stage1 fast_st3
  926. rshrn v20.4h, v20.4s, #15
  927. rshrn v24.4h, v24.4s, #14
  928. rshrn v28.4h, v28.4s, #14
  929. ld1 {v4.8b}, [U], 8
  930. rshrn2 v20.8h, v22.4s, #15
  931. rshrn2 v24.8h, v26.4s, #14
  932. rshrn2 v28.8h, v30.4s, #14
  933. ld1 {v5.8b}, [V], 8
  934. uaddw v20.8h, v20.8h, v0.8b
  935. uaddw v24.8h, v24.8h, v0.8b
  936. uaddw v28.8h, v28.8h, v0.8b
  937. .if \bpp != 16 /**************** rgb24/rgb32 ******************************/
  938. sqxtun v1\g_offs\defsize, v20.8h
  939. ld1 {v0.8b}, [Y], 8
  940. sqxtun v1\r_offs\defsize, v24.8h
  941. prfm pldl1keep, [U, #64]
  942. prfm pldl1keep, [V, #64]
  943. prfm pldl1keep, [Y, #64]
  944. sqxtun v1\b_offs\defsize, v28.8h
  945. uaddw v6.8h, v2.8h, v4.8b /* v6.16b = u - 128 */
  946. uaddw v8.8h, v2.8h, v5.8b /* q2 = v - 128 */
  947. smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
  948. smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
  949. smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
  950. smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
  951. smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
  952. smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
  953. .else /**************************** rgb565 ********************************/
  954. sqshlu v21.8h, v20.8h, #8
  955. sqshlu v25.8h, v24.8h, #8
  956. sqshlu v29.8h, v28.8h, #8
957. uaddw v6.8h, v2.8h, v4.8b /* v6.8h = u - 128 */
958. uaddw v8.8h, v2.8h, v5.8b /* v8.8h = v - 128 */
  959. ld1 {v0.8b}, [Y], 8
  960. smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
  961. smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
  962. smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
  963. smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
  964. sri v25.8h, v21.8h, #5
  965. smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
  966. smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
  967. prfm pldl1keep, [U, #64]
  968. prfm pldl1keep, [V, #64]
  969. prfm pldl1keep, [Y, #64]
  970. sri v25.8h, v29.8h, #11
  971. .endif
  972. do_store \bpp, 8, \fast_st3
  973. smull v28.4s, v6.4h, v1.h[3] /* multiply by 29033 */
  974. smull2 v30.4s, v6.8h, v1.h[3] /* multiply by 29033 */
  975. .endm
  976. .macro do_yuv_to_rgb
  977. do_yuv_to_rgb_stage1
  978. do_yuv_to_rgb_stage2
  979. .endm
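/*
 * The _stage2_store_load_stage1 macro exists purely for software pipelining:
 * while the results of block i are being narrowed and stored, the loads and
 * the first multiply stage for block i+1 are already issued.  The driver loop
 * below therefore has the usual prologue / steady-state / epilogue shape,
 * roughly as in this illustrative C skeleton (stand-in helper names are
 * hypothetical, not part of the original source):
 *
 *   static void load8(void) {}                       // do_load 8
 *   static void stage1(void) {}                      // do_yuv_to_rgb_stage1
 *   static void stage2(void) {}                      // do_yuv_to_rgb_stage2
 *   static void store8(void) {}                      // do_store bpp, 8
 *   static void stage2_store_load_stage1(void) {}    // the fused macro
 *
 *   static void convert_full_blocks(int n) {         // n = pixels in the row
 *     n -= 8;
 *     if (n < 0) return;                  // 0..7 leftover pixels handled later
 *     load8();                            // prologue: start block 0
 *     stage1();
 *     for (n -= 8; n >= 0; n -= 8)        // steady state: finish i, start i+1
 *       stage2_store_load_stage1();
 *     stage2();                           // epilogue: finish the last block
 *     store8();
 *   }
 */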
  980. .if \fast_st3 == 1
  981. asm_function jsimd_ycc_\colorid\()_convert_neon
  982. .else
  983. asm_function jsimd_ycc_\colorid\()_convert_neon_slowst3
  984. .endif
  985. OUTPUT_WIDTH .req w0
  986. INPUT_BUF .req x1
  987. INPUT_ROW .req w2
  988. OUTPUT_BUF .req x3
  989. NUM_ROWS .req w4
  990. INPUT_BUF0 .req x5
  991. INPUT_BUF1 .req x6
  992. INPUT_BUF2 .req x1
  993. RGB .req x7
  994. Y .req x9
  995. U .req x10
  996. V .req x11
  997. N .req w15
  998. sub sp, sp, 64
  999. mov x9, sp
1000. /* Load constants into v1.4h and v2.8h (v0.4h is only loaded as padding) */
  1001. get_symbol_loc x15, Ljsimd_ycc_rgb_neon_consts
  1002. /* Save Neon registers */
  1003. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
  1004. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
  1005. ld1 {v0.4h, v1.4h}, [x15], 16
  1006. ld1 {v2.8h}, [x15]
  1007. ldr INPUT_BUF0, [INPUT_BUF]
  1008. ldr INPUT_BUF1, [INPUT_BUF, #8]
  1009. ldr INPUT_BUF2, [INPUT_BUF, #16]
  1010. .unreq INPUT_BUF
1011. /* Initially set v10.16b and v13.16b to 0xFF (the X/alpha fill for the 32-bit formats) */
  1012. movi v10.16b, #255
  1013. movi v13.16b, #255
  1014. /* Outer loop over scanlines */
  1015. cmp NUM_ROWS, #1
  1016. b.lt 9f
  1017. 0:
  1018. ldr Y, [INPUT_BUF0, INPUT_ROW, uxtw #3]
  1019. ldr U, [INPUT_BUF1, INPUT_ROW, uxtw #3]
  1020. mov N, OUTPUT_WIDTH
  1021. ldr V, [INPUT_BUF2, INPUT_ROW, uxtw #3]
  1022. add INPUT_ROW, INPUT_ROW, #1
  1023. ldr RGB, [OUTPUT_BUF], #8
  1024. /* Inner loop over pixels */
  1025. subs N, N, #8
  1026. b.lt 3f
  1027. do_load 8
  1028. do_yuv_to_rgb_stage1
  1029. subs N, N, #8
  1030. b.lt 2f
  1031. 1:
  1032. do_yuv_to_rgb_stage2_store_load_stage1 \fast_st3
  1033. subs N, N, #8
  1034. b.ge 1b
  1035. 2:
  1036. do_yuv_to_rgb_stage2
  1037. do_store \bpp, 8, \fast_st3
  1038. tst N, #7
  1039. b.eq 8f
  1040. 3:
  1041. tst N, #4
  1042. b.eq 3f
  1043. do_load 4
  1044. 3:
  1045. tst N, #2
  1046. b.eq 4f
  1047. do_load 2
  1048. 4:
  1049. tst N, #1
  1050. b.eq 5f
  1051. do_load 1
  1052. 5:
  1053. do_yuv_to_rgb
  1054. tst N, #4
  1055. b.eq 6f
  1056. do_store \bpp, 4, \fast_st3
  1057. 6:
  1058. tst N, #2
  1059. b.eq 7f
  1060. do_store \bpp, 2, \fast_st3
  1061. 7:
  1062. tst N, #1
  1063. b.eq 8f
  1064. do_store \bpp, 1, \fast_st3
  1065. 8:
  1066. subs NUM_ROWS, NUM_ROWS, #1
  1067. b.gt 0b
  1068. 9:
  1069. /* Restore all registers and return */
  1070. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
  1071. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
  1072. br x30
  1073. .unreq OUTPUT_WIDTH
  1074. .unreq INPUT_ROW
  1075. .unreq OUTPUT_BUF
  1076. .unreq NUM_ROWS
  1077. .unreq INPUT_BUF0
  1078. .unreq INPUT_BUF1
  1079. .unreq INPUT_BUF2
  1080. .unreq RGB
  1081. .unreq Y
  1082. .unreq U
  1083. .unreq V
  1084. .unreq N
  1085. .purgem do_yuv_to_rgb
  1086. .purgem do_yuv_to_rgb_stage1
  1087. .purgem do_yuv_to_rgb_stage2
  1088. .purgem do_yuv_to_rgb_stage2_store_load_stage1
  1089. .endm
  1090. /*--------------------------------- id ----- bpp R rsize G gsize B bsize defsize fast_st3*/
  1091. generate_jsimd_ycc_rgb_convert_neon extrgb, 24, 0, .4h, 1, .4h, 2, .4h, .8b, 1
  1092. generate_jsimd_ycc_rgb_convert_neon extbgr, 24, 2, .4h, 1, .4h, 0, .4h, .8b, 1
  1093. generate_jsimd_ycc_rgb_convert_neon extrgbx, 32, 0, .4h, 1, .4h, 2, .4h, .8b, 1
  1094. generate_jsimd_ycc_rgb_convert_neon extbgrx, 32, 2, .4h, 1, .4h, 0, .4h, .8b, 1
  1095. generate_jsimd_ycc_rgb_convert_neon extxbgr, 32, 3, .4h, 2, .4h, 1, .4h, .8b, 1
  1096. generate_jsimd_ycc_rgb_convert_neon extxrgb, 32, 1, .4h, 2, .4h, 3, .4h, .8b, 1
  1097. generate_jsimd_ycc_rgb_convert_neon rgb565, 16, 0, .4h, 0, .4h, 0, .4h, .8b, 1
  1098. generate_jsimd_ycc_rgb_convert_neon extrgb, 24, 0, .4h, 1, .4h, 2, .4h, .8b, 0
  1099. generate_jsimd_ycc_rgb_convert_neon extbgr, 24, 2, .4h, 1, .4h, 0, .4h, .8b, 0
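/*
 * Each invocation above instantiates one complete function: with fast_st3 == 1
 * the symbol is jsimd_ycc_<colorid>_convert_neon, with fast_st3 == 0 it is
 * jsimd_ycc_<colorid>_convert_neon_slowst3 (only the 24-bit formats get a
 * _slowst3 flavour here).  A caller could select a variant at run time; a
 * hypothetical C sketch (the jpeglib typedefs JDIMENSION / JSAMPIMAGE /
 * JSAMPARRAY are replaced with plain types, and the signature is inferred from
 * the register aliases above):
 *
 *   typedef void (*ycc_rgb_convert_fn)(unsigned out_width,
 *                                      unsigned char ***input_buf,
 *                                      unsigned input_row,
 *                                      unsigned char **output_buf,
 *                                      int num_rows);
 *
 *   extern void jsimd_ycc_extrgb_convert_neon(unsigned, unsigned char ***,
 *                                             unsigned, unsigned char **, int);
 *   extern void jsimd_ycc_extrgb_convert_neon_slowst3(unsigned,
 *                                             unsigned char ***, unsigned,
 *                                             unsigned char **, int);
 *
 *   static ycc_rgb_convert_fn pick_extrgb_variant(int st3_is_fast) {
 *     return st3_is_fast ? jsimd_ycc_extrgb_convert_neon
 *                        : jsimd_ycc_extrgb_convert_neon_slowst3;
 *   }
 */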
  1100. .purgem do_load
  1101. .purgem do_store
  1102. /*****************************************************************************/
  1103. /*
  1104. * jsimd_extrgb_ycc_convert_neon
  1105. * jsimd_extbgr_ycc_convert_neon
  1106. * jsimd_extrgbx_ycc_convert_neon
  1107. * jsimd_extbgrx_ycc_convert_neon
  1108. * jsimd_extxbgr_ycc_convert_neon
  1109. * jsimd_extxrgb_ycc_convert_neon
  1110. *
  1111. * Colorspace conversion RGB -> YCbCr
  1112. */
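/*
 * The forward conversion computed by the macros below follows the usual
 * libjpeg fixed-point formulation.  As a scalar C sketch (the coefficients are
 * my reading of jccolor.c, scaled by 2^16; this file's own constant table,
 * Ljsimd_rgb_ycc_neon_consts, is defined elsewhere, and the +128 chroma offset
 * plus rounding bias appear to be pre-loaded into the accumulators):
 *
 *   #include <stdint.h>
 *
 *   static void rgb_to_ycc_pixel(int r, int g, int b,
 *                                uint8_t *y, uint8_t *cb, uint8_t *cr) {
 *     *y  = (uint8_t)(( 19595 * r + 38470 * g +  7471 * b
 *                      + 32768) >> 16);                  // 0.299 / 0.587 / 0.114
 *     *cb = (uint8_t)((-11059 * r - 21709 * g + 32768 * b
 *                      + (128 << 16) + 32767) >> 16);    // -0.16874 / -0.33126 / 0.5
 *     *cr = (uint8_t)(( 32768 * r - 27439 * g -  5329 * b
 *                      + (128 << 16) + 32767) >> 16);    // 0.5 / -0.41869 / -0.08131
 *   }
 */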
  1113. .macro do_store size
  1114. .if \size == 8
  1115. st1 {v20.8b}, [Y], #8
  1116. st1 {v21.8b}, [U], #8
  1117. st1 {v22.8b}, [V], #8
  1118. .elseif \size == 4
  1119. st1 {v20.b}[0], [Y], #1
  1120. st1 {v20.b}[1], [Y], #1
  1121. st1 {v20.b}[2], [Y], #1
  1122. st1 {v20.b}[3], [Y], #1
  1123. st1 {v21.b}[0], [U], #1
  1124. st1 {v21.b}[1], [U], #1
  1125. st1 {v21.b}[2], [U], #1
  1126. st1 {v21.b}[3], [U], #1
  1127. st1 {v22.b}[0], [V], #1
  1128. st1 {v22.b}[1], [V], #1
  1129. st1 {v22.b}[2], [V], #1
  1130. st1 {v22.b}[3], [V], #1
  1131. .elseif \size == 2
  1132. st1 {v20.b}[4], [Y], #1
  1133. st1 {v20.b}[5], [Y], #1
  1134. st1 {v21.b}[4], [U], #1
  1135. st1 {v21.b}[5], [U], #1
  1136. st1 {v22.b}[4], [V], #1
  1137. st1 {v22.b}[5], [V], #1
  1138. .elseif \size == 1
  1139. st1 {v20.b}[6], [Y], #1
  1140. st1 {v21.b}[6], [U], #1
  1141. st1 {v22.b}[6], [V], #1
  1142. .else
  1143. .error unsupported macroblock size
  1144. .endif
  1145. .endm
  1146. .macro do_load bpp, size, fast_ld3
  1147. .if \bpp == 24
  1148. .if \size == 8
  1149. .if \fast_ld3 == 1
  1150. ld3 {v10.8b, v11.8b, v12.8b}, [RGB], #24
  1151. .else
  1152. ld1 {v10.b}[0], [RGB], #1
  1153. ld1 {v11.b}[0], [RGB], #1
  1154. ld1 {v12.b}[0], [RGB], #1
  1155. ld1 {v10.b}[1], [RGB], #1
  1156. ld1 {v11.b}[1], [RGB], #1
  1157. ld1 {v12.b}[1], [RGB], #1
  1158. ld1 {v10.b}[2], [RGB], #1
  1159. ld1 {v11.b}[2], [RGB], #1
  1160. ld1 {v12.b}[2], [RGB], #1
  1161. ld1 {v10.b}[3], [RGB], #1
  1162. ld1 {v11.b}[3], [RGB], #1
  1163. ld1 {v12.b}[3], [RGB], #1
  1164. ld1 {v10.b}[4], [RGB], #1
  1165. ld1 {v11.b}[4], [RGB], #1
  1166. ld1 {v12.b}[4], [RGB], #1
  1167. ld1 {v10.b}[5], [RGB], #1
  1168. ld1 {v11.b}[5], [RGB], #1
  1169. ld1 {v12.b}[5], [RGB], #1
  1170. ld1 {v10.b}[6], [RGB], #1
  1171. ld1 {v11.b}[6], [RGB], #1
  1172. ld1 {v12.b}[6], [RGB], #1
  1173. ld1 {v10.b}[7], [RGB], #1
  1174. ld1 {v11.b}[7], [RGB], #1
  1175. ld1 {v12.b}[7], [RGB], #1
  1176. .endif
  1177. prfm pldl1keep, [RGB, #128]
  1178. .elseif \size == 4
  1179. ld3 {v10.b, v11.b, v12.b}[0], [RGB], #3
  1180. ld3 {v10.b, v11.b, v12.b}[1], [RGB], #3
  1181. ld3 {v10.b, v11.b, v12.b}[2], [RGB], #3
  1182. ld3 {v10.b, v11.b, v12.b}[3], [RGB], #3
  1183. .elseif \size == 2
  1184. ld3 {v10.b, v11.b, v12.b}[4], [RGB], #3
  1185. ld3 {v10.b, v11.b, v12.b}[5], [RGB], #3
  1186. .elseif \size == 1
  1187. ld3 {v10.b, v11.b, v12.b}[6], [RGB], #3
  1188. .else
  1189. .error unsupported macroblock size
  1190. .endif
  1191. .elseif \bpp == 32
  1192. .if \size == 8
  1193. ld4 {v10.8b, v11.8b, v12.8b, v13.8b}, [RGB], #32
  1194. prfm pldl1keep, [RGB, #128]
  1195. .elseif \size == 4
  1196. ld4 {v10.b, v11.b, v12.b, v13.b}[0], [RGB], #4
  1197. ld4 {v10.b, v11.b, v12.b, v13.b}[1], [RGB], #4
  1198. ld4 {v10.b, v11.b, v12.b, v13.b}[2], [RGB], #4
  1199. ld4 {v10.b, v11.b, v12.b, v13.b}[3], [RGB], #4
  1200. .elseif \size == 2
  1201. ld4 {v10.b, v11.b, v12.b, v13.b}[4], [RGB], #4
  1202. ld4 {v10.b, v11.b, v12.b, v13.b}[5], [RGB], #4
  1203. .elseif \size == 1
  1204. ld4 {v10.b, v11.b, v12.b, v13.b}[6], [RGB], #4
  1205. .else
  1206. .error unsupported macroblock size
  1207. .endif
  1208. .else
  1209. .error unsupported bpp
  1210. .endif
  1211. .endm
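/*
 * The element-by-element ld1 path above is just a manual three-way
 * deinterleave, used by the _slowld3 variants (presumably for cores where the
 * interleaving LD3 form is slow).  In scalar C it would look like this
 * (illustrative helper; the first/second/third planes end up in v10/v11/v12,
 * and which of them is R, G or B is decided by the r_offs/g_offs/b_offs
 * arguments of the generator macro below):
 *
 *   #include <stdint.h>
 *
 *   static void deinterleave_rgb24(const uint8_t *rgb,
 *                                  uint8_t c0[8], uint8_t c1[8], uint8_t c2[8]) {
 *     for (int i = 0; i < 8; i++) {
 *       c0[i] = rgb[3 * i + 0];
 *       c1[i] = rgb[3 * i + 1];
 *       c2[i] = rgb[3 * i + 2];
 *     }
 *   }
 */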
  1212. .macro generate_jsimd_rgb_ycc_convert_neon colorid, bpp, r_offs, g_offs, \
  1213. b_offs, fast_ld3
  1214. /*
  1215. * 2-stage pipelined RGB->YCbCr conversion
  1216. */
  1217. .macro do_rgb_to_yuv_stage1
  1218. ushll v4.8h, v1\r_offs\().8b, #0 /* r = v4 */
  1219. ushll v6.8h, v1\g_offs\().8b, #0 /* g = v6 */
  1220. ushll v8.8h, v1\b_offs\().8b, #0 /* b = v8 */
  1221. rev64 v18.4s, v1.4s
  1222. rev64 v26.4s, v1.4s
  1223. rev64 v28.4s, v1.4s
  1224. rev64 v30.4s, v1.4s
  1225. umull v14.4s, v4.4h, v0.h[0]
  1226. umull2 v16.4s, v4.8h, v0.h[0]
  1227. umlsl v18.4s, v4.4h, v0.h[3]
  1228. umlsl2 v26.4s, v4.8h, v0.h[3]
  1229. umlal v28.4s, v4.4h, v0.h[5]
  1230. umlal2 v30.4s, v4.8h, v0.h[5]
  1231. umlal v14.4s, v6.4h, v0.h[1]
  1232. umlal2 v16.4s, v6.8h, v0.h[1]
  1233. umlsl v18.4s, v6.4h, v0.h[4]
  1234. umlsl2 v26.4s, v6.8h, v0.h[4]
  1235. umlsl v28.4s, v6.4h, v0.h[6]
  1236. umlsl2 v30.4s, v6.8h, v0.h[6]
  1237. umlal v14.4s, v8.4h, v0.h[2]
  1238. umlal2 v16.4s, v8.8h, v0.h[2]
  1239. umlal v18.4s, v8.4h, v0.h[5]
  1240. umlal2 v26.4s, v8.8h, v0.h[5]
  1241. umlsl v28.4s, v8.4h, v0.h[7]
  1242. umlsl2 v30.4s, v8.8h, v0.h[7]
  1243. .endm
  1244. .macro do_rgb_to_yuv_stage2
  1245. rshrn v20.4h, v14.4s, #16
  1246. shrn v22.4h, v18.4s, #16
  1247. shrn v24.4h, v28.4s, #16
  1248. rshrn2 v20.8h, v16.4s, #16
  1249. shrn2 v22.8h, v26.4s, #16
  1250. shrn2 v24.8h, v30.4s, #16
  1251. xtn v20.8b, v20.8h /* v20 = y */
  1252. xtn v21.8b, v22.8h /* v21 = u */
  1253. xtn v22.8b, v24.8h /* v22 = v */
  1254. .endm
  1255. .macro do_rgb_to_yuv
  1256. do_rgb_to_yuv_stage1
  1257. do_rgb_to_yuv_stage2
  1258. .endm
  1259. /* TODO: expand macros and interleave instructions if some in-order
  1260. * AArch64 processor actually can dual-issue LOAD/STORE with ALU */
  1261. .macro do_rgb_to_yuv_stage2_store_load_stage1 fast_ld3
  1262. do_rgb_to_yuv_stage2
  1263. do_load \bpp, 8, \fast_ld3
  1264. st1 {v20.8b}, [Y], #8
  1265. st1 {v21.8b}, [U], #8
  1266. st1 {v22.8b}, [V], #8
  1267. do_rgb_to_yuv_stage1
  1268. .endm
  1269. .if \fast_ld3 == 1
  1270. asm_function jsimd_\colorid\()_ycc_convert_neon
  1271. .else
  1272. asm_function jsimd_\colorid\()_ycc_convert_neon_slowld3
  1273. .endif
  1274. OUTPUT_WIDTH .req w0
  1275. INPUT_BUF .req x1
  1276. OUTPUT_BUF .req x2
  1277. OUTPUT_ROW .req w3
  1278. NUM_ROWS .req w4
  1279. OUTPUT_BUF0 .req x5
  1280. OUTPUT_BUF1 .req x6
  1281. OUTPUT_BUF2 .req x2 /* OUTPUT_BUF */
  1282. RGB .req x7
  1283. Y .req x9
  1284. U .req x10
  1285. V .req x11
  1286. N .req w12
1287. /* Load constants into v0.8h and v1.8h */
  1288. get_symbol_loc x13, Ljsimd_rgb_ycc_neon_consts
  1289. ld1 {v0.8h, v1.8h}, [x13]
  1290. ldr OUTPUT_BUF0, [OUTPUT_BUF]
  1291. ldr OUTPUT_BUF1, [OUTPUT_BUF, #8]
  1292. ldr OUTPUT_BUF2, [OUTPUT_BUF, #16]
  1293. .unreq OUTPUT_BUF
  1294. /* Save Neon registers */
  1295. sub sp, sp, #64
  1296. mov x9, sp
  1297. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
  1298. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
  1299. /* Outer loop over scanlines */
  1300. cmp NUM_ROWS, #1
  1301. b.lt 9f
  1302. 0:
  1303. ldr Y, [OUTPUT_BUF0, OUTPUT_ROW, uxtw #3]
  1304. ldr U, [OUTPUT_BUF1, OUTPUT_ROW, uxtw #3]
  1305. mov N, OUTPUT_WIDTH
  1306. ldr V, [OUTPUT_BUF2, OUTPUT_ROW, uxtw #3]
  1307. add OUTPUT_ROW, OUTPUT_ROW, #1
  1308. ldr RGB, [INPUT_BUF], #8
  1309. /* Inner loop over pixels */
  1310. subs N, N, #8
  1311. b.lt 3f
  1312. do_load \bpp, 8, \fast_ld3
  1313. do_rgb_to_yuv_stage1
  1314. subs N, N, #8
  1315. b.lt 2f
  1316. 1:
  1317. do_rgb_to_yuv_stage2_store_load_stage1 \fast_ld3
  1318. subs N, N, #8
  1319. b.ge 1b
  1320. 2:
  1321. do_rgb_to_yuv_stage2
  1322. do_store 8
  1323. tst N, #7
  1324. b.eq 8f
  1325. 3:
  1326. tbz N, #2, 3f
  1327. do_load \bpp, 4, \fast_ld3
  1328. 3:
  1329. tbz N, #1, 4f
  1330. do_load \bpp, 2, \fast_ld3
  1331. 4:
  1332. tbz N, #0, 5f
  1333. do_load \bpp, 1, \fast_ld3
  1334. 5:
  1335. do_rgb_to_yuv
  1336. tbz N, #2, 6f
  1337. do_store 4
  1338. 6:
  1339. tbz N, #1, 7f
  1340. do_store 2
  1341. 7:
  1342. tbz N, #0, 8f
  1343. do_store 1
  1344. 8:
  1345. subs NUM_ROWS, NUM_ROWS, #1
  1346. b.gt 0b
  1347. 9:
  1348. /* Restore all registers and return */
  1349. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
  1350. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
  1351. br x30
  1352. .unreq OUTPUT_WIDTH
  1353. .unreq OUTPUT_ROW
  1354. .unreq INPUT_BUF
  1355. .unreq NUM_ROWS
  1356. .unreq OUTPUT_BUF0
  1357. .unreq OUTPUT_BUF1
  1358. .unreq OUTPUT_BUF2
  1359. .unreq RGB
  1360. .unreq Y
  1361. .unreq U
  1362. .unreq V
  1363. .unreq N
  1364. .purgem do_rgb_to_yuv
  1365. .purgem do_rgb_to_yuv_stage1
  1366. .purgem do_rgb_to_yuv_stage2
  1367. .purgem do_rgb_to_yuv_stage2_store_load_stage1
  1368. .endm
  1369. /*--------------------------------- id ----- bpp R G B Fast LD3 */
  1370. generate_jsimd_rgb_ycc_convert_neon extrgb, 24, 0, 1, 2, 1
  1371. generate_jsimd_rgb_ycc_convert_neon extbgr, 24, 2, 1, 0, 1
  1372. generate_jsimd_rgb_ycc_convert_neon extrgbx, 32, 0, 1, 2, 1
  1373. generate_jsimd_rgb_ycc_convert_neon extbgrx, 32, 2, 1, 0, 1
  1374. generate_jsimd_rgb_ycc_convert_neon extxbgr, 32, 3, 2, 1, 1
  1375. generate_jsimd_rgb_ycc_convert_neon extxrgb, 32, 1, 2, 3, 1
  1376. generate_jsimd_rgb_ycc_convert_neon extrgb, 24, 0, 1, 2, 0
  1377. generate_jsimd_rgb_ycc_convert_neon extbgr, 24, 2, 1, 0, 0
  1378. .purgem do_load
  1379. .purgem do_store
  1380. /*****************************************************************************/
  1381. /*
  1382. * jsimd_fdct_islow_neon
  1383. *
  1384. * This file contains a slower but more accurate integer implementation of the
  1385. * forward DCT (Discrete Cosine Transform). The following code is based
1386. * directly on the IJG's original jfdctint.c; see jfdctint.c for
  1387. * more details.
  1388. *
  1389. * TODO: can be combined with 'jsimd_convsamp_neon' to get
  1390. * rid of a bunch of VLD1.16 instructions
  1391. */
  1392. #define CONST_BITS 13
  1393. #define PASS1_BITS 2
  1394. #define DESCALE_P1 (CONST_BITS - PASS1_BITS)
  1395. #define DESCALE_P2 (CONST_BITS + PASS1_BITS)
  1396. #define XFIX_P_0_298 v0.h[0]
  1397. #define XFIX_N_0_390 v0.h[1]
  1398. #define XFIX_P_0_541 v0.h[2]
  1399. #define XFIX_P_0_765 v0.h[3]
  1400. #define XFIX_N_0_899 v0.h[4]
  1401. #define XFIX_P_1_175 v0.h[5]
  1402. #define XFIX_P_1_501 v0.h[6]
  1403. #define XFIX_N_1_847 v0.h[7]
  1404. #define XFIX_N_1_961 v1.h[0]
  1405. #define XFIX_P_2_053 v1.h[1]
  1406. #define XFIX_N_2_562 v1.h[2]
  1407. #define XFIX_P_3_072 v1.h[3]
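/*
 * For reference, the fixed-point conventions used by the inline comments below
 * can be written in C as follows (a sketch; the actual constant table
 * Ljsimd_fdct_islow_neon_consts is defined elsewhere in this file):
 *
 *   #include <stdint.h>
 *   // CONST_BITS and PASS1_BITS are the #defines above.
 *   #define FIX(x)        ((int32_t)((x) * (1 << CONST_BITS) + 0.5))
 *   #define DESCALE(x, n) (((x) + ((int32_t)1 << ((n) - 1))) >> (n))
 *   // e.g. FIX(0.541196100) == 4433; the XFIX_* lane constants above are
 *   // presumably such FIX() values packed into v0.8h/v1.8h.
 */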
  1408. asm_function jsimd_fdct_islow_neon
  1409. DATA .req x0
  1410. TMP .req x9
  1411. /* Load constants */
  1412. get_symbol_loc TMP, Ljsimd_fdct_islow_neon_consts
  1413. ld1 {v0.8h, v1.8h}, [TMP]
  1414. /* Save Neon registers */
  1415. sub sp, sp, #64
  1416. mov x10, sp
  1417. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x10], 32
  1418. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x10], 32
  1419. /* Load all DATA into Neon registers with the following allocation:
1420. * (columns 0-3 of each row go in the low 64 bits of the register,
1421. * columns 4-7 in the high 64 bits)
1422. * row 0 -> v16.8h
1423. * row 1 -> v17.8h
1424. * row 2 -> v18.8h
1425. * row 3 -> v19.8h
1426. * row 4 -> v20.8h
1427. * row 5 -> v21.8h
1428. * row 6 -> v22.8h
1429. * row 7 -> v23.8h
  1430. */
  1431. ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
  1432. ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]
  1433. sub DATA, DATA, #64
  1434. /* Transpose */
  1435. transpose_8x8 v16, v17, v18, v19, v20, v21, v22, v23, v31, v2, v3, v4
  1436. /* 1-D FDCT */
  1437. add v24.8h, v16.8h, v23.8h /* tmp0 = dataptr[0] + dataptr[7]; */
  1438. sub v31.8h, v16.8h, v23.8h /* tmp7 = dataptr[0] - dataptr[7]; */
  1439. add v25.8h, v17.8h, v22.8h /* tmp1 = dataptr[1] + dataptr[6]; */
  1440. sub v30.8h, v17.8h, v22.8h /* tmp6 = dataptr[1] - dataptr[6]; */
  1441. add v26.8h, v18.8h, v21.8h /* tmp2 = dataptr[2] + dataptr[5]; */
  1442. sub v29.8h, v18.8h, v21.8h /* tmp5 = dataptr[2] - dataptr[5]; */
  1443. add v27.8h, v19.8h, v20.8h /* tmp3 = dataptr[3] + dataptr[4]; */
  1444. sub v28.8h, v19.8h, v20.8h /* tmp4 = dataptr[3] - dataptr[4]; */
  1445. /* even part */
  1446. add v8.8h, v24.8h, v27.8h /* tmp10 = tmp0 + tmp3; */
  1447. sub v9.8h, v24.8h, v27.8h /* tmp13 = tmp0 - tmp3; */
  1448. add v10.8h, v25.8h, v26.8h /* tmp11 = tmp1 + tmp2; */
  1449. sub v11.8h, v25.8h, v26.8h /* tmp12 = tmp1 - tmp2; */
  1450. add v16.8h, v8.8h, v10.8h /* tmp10 + tmp11 */
  1451. sub v20.8h, v8.8h, v10.8h /* tmp10 - tmp11 */
  1452. add v18.8h, v11.8h, v9.8h /* tmp12 + tmp13 */
  1453. shl v16.8h, v16.8h, #PASS1_BITS /* dataptr[0] = (DCTELEM)LEFT_SHIFT(tmp10 + tmp11, PASS1_BITS); */
  1454. shl v20.8h, v20.8h, #PASS1_BITS /* dataptr[4] = (DCTELEM)LEFT_SHIFT(tmp10 - tmp11, PASS1_BITS); */
  1455. smull2 v24.4s, v18.8h, XFIX_P_0_541 /* z1 hi = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
  1456. smull v18.4s, v18.4h, XFIX_P_0_541 /* z1 lo = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
  1457. mov v22.16b, v18.16b
  1458. mov v25.16b, v24.16b
  1459. smlal v18.4s, v9.4h, XFIX_P_0_765 /* lo z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
  1460. smlal2 v24.4s, v9.8h, XFIX_P_0_765 /* hi z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
  1461. smlal v22.4s, v11.4h, XFIX_N_1_847 /* lo z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
  1462. smlal2 v25.4s, v11.8h, XFIX_N_1_847 /* hi z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
  1463. rshrn v18.4h, v18.4s, #DESCALE_P1
  1464. rshrn v22.4h, v22.4s, #DESCALE_P1
  1465. rshrn2 v18.8h, v24.4s, #DESCALE_P1 /* dataptr[2] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp13, XFIX_P_0_765), CONST_BITS-PASS1_BITS); */
  1466. rshrn2 v22.8h, v25.4s, #DESCALE_P1 /* dataptr[6] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp12, XFIX_N_1_847), CONST_BITS-PASS1_BITS); */
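/*
 * This completes the even half of the 1-D pass.  Assembled from the inline
 * comments, the corresponding jfdctint.c-style reference C is roughly the
 * following (illustrative; it uses the FIX()/DESCALE() conventions sketched
 * near the #defines above, with the standard constant values):
 *
 *   static void fdct_even_part(int32_t tmp0, int32_t tmp1, int32_t tmp2,
 *                              int32_t tmp3, int16_t dataptr[8]) {
 *     int32_t tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3;
 *     int32_t tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2;
 *     int32_t z1;
 *     dataptr[0] = (int16_t)((tmp10 + tmp11) << PASS1_BITS);
 *     dataptr[4] = (int16_t)((tmp10 - tmp11) << PASS1_BITS);
 *     z1 = (tmp12 + tmp13) * 4433;                        // FIX(0.541196100)
 *     dataptr[2] = (int16_t)DESCALE(z1 + tmp13 * 6270,    // FIX(0.765366865)
 *                                   CONST_BITS - PASS1_BITS);
 *     dataptr[6] = (int16_t)DESCALE(z1 - tmp12 * 15137,   // FIX(1.847759065)
 *                                   CONST_BITS - PASS1_BITS);
 *   }
 */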
  1467. /* Odd part */
  1468. add v8.8h, v28.8h, v31.8h /* z1 = tmp4 + tmp7; */
  1469. add v9.8h, v29.8h, v30.8h /* z2 = tmp5 + tmp6; */
  1470. add v10.8h, v28.8h, v30.8h /* z3 = tmp4 + tmp6; */
  1471. add v11.8h, v29.8h, v31.8h /* z4 = tmp5 + tmp7; */
  1472. smull v4.4s, v10.4h, XFIX_P_1_175 /* z5 lo = z3 lo * XFIX_P_1_175 */
  1473. smull2 v5.4s, v10.8h, XFIX_P_1_175
  1474. smlal v4.4s, v11.4h, XFIX_P_1_175 /* z5 = MULTIPLY(z3 + z4, FIX_1_175875602); */
  1475. smlal2 v5.4s, v11.8h, XFIX_P_1_175
  1476. smull2 v24.4s, v28.8h, XFIX_P_0_298
  1477. smull2 v25.4s, v29.8h, XFIX_P_2_053
  1478. smull2 v26.4s, v30.8h, XFIX_P_3_072
  1479. smull2 v27.4s, v31.8h, XFIX_P_1_501
  1480. smull v28.4s, v28.4h, XFIX_P_0_298 /* tmp4 = MULTIPLY(tmp4, FIX_0_298631336); */
  1481. smull v29.4s, v29.4h, XFIX_P_2_053 /* tmp5 = MULTIPLY(tmp5, FIX_2_053119869); */
  1482. smull v30.4s, v30.4h, XFIX_P_3_072 /* tmp6 = MULTIPLY(tmp6, FIX_3_072711026); */
  1483. smull v31.4s, v31.4h, XFIX_P_1_501 /* tmp7 = MULTIPLY(tmp7, FIX_1_501321110); */
  1484. smull2 v12.4s, v8.8h, XFIX_N_0_899
  1485. smull2 v13.4s, v9.8h, XFIX_N_2_562
  1486. smull2 v14.4s, v10.8h, XFIX_N_1_961
  1487. smull2 v15.4s, v11.8h, XFIX_N_0_390
  1488. smull v8.4s, v8.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223); */
  1489. smull v9.4s, v9.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447); */
  1490. smull v10.4s, v10.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560); */
  1491. smull v11.4s, v11.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644); */
  1492. add v10.4s, v10.4s, v4.4s /* z3 += z5 */
  1493. add v14.4s, v14.4s, v5.4s
  1494. add v11.4s, v11.4s, v4.4s /* z4 += z5 */
  1495. add v15.4s, v15.4s, v5.4s
  1496. add v28.4s, v28.4s, v8.4s /* tmp4 += z1 */
  1497. add v24.4s, v24.4s, v12.4s
  1498. add v29.4s, v29.4s, v9.4s /* tmp5 += z2 */
  1499. add v25.4s, v25.4s, v13.4s
  1500. add v30.4s, v30.4s, v10.4s /* tmp6 += z3 */
  1501. add v26.4s, v26.4s, v14.4s
  1502. add v31.4s, v31.4s, v11.4s /* tmp7 += z4 */
  1503. add v27.4s, v27.4s, v15.4s
  1504. add v28.4s, v28.4s, v10.4s /* tmp4 += z3 */
  1505. add v24.4s, v24.4s, v14.4s
  1506. add v29.4s, v29.4s, v11.4s /* tmp5 += z4 */
  1507. add v25.4s, v25.4s, v15.4s
  1508. add v30.4s, v30.4s, v9.4s /* tmp6 += z2 */
  1509. add v26.4s, v26.4s, v13.4s
  1510. add v31.4s, v31.4s, v8.4s /* tmp7 += z1 */
  1511. add v27.4s, v27.4s, v12.4s
  1512. rshrn v23.4h, v28.4s, #DESCALE_P1
  1513. rshrn v21.4h, v29.4s, #DESCALE_P1
  1514. rshrn v19.4h, v30.4s, #DESCALE_P1
  1515. rshrn v17.4h, v31.4s, #DESCALE_P1
  1516. rshrn2 v23.8h, v24.4s, #DESCALE_P1 /* dataptr[7] = (DCTELEM)DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); */
  1517. rshrn2 v21.8h, v25.4s, #DESCALE_P1 /* dataptr[5] = (DCTELEM)DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); */
  1518. rshrn2 v19.8h, v26.4s, #DESCALE_P1 /* dataptr[3] = (DCTELEM)DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); */
  1519. rshrn2 v17.8h, v27.4s, #DESCALE_P1 /* dataptr[1] = (DCTELEM)DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); */
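/*
 * And the odd half of the pass, again as jfdctint.c-style reference C
 * (illustrative; constants are the standard FIX() values per the sketch above):
 *
 *   static void fdct_odd_part(int32_t tmp4, int32_t tmp5, int32_t tmp6,
 *                             int32_t tmp7, int16_t dataptr[8]) {
 *     int32_t z1 = tmp4 + tmp7, z2 = tmp5 + tmp6;
 *     int32_t z3 = tmp4 + tmp6, z4 = tmp5 + tmp7;
 *     int32_t z5 = (z3 + z4) * 9633;        // FIX(1.175875602)
 *     tmp4 *= 2446;                         // FIX(0.298631336)
 *     tmp5 *= 16819;                        // FIX(2.053119869)
 *     tmp6 *= 25172;                        // FIX(3.072711026)
 *     tmp7 *= 12299;                        // FIX(1.501321110)
 *     z1 *= -7373;                          // -FIX(0.899976223)
 *     z2 *= -20995;                         // -FIX(2.562915447)
 *     z3 *= -16069;                         // -FIX(1.961570560)
 *     z4 *= -3196;                          // -FIX(0.390180644)
 *     z3 += z5;
 *     z4 += z5;
 *     dataptr[7] = (int16_t)DESCALE(tmp4 + z1 + z3, CONST_BITS - PASS1_BITS);
 *     dataptr[5] = (int16_t)DESCALE(tmp5 + z2 + z4, CONST_BITS - PASS1_BITS);
 *     dataptr[3] = (int16_t)DESCALE(tmp6 + z2 + z3, CONST_BITS - PASS1_BITS);
 *     dataptr[1] = (int16_t)DESCALE(tmp7 + z1 + z4, CONST_BITS - PASS1_BITS);
 *   }
 */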
  1520. /* Transpose */
  1521. transpose_8x8 v16, v17, v18, v19, v20, v21, v22, v23, v31, v2, v3, v4
  1522. /* 1-D FDCT */
  1523. add v24.8h, v16.8h, v23.8h /* tmp0 = dataptr[0] + dataptr[7]; */
  1524. sub v31.8h, v16.8h, v23.8h /* tmp7 = dataptr[0] - dataptr[7]; */
  1525. add v25.8h, v17.8h, v22.8h /* tmp1 = dataptr[1] + dataptr[6]; */
  1526. sub v30.8h, v17.8h, v22.8h /* tmp6 = dataptr[1] - dataptr[6]; */
  1527. add v26.8h, v18.8h, v21.8h /* tmp2 = dataptr[2] + dataptr[5]; */
  1528. sub v29.8h, v18.8h, v21.8h /* tmp5 = dataptr[2] - dataptr[5]; */
  1529. add v27.8h, v19.8h, v20.8h /* tmp3 = dataptr[3] + dataptr[4]; */
  1530. sub v28.8h, v19.8h, v20.8h /* tmp4 = dataptr[3] - dataptr[4]; */
  1531. /* even part */
  1532. add v8.8h, v24.8h, v27.8h /* tmp10 = tmp0 + tmp3; */
  1533. sub v9.8h, v24.8h, v27.8h /* tmp13 = tmp0 - tmp3; */
  1534. add v10.8h, v25.8h, v26.8h /* tmp11 = tmp1 + tmp2; */
  1535. sub v11.8h, v25.8h, v26.8h /* tmp12 = tmp1 - tmp2; */
  1536. add v16.8h, v8.8h, v10.8h /* tmp10 + tmp11 */
  1537. sub v20.8h, v8.8h, v10.8h /* tmp10 - tmp11 */
  1538. add v18.8h, v11.8h, v9.8h /* tmp12 + tmp13 */
  1539. srshr v16.8h, v16.8h, #PASS1_BITS /* dataptr[0] = (DCTELEM)DESCALE(tmp10 + tmp11, PASS1_BITS); */
  1540. srshr v20.8h, v20.8h, #PASS1_BITS /* dataptr[4] = (DCTELEM)DESCALE(tmp10 - tmp11, PASS1_BITS); */
  1541. smull2 v24.4s, v18.8h, XFIX_P_0_541 /* z1 hi = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
  1542. smull v18.4s, v18.4h, XFIX_P_0_541 /* z1 lo = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
  1543. mov v22.16b, v18.16b
  1544. mov v25.16b, v24.16b
  1545. smlal v18.4s, v9.4h, XFIX_P_0_765 /* lo z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
  1546. smlal2 v24.4s, v9.8h, XFIX_P_0_765 /* hi z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
  1547. smlal v22.4s, v11.4h, XFIX_N_1_847 /* lo z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
  1548. smlal2 v25.4s, v11.8h, XFIX_N_1_847 /* hi z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
  1549. rshrn v18.4h, v18.4s, #DESCALE_P2
  1550. rshrn v22.4h, v22.4s, #DESCALE_P2
1551. rshrn2 v18.8h, v24.4s, #DESCALE_P2 /* dataptr[2] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp13, XFIX_P_0_765), CONST_BITS+PASS1_BITS); */
1552. rshrn2 v22.8h, v25.4s, #DESCALE_P2 /* dataptr[6] = (DCTELEM)DESCALE(z1 + MULTIPLY(tmp12, XFIX_N_1_847), CONST_BITS+PASS1_BITS); */
  1553. /* Odd part */
  1554. add v8.8h, v28.8h, v31.8h /* z1 = tmp4 + tmp7; */
  1555. add v9.8h, v29.8h, v30.8h /* z2 = tmp5 + tmp6; */
  1556. add v10.8h, v28.8h, v30.8h /* z3 = tmp4 + tmp6; */
  1557. add v11.8h, v29.8h, v31.8h /* z4 = tmp5 + tmp7; */
  1558. smull v4.4s, v10.4h, XFIX_P_1_175 /* z5 lo = z3 lo * XFIX_P_1_175 */
  1559. smull2 v5.4s, v10.8h, XFIX_P_1_175
  1560. smlal v4.4s, v11.4h, XFIX_P_1_175 /* z5 = MULTIPLY(z3 + z4, FIX_1_175875602); */
  1561. smlal2 v5.4s, v11.8h, XFIX_P_1_175
  1562. smull2 v24.4s, v28.8h, XFIX_P_0_298
  1563. smull2 v25.4s, v29.8h, XFIX_P_2_053
  1564. smull2 v26.4s, v30.8h, XFIX_P_3_072
  1565. smull2 v27.4s, v31.8h, XFIX_P_1_501
  1566. smull v28.4s, v28.4h, XFIX_P_0_298 /* tmp4 = MULTIPLY(tmp4, FIX_0_298631336); */
  1567. smull v29.4s, v29.4h, XFIX_P_2_053 /* tmp5 = MULTIPLY(tmp5, FIX_2_053119869); */
  1568. smull v30.4s, v30.4h, XFIX_P_3_072 /* tmp6 = MULTIPLY(tmp6, FIX_3_072711026); */
  1569. smull v31.4s, v31.4h, XFIX_P_1_501 /* tmp7 = MULTIPLY(tmp7, FIX_1_501321110); */
  1570. smull2 v12.4s, v8.8h, XFIX_N_0_899
  1571. smull2 v13.4s, v9.8h, XFIX_N_2_562
  1572. smull2 v14.4s, v10.8h, XFIX_N_1_961
  1573. smull2 v15.4s, v11.8h, XFIX_N_0_390
  1574. smull v8.4s, v8.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, -FIX_0_899976223); */
  1575. smull v9.4s, v9.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, -FIX_2_562915447); */
  1576. smull v10.4s, v10.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, -FIX_1_961570560); */
  1577. smull v11.4s, v11.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, -FIX_0_390180644); */
  1578. add v10.4s, v10.4s, v4.4s
  1579. add v14.4s, v14.4s, v5.4s
  1580. add v11.4s, v11.4s, v4.4s
  1581. add v15.4s, v15.4s, v5.4s
  1582. add v28.4s, v28.4s, v8.4s /* tmp4 += z1 */
  1583. add v24.4s, v24.4s, v12.4s
  1584. add v29.4s, v29.4s, v9.4s /* tmp5 += z2 */
  1585. add v25.4s, v25.4s, v13.4s
  1586. add v30.4s, v30.4s, v10.4s /* tmp6 += z3 */
  1587. add v26.4s, v26.4s, v14.4s
  1588. add v31.4s, v31.4s, v11.4s /* tmp7 += z4 */
  1589. add v27.4s, v27.4s, v15.4s
  1590. add v28.4s, v28.4s, v10.4s /* tmp4 += z3 */
  1591. add v24.4s, v24.4s, v14.4s
  1592. add v29.4s, v29.4s, v11.4s /* tmp5 += z4 */
  1593. add v25.4s, v25.4s, v15.4s
  1594. add v30.4s, v30.4s, v9.4s /* tmp6 += z2 */
  1595. add v26.4s, v26.4s, v13.4s
  1596. add v31.4s, v31.4s, v8.4s /* tmp7 += z1 */
  1597. add v27.4s, v27.4s, v12.4s
  1598. rshrn v23.4h, v28.4s, #DESCALE_P2
  1599. rshrn v21.4h, v29.4s, #DESCALE_P2
  1600. rshrn v19.4h, v30.4s, #DESCALE_P2
  1601. rshrn v17.4h, v31.4s, #DESCALE_P2
1602. rshrn2 v23.8h, v24.4s, #DESCALE_P2 /* dataptr[7] = (DCTELEM)DESCALE(tmp4 + z1 + z3, CONST_BITS+PASS1_BITS); */
1603. rshrn2 v21.8h, v25.4s, #DESCALE_P2 /* dataptr[5] = (DCTELEM)DESCALE(tmp5 + z2 + z4, CONST_BITS+PASS1_BITS); */
1604. rshrn2 v19.8h, v26.4s, #DESCALE_P2 /* dataptr[3] = (DCTELEM)DESCALE(tmp6 + z2 + z3, CONST_BITS+PASS1_BITS); */
1605. rshrn2 v17.8h, v27.4s, #DESCALE_P2 /* dataptr[1] = (DCTELEM)DESCALE(tmp7 + z1 + z4, CONST_BITS+PASS1_BITS); */
  1606. /* store results */
  1607. st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
  1608. st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]
  1609. /* Restore Neon registers */
  1610. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
  1611. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
  1612. br x30
  1613. .unreq DATA
  1614. .unreq TMP
  1615. #undef XFIX_P_0_298
  1616. #undef XFIX_N_0_390
  1617. #undef XFIX_P_0_541
  1618. #undef XFIX_P_0_765
  1619. #undef XFIX_N_0_899
  1620. #undef XFIX_P_1_175
  1621. #undef XFIX_P_1_501
  1622. #undef XFIX_N_1_847
  1623. #undef XFIX_N_1_961
  1624. #undef XFIX_P_2_053
  1625. #undef XFIX_N_2_562
  1626. #undef XFIX_P_3_072
  1627. /*****************************************************************************/
  1628. /*
  1629. * GLOBAL(JOCTET *)
  1630. * jsimd_huff_encode_one_block(working_state *state, JOCTET *buffer,
  1631. * JCOEFPTR block, int last_dc_val,
  1632. * c_derived_tbl *dctbl, c_derived_tbl *actbl)
  1633. *
  1634. */
  1635. BUFFER .req x1
  1636. PUT_BUFFER .req x6
  1637. PUT_BITS .req x7
  1638. PUT_BITSw .req w7
  1639. .macro emit_byte
  1640. sub PUT_BITS, PUT_BITS, #0x8
  1641. lsr x19, PUT_BUFFER, PUT_BITS
  1642. uxtb w19, w19
  1643. strb w19, [BUFFER, #1]!
  1644. cmp w19, #0xff
  1645. b.ne 14f
  1646. strb wzr, [BUFFER, #1]!
  1647. 14:
  1648. .endm
  1649. .macro put_bits CODE, SIZE
  1650. lsl PUT_BUFFER, PUT_BUFFER, \SIZE
  1651. add PUT_BITS, PUT_BITS, \SIZE
  1652. orr PUT_BUFFER, PUT_BUFFER, \CODE
  1653. .endm
  1654. .macro checkbuf31
  1655. cmp PUT_BITS, #0x20
  1656. b.lt 31f
  1657. emit_byte
  1658. emit_byte
  1659. emit_byte
  1660. emit_byte
  1661. 31:
  1662. .endm
  1663. .macro checkbuf47
  1664. cmp PUT_BITS, #0x30
  1665. b.lt 47f
  1666. emit_byte
  1667. emit_byte
  1668. emit_byte
  1669. emit_byte
  1670. emit_byte
  1671. emit_byte
  1672. 47:
  1673. .endm
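/*
 * The three macros above implement the usual JPEG entropy-coder bit buffer:
 * put_bits appends a code to the bottom of a 64-bit accumulator, emit_byte
 * flushes one byte from the top (inserting the mandatory 0x00 stuffing byte
 * after any 0xFF), and checkbuf31/checkbuf47 keep the accumulator from
 * overflowing.  A scalar C sketch of the same state machine (illustrative
 * only; the state variables mirror the register aliases above):
 *
 *   #include <stdint.h>
 *
 *   static uint64_t put_buffer;   // bit accumulator, newest bits at the bottom
 *   static int      put_bits;     // number of valid bits in put_buffer
 *   static uint8_t *buffer;       // output pointer
 *
 *   static void put_bits_c(uint32_t code, int size) {
 *     put_buffer = (put_buffer << size) | code;
 *     put_bits  += size;
 *   }
 *
 *   static void emit_byte_c(void) {
 *     put_bits -= 8;
 *     uint8_t byte = (uint8_t)(put_buffer >> put_bits);
 *     *buffer++ = byte;
 *     if (byte == 0xFF)           // JPEG byte stuffing
 *       *buffer++ = 0x00;
 *   }
 *
 *   static void checkbuf47_c(void) {   // checkbuf31 is the same with 32 / 4
 *     if (put_bits >= 48)
 *       for (int i = 0; i < 6; i++) emit_byte_c();
 *   }
 */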
  1674. .macro generate_jsimd_huff_encode_one_block fast_tbl
  1675. .if \fast_tbl == 1
  1676. asm_function jsimd_huff_encode_one_block_neon
  1677. .else
  1678. asm_function jsimd_huff_encode_one_block_neon_slowtbl
  1679. .endif
  1680. sub sp, sp, 272
1681. sub BUFFER, BUFFER, #0x1 /* BUFFER = buffer - 1 */
  1682. /* Save Arm registers */
  1683. stp x19, x20, [sp]
  1684. get_symbol_loc x15, Ljsimd_huff_encode_one_block_neon_consts
  1685. ldr PUT_BUFFER, [x0, #0x10]
  1686. ldr PUT_BITSw, [x0, #0x18]
  1687. ldrsh w12, [x2] /* load DC coeff in w12 */
  1688. /* prepare data */
  1689. .if \fast_tbl == 1
  1690. ld1 {v23.16b}, [x15], #16
  1691. ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x15], #64
  1692. ld1 {v4.16b, v5.16b, v6.16b, v7.16b}, [x15], #64
  1693. ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x15], #64
  1694. ld1 {v24.16b, v25.16b, v26.16b, v27.16b}, [x2], #64
  1695. ld1 {v28.16b, v29.16b, v30.16b, v31.16b}, [x2], #64
1696. sub w12, w12, w3 /* w12 = DC coeff - last_dc_val (w3 is not used afterwards) */
  1697. /* ZigZag 8x8 */
  1698. tbl v0.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v0.16b
  1699. tbl v1.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v1.16b
  1700. tbl v2.16b, {v25.16b, v26.16b, v27.16b, v28.16b}, v2.16b
  1701. tbl v3.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v3.16b
  1702. tbl v4.16b, {v28.16b, v29.16b, v30.16b, v31.16b}, v4.16b
  1703. tbl v5.16b, {v25.16b, v26.16b, v27.16b, v28.16b}, v5.16b
  1704. tbl v6.16b, {v27.16b, v28.16b, v29.16b, v30.16b}, v6.16b
  1705. tbl v7.16b, {v29.16b, v30.16b, v31.16b}, v7.16b
  1706. ins v0.h[0], w12
  1707. tbx v1.16b, {v28.16b}, v16.16b
  1708. tbx v2.16b, {v29.16b, v30.16b}, v17.16b
  1709. tbx v5.16b, {v29.16b, v30.16b}, v18.16b
  1710. tbx v6.16b, {v31.16b}, v19.16b
  1711. .else
  1712. add x13, x2, #0x22
1713. sub w12, w12, w3 /* w12 = DC coeff - last_dc_val (w3 is not used afterwards) */
  1714. ld1 {v23.16b}, [x15]
  1715. add x14, x2, #0x18
  1716. add x3, x2, #0x36
  1717. ins v0.h[0], w12
  1718. add x9, x2, #0x2
  1719. ld1 {v1.h}[0], [x13]
  1720. add x15, x2, #0x30
  1721. ld1 {v2.h}[0], [x14]
  1722. add x19, x2, #0x26
  1723. ld1 {v3.h}[0], [x3]
  1724. add x20, x2, #0x28
  1725. ld1 {v0.h}[1], [x9]
  1726. add x12, x2, #0x10
  1727. ld1 {v1.h}[1], [x15]
  1728. add x13, x2, #0x40
  1729. ld1 {v2.h}[1], [x19]
  1730. add x14, x2, #0x34
  1731. ld1 {v3.h}[1], [x20]
  1732. add x3, x2, #0x1a
  1733. ld1 {v0.h}[2], [x12]
  1734. add x9, x2, #0x20
  1735. ld1 {v1.h}[2], [x13]
  1736. add x15, x2, #0x32
  1737. ld1 {v2.h}[2], [x14]
  1738. add x19, x2, #0x42
  1739. ld1 {v3.h}[2], [x3]
  1740. add x20, x2, #0xc
  1741. ld1 {v0.h}[3], [x9]
  1742. add x12, x2, #0x12
  1743. ld1 {v1.h}[3], [x15]
  1744. add x13, x2, #0x24
  1745. ld1 {v2.h}[3], [x19]
  1746. add x14, x2, #0x50
  1747. ld1 {v3.h}[3], [x20]
  1748. add x3, x2, #0xe
  1749. ld1 {v0.h}[4], [x12]
  1750. add x9, x2, #0x4
  1751. ld1 {v1.h}[4], [x13]
  1752. add x15, x2, #0x16
  1753. ld1 {v2.h}[4], [x14]
  1754. add x19, x2, #0x60
  1755. ld1 {v3.h}[4], [x3]
  1756. add x20, x2, #0x1c
  1757. ld1 {v0.h}[5], [x9]
  1758. add x12, x2, #0x6
  1759. ld1 {v1.h}[5], [x15]
  1760. add x13, x2, #0x8
  1761. ld1 {v2.h}[5], [x19]
  1762. add x14, x2, #0x52
  1763. ld1 {v3.h}[5], [x20]
  1764. add x3, x2, #0x2a
  1765. ld1 {v0.h}[6], [x12]
  1766. add x9, x2, #0x14
  1767. ld1 {v1.h}[6], [x13]
  1768. add x15, x2, #0xa
  1769. ld1 {v2.h}[6], [x14]
  1770. add x19, x2, #0x44
  1771. ld1 {v3.h}[6], [x3]
  1772. add x20, x2, #0x38
  1773. ld1 {v0.h}[7], [x9]
  1774. add x12, x2, #0x46
  1775. ld1 {v1.h}[7], [x15]
  1776. add x13, x2, #0x3a
  1777. ld1 {v2.h}[7], [x19]
  1778. add x14, x2, #0x74
  1779. ld1 {v3.h}[7], [x20]
  1780. add x3, x2, #0x6a
  1781. ld1 {v4.h}[0], [x12]
  1782. add x9, x2, #0x54
  1783. ld1 {v5.h}[0], [x13]
  1784. add x15, x2, #0x2c
  1785. ld1 {v6.h}[0], [x14]
  1786. add x19, x2, #0x76
  1787. ld1 {v7.h}[0], [x3]
  1788. add x20, x2, #0x78
  1789. ld1 {v4.h}[1], [x9]
  1790. add x12, x2, #0x62
  1791. ld1 {v5.h}[1], [x15]
  1792. add x13, x2, #0x1e
  1793. ld1 {v6.h}[1], [x19]
  1794. add x14, x2, #0x68
  1795. ld1 {v7.h}[1], [x20]
  1796. add x3, x2, #0x7a
  1797. ld1 {v4.h}[2], [x12]
  1798. add x9, x2, #0x70
  1799. ld1 {v5.h}[2], [x13]
  1800. add x15, x2, #0x2e
  1801. ld1 {v6.h}[2], [x14]
  1802. add x19, x2, #0x5a
  1803. ld1 {v7.h}[2], [x3]
  1804. add x20, x2, #0x6c
  1805. ld1 {v4.h}[3], [x9]
  1806. add x12, x2, #0x72
  1807. ld1 {v5.h}[3], [x15]
  1808. add x13, x2, #0x3c
  1809. ld1 {v6.h}[3], [x19]
  1810. add x14, x2, #0x4c
  1811. ld1 {v7.h}[3], [x20]
  1812. add x3, x2, #0x5e
  1813. ld1 {v4.h}[4], [x12]
  1814. add x9, x2, #0x64
  1815. ld1 {v5.h}[4], [x13]
  1816. add x15, x2, #0x4a
  1817. ld1 {v6.h}[4], [x14]
  1818. add x19, x2, #0x3e
  1819. ld1 {v7.h}[4], [x3]
  1820. add x20, x2, #0x6e
  1821. ld1 {v4.h}[5], [x9]
  1822. add x12, x2, #0x56
  1823. ld1 {v5.h}[5], [x15]
  1824. add x13, x2, #0x58
  1825. ld1 {v6.h}[5], [x19]
  1826. add x14, x2, #0x4e
  1827. ld1 {v7.h}[5], [x20]
  1828. add x3, x2, #0x7c
  1829. ld1 {v4.h}[6], [x12]
  1830. add x9, x2, #0x48
  1831. ld1 {v5.h}[6], [x13]
  1832. add x15, x2, #0x66
  1833. ld1 {v6.h}[6], [x14]
  1834. add x19, x2, #0x5c
  1835. ld1 {v7.h}[6], [x3]
  1836. add x20, x2, #0x7e
  1837. ld1 {v4.h}[7], [x9]
  1838. ld1 {v5.h}[7], [x15]
  1839. ld1 {v6.h}[7], [x19]
  1840. ld1 {v7.h}[7], [x20]
  1841. .endif
  1842. cmlt v24.8h, v0.8h, #0
  1843. cmlt v25.8h, v1.8h, #0
  1844. cmlt v26.8h, v2.8h, #0
  1845. cmlt v27.8h, v3.8h, #0
  1846. cmlt v28.8h, v4.8h, #0
  1847. cmlt v29.8h, v5.8h, #0
  1848. cmlt v30.8h, v6.8h, #0
  1849. cmlt v31.8h, v7.8h, #0
  1850. abs v0.8h, v0.8h
  1851. abs v1.8h, v1.8h
  1852. abs v2.8h, v2.8h
  1853. abs v3.8h, v3.8h
  1854. abs v4.8h, v4.8h
  1855. abs v5.8h, v5.8h
  1856. abs v6.8h, v6.8h
  1857. abs v7.8h, v7.8h
  1858. eor v24.16b, v24.16b, v0.16b
  1859. eor v25.16b, v25.16b, v1.16b
  1860. eor v26.16b, v26.16b, v2.16b
  1861. eor v27.16b, v27.16b, v3.16b
  1862. eor v28.16b, v28.16b, v4.16b
  1863. eor v29.16b, v29.16b, v5.16b
  1864. eor v30.16b, v30.16b, v6.16b
  1865. eor v31.16b, v31.16b, v7.16b
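/*
 * The cmlt/abs/eor block above prepares each coefficient for Huffman coding:
 * the magnitude later feeds the bit-length computation, while
 * magnitude ^ sign-mask yields the bits that get appended to the code, i.e.
 * the value itself for non-negative coefficients and value - 1 for negative
 * ones.  A scalar C rendering (illustrative helper, not in the original):
 *
 *   #include <stdint.h>
 *
 *   static void prep_coef(int16_t coef, uint16_t *magnitude, uint16_t *bits) {
 *     uint16_t mask = (uint16_t)(coef < 0 ? 0xFFFF : 0x0000);  // cmlt
 *     uint16_t mag  = (uint16_t)(coef < 0 ? -coef : coef);     // abs
 *     *magnitude = mag;            // bit length of mag selects the symbol
 *     *bits      = mag ^ mask;     // == coef if coef >= 0, coef - 1 if coef < 0
 *   }
 */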
  1866. cmeq v16.8h, v0.8h, #0
  1867. cmeq v17.8h, v1.8h, #0
  1868. cmeq v18.8h, v2.8h, #0
  1869. cmeq v19.8h, v3.8h, #0
  1870. cmeq v20.8h, v4.8h, #0
  1871. cmeq v21.8h, v5.8h, #0
  1872. cmeq v22.8h, v6.8h, #0
  1873. xtn v16.8b, v16.8h
  1874. xtn v18.8b, v18.8h
  1875. xtn v20.8b, v20.8h
  1876. xtn v22.8b, v22.8h
  1877. umov w14, v0.h[0]
  1878. xtn2 v16.16b, v17.8h
  1879. umov w13, v24.h[0]
  1880. xtn2 v18.16b, v19.8h
  1881. clz w14, w14
  1882. xtn2 v20.16b, v21.8h
  1883. lsl w13, w13, w14
  1884. cmeq v17.8h, v7.8h, #0
  1885. sub w12, w14, #32
  1886. xtn2 v22.16b, v17.8h
  1887. lsr w13, w13, w14
  1888. and v16.16b, v16.16b, v23.16b
  1889. neg w12, w12
  1890. and v18.16b, v18.16b, v23.16b
1891. add x3, x4, #0x400 /* x3 = dctbl->ehufsi */
  1892. and v20.16b, v20.16b, v23.16b
  1893. add x15, sp, #0x90 /* x15 = t2 */
  1894. and v22.16b, v22.16b, v23.16b
  1895. ldr w10, [x4, x12, lsl #2]
  1896. addp v16.16b, v16.16b, v18.16b
  1897. ldrb w11, [x3, x12]
  1898. addp v20.16b, v20.16b, v22.16b
  1899. checkbuf47
  1900. addp v16.16b, v16.16b, v20.16b
  1901. put_bits x10, x11
  1902. addp v16.16b, v16.16b, v18.16b
  1903. checkbuf47
  1904. umov x9, v16.D[0]
  1905. put_bits x13, x12
  1906. cnt v17.8b, v16.8b
  1907. mvn x9, x9
  1908. addv B18, v17.8b
  1909. add x4, x5, #0x400 /* x4 = actbl->ehufsi */
  1910. umov w12, v18.b[0]
  1911. lsr x9, x9, #0x1 /* clear AC coeff */
  1912. ldr w13, [x5, #0x3c0] /* x13 = actbl->ehufco[0xf0] */
  1913. rbit x9, x9 /* x9 = index0 */
  1914. ldrb w14, [x4, #0xf0] /* x14 = actbl->ehufsi[0xf0] */
  1915. cmp w12, #(64-8)
  1916. add x11, sp, #16
  1917. b.lt 4f
  1918. cbz x9, 6f
  1919. st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x11], #64
  1920. st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x11], #64
  1921. st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [x11], #64
  1922. st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [x11], #64
  1923. 1:
  1924. clz x2, x9
  1925. add x15, x15, x2, lsl #1
  1926. lsl x9, x9, x2
  1927. ldrh w20, [x15, #-126]
  1928. 2:
  1929. cmp x2, #0x10
  1930. b.lt 3f
  1931. sub x2, x2, #0x10
  1932. checkbuf47
  1933. put_bits x13, x14
  1934. b 2b
  1935. 3:
  1936. clz w20, w20
  1937. ldrh w3, [x15, #2]!
  1938. sub w11, w20, #32
  1939. lsl w3, w3, w20
  1940. neg w11, w11
  1941. lsr w3, w3, w20
  1942. add x2, x11, x2, lsl #4
  1943. lsl x9, x9, #0x1
  1944. ldr w12, [x5, x2, lsl #2]
  1945. ldrb w10, [x4, x2]
  1946. checkbuf31
  1947. put_bits x12, x10
  1948. put_bits x3, x11
  1949. cbnz x9, 1b
  1950. b 6f
  1951. 4:
  1952. movi v21.8h, #0x0010
  1953. clz v0.8h, v0.8h
  1954. clz v1.8h, v1.8h
  1955. clz v2.8h, v2.8h
  1956. clz v3.8h, v3.8h
  1957. clz v4.8h, v4.8h
  1958. clz v5.8h, v5.8h
  1959. clz v6.8h, v6.8h
  1960. clz v7.8h, v7.8h
  1961. ushl v24.8h, v24.8h, v0.8h
  1962. ushl v25.8h, v25.8h, v1.8h
  1963. ushl v26.8h, v26.8h, v2.8h
  1964. ushl v27.8h, v27.8h, v3.8h
  1965. ushl v28.8h, v28.8h, v4.8h
  1966. ushl v29.8h, v29.8h, v5.8h
  1967. ushl v30.8h, v30.8h, v6.8h
  1968. ushl v31.8h, v31.8h, v7.8h
  1969. neg v0.8h, v0.8h
  1970. neg v1.8h, v1.8h
  1971. neg v2.8h, v2.8h
  1972. neg v3.8h, v3.8h
  1973. neg v4.8h, v4.8h
  1974. neg v5.8h, v5.8h
  1975. neg v6.8h, v6.8h
  1976. neg v7.8h, v7.8h
  1977. ushl v24.8h, v24.8h, v0.8h
  1978. ushl v25.8h, v25.8h, v1.8h
  1979. ushl v26.8h, v26.8h, v2.8h
  1980. ushl v27.8h, v27.8h, v3.8h
  1981. ushl v28.8h, v28.8h, v4.8h
  1982. ushl v29.8h, v29.8h, v5.8h
  1983. ushl v30.8h, v30.8h, v6.8h
  1984. ushl v31.8h, v31.8h, v7.8h
  1985. add v0.8h, v21.8h, v0.8h
  1986. add v1.8h, v21.8h, v1.8h
  1987. add v2.8h, v21.8h, v2.8h
  1988. add v3.8h, v21.8h, v3.8h
  1989. add v4.8h, v21.8h, v4.8h
  1990. add v5.8h, v21.8h, v5.8h
  1991. add v6.8h, v21.8h, v6.8h
  1992. add v7.8h, v21.8h, v7.8h
  1993. st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x11], #64
  1994. st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x11], #64
  1995. st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [x11], #64
  1996. st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [x11], #64
  1997. 1:
  1998. clz x2, x9
  1999. add x15, x15, x2, lsl #1
  2000. lsl x9, x9, x2
  2001. ldrh w11, [x15, #-126]
  2002. 2:
  2003. cmp x2, #0x10
  2004. b.lt 3f
  2005. sub x2, x2, #0x10
  2006. checkbuf47
  2007. put_bits x13, x14
  2008. b 2b
  2009. 3:
  2010. ldrh w3, [x15, #2]!
  2011. add x2, x11, x2, lsl #4
  2012. lsl x9, x9, #0x1
  2013. ldr w12, [x5, x2, lsl #2]
  2014. ldrb w10, [x4, x2]
  2015. checkbuf31
  2016. put_bits x12, x10
  2017. put_bits x3, x11
  2018. cbnz x9, 1b
  2019. 6:
  2020. add x13, sp, #0x10e
  2021. cmp x15, x13
  2022. b.hs 1f
  2023. ldr w12, [x5]
  2024. ldrb w14, [x4]
  2025. checkbuf47
  2026. put_bits x12, x14
  2027. 1:
  2028. str PUT_BUFFER, [x0, #0x10]
  2029. str PUT_BITSw, [x0, #0x18]
  2030. ldp x19, x20, [sp], 16
  2031. add x0, BUFFER, #0x1
  2032. add sp, sp, 256
  2033. br x30
  2034. .endm
  2035. generate_jsimd_huff_encode_one_block 1
  2036. generate_jsimd_huff_encode_one_block 0
  2037. .unreq BUFFER
  2038. .unreq PUT_BUFFER
  2039. .unreq PUT_BITS
  2040. .unreq PUT_BITSw
  2041. .purgem emit_byte
  2042. .purgem put_bits
  2043. .purgem checkbuf31
  2044. .purgem checkbuf47