dec_neon.c

  1. // Copyright 2012 Google Inc. All Rights Reserved.
  2. //
  3. // Use of this source code is governed by a BSD-style license
  4. // that can be found in the COPYING file in the root of the source
  5. // tree. An additional intellectual property rights grant can be found
  6. // in the file PATENTS. All contributing project authors may
  7. // be found in the AUTHORS file in the root of the source tree.
  8. // -----------------------------------------------------------------------------
  9. //
  10. // ARM NEON version of dsp functions and loop filtering.
  11. //
  12. // Authors: Somnath Banerjee (somnath@google.com)
  13. // Johann Koenig (johannkoenig@google.com)
  14. #include "./dsp.h"
  15. #if defined(WEBP_USE_NEON)
  16. #include "./neon.h"
  17. #include "../dec/vp8i_dec.h"
  18. //------------------------------------------------------------------------------
  19. // NxM Loading functions
  20. #if !defined(WORK_AROUND_GCC)
  21. // This intrinsics version makes gcc-4.6.3 crash during Load4x??() compilation
  22. // (register alloc, probably). The variants somewhat mitigate the problem, but
  23. // not quite. HFilter16i() remains problematic.
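// A note on the layout below: vld4_lane_u8 loads 4 consecutive bytes from
// each row and de-interleaves them on the fly, so out.val[k] ends up holding
// byte k of every row, i.e. one column of the 4x8 block.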
  24. static WEBP_INLINE uint8x8x4_t Load4x8_NEON(const uint8_t* const src,
  25. int stride) {
  26. const uint8x8_t zero = vdup_n_u8(0);
  27. uint8x8x4_t out;
  28. INIT_VECTOR4(out, zero, zero, zero, zero);
  29. out = vld4_lane_u8(src + 0 * stride, out, 0);
  30. out = vld4_lane_u8(src + 1 * stride, out, 1);
  31. out = vld4_lane_u8(src + 2 * stride, out, 2);
  32. out = vld4_lane_u8(src + 3 * stride, out, 3);
  33. out = vld4_lane_u8(src + 4 * stride, out, 4);
  34. out = vld4_lane_u8(src + 5 * stride, out, 5);
  35. out = vld4_lane_u8(src + 6 * stride, out, 6);
  36. out = vld4_lane_u8(src + 7 * stride, out, 7);
  37. return out;
  38. }
  39. static WEBP_INLINE void Load4x16_NEON(const uint8_t* const src, int stride,
  40. uint8x16_t* const p1,
  41. uint8x16_t* const p0,
  42. uint8x16_t* const q0,
  43. uint8x16_t* const q1) {
  44. // row0 = p1[0..7]|p0[0..7]|q0[0..7]|q1[0..7]
  45. // row8 = p1[8..15]|p0[8..15]|q0[8..15]|q1[8..15]
  46. const uint8x8x4_t row0 = Load4x8_NEON(src - 2 + 0 * stride, stride);
  47. const uint8x8x4_t row8 = Load4x8_NEON(src - 2 + 8 * stride, stride);
  48. *p1 = vcombine_u8(row0.val[0], row8.val[0]);
  49. *p0 = vcombine_u8(row0.val[1], row8.val[1]);
  50. *q0 = vcombine_u8(row0.val[2], row8.val[2]);
  51. *q1 = vcombine_u8(row0.val[3], row8.val[3]);
  52. }
  53. #else // WORK_AROUND_GCC
  54. #define LOADQ_LANE_32b(VALUE, LANE) do { \
  55. (VALUE) = vld1q_lane_u32((const uint32_t*)src, (VALUE), (LANE)); \
  56. src += stride; \
  57. } while (0)
  58. static WEBP_INLINE void Load4x16_NEON(const uint8_t* src, int stride,
  59. uint8x16_t* const p1,
  60. uint8x16_t* const p0,
  61. uint8x16_t* const q0,
  62. uint8x16_t* const q1) {
  63. const uint32x4_t zero = vdupq_n_u32(0);
  64. uint32x4x4_t in;
  65. INIT_VECTOR4(in, zero, zero, zero, zero);
  66. src -= 2;
  67. LOADQ_LANE_32b(in.val[0], 0);
  68. LOADQ_LANE_32b(in.val[1], 0);
  69. LOADQ_LANE_32b(in.val[2], 0);
  70. LOADQ_LANE_32b(in.val[3], 0);
  71. LOADQ_LANE_32b(in.val[0], 1);
  72. LOADQ_LANE_32b(in.val[1], 1);
  73. LOADQ_LANE_32b(in.val[2], 1);
  74. LOADQ_LANE_32b(in.val[3], 1);
  75. LOADQ_LANE_32b(in.val[0], 2);
  76. LOADQ_LANE_32b(in.val[1], 2);
  77. LOADQ_LANE_32b(in.val[2], 2);
  78. LOADQ_LANE_32b(in.val[3], 2);
  79. LOADQ_LANE_32b(in.val[0], 3);
  80. LOADQ_LANE_32b(in.val[1], 3);
  81. LOADQ_LANE_32b(in.val[2], 3);
  82. LOADQ_LANE_32b(in.val[3], 3);
  83. // Transpose four 4x4 parts:
  84. {
  85. const uint8x16x2_t row01 = vtrnq_u8(vreinterpretq_u8_u32(in.val[0]),
  86. vreinterpretq_u8_u32(in.val[1]));
  87. const uint8x16x2_t row23 = vtrnq_u8(vreinterpretq_u8_u32(in.val[2]),
  88. vreinterpretq_u8_u32(in.val[3]));
  89. const uint16x8x2_t row02 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[0]),
  90. vreinterpretq_u16_u8(row23.val[0]));
  91. const uint16x8x2_t row13 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[1]),
  92. vreinterpretq_u16_u8(row23.val[1]));
  93. *p1 = vreinterpretq_u8_u16(row02.val[0]);
  94. *p0 = vreinterpretq_u8_u16(row13.val[0]);
  95. *q0 = vreinterpretq_u8_u16(row02.val[1]);
  96. *q1 = vreinterpretq_u8_u16(row13.val[1]);
  97. }
  98. }
  99. #undef LOADQ_LANE_32b
  100. #endif // !WORK_AROUND_GCC
  101. static WEBP_INLINE void Load8x16_NEON(
  102. const uint8_t* const src, int stride,
  103. uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1,
  104. uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1,
  105. uint8x16_t* const q2, uint8x16_t* const q3) {
  106. Load4x16_NEON(src - 2, stride, p3, p2, p1, p0);
  107. Load4x16_NEON(src + 2, stride, q0, q1, q2, q3);
  108. }
  109. static WEBP_INLINE void Load16x4_NEON(const uint8_t* const src, int stride,
  110. uint8x16_t* const p1,
  111. uint8x16_t* const p0,
  112. uint8x16_t* const q0,
  113. uint8x16_t* const q1) {
  114. *p1 = vld1q_u8(src - 2 * stride);
  115. *p0 = vld1q_u8(src - 1 * stride);
  116. *q0 = vld1q_u8(src + 0 * stride);
  117. *q1 = vld1q_u8(src + 1 * stride);
  118. }
  119. static WEBP_INLINE void Load16x8_NEON(
  120. const uint8_t* const src, int stride,
  121. uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1,
  122. uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1,
  123. uint8x16_t* const q2, uint8x16_t* const q3) {
  124. Load16x4_NEON(src - 2 * stride, stride, p3, p2, p1, p0);
  125. Load16x4_NEON(src + 2 * stride, stride, q0, q1, q2, q3);
  126. }
  127. static WEBP_INLINE void Load8x8x2_NEON(
  128. const uint8_t* const u, const uint8_t* const v, int stride,
  129. uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1,
  130. uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1,
  131. uint8x16_t* const q2, uint8x16_t* const q3) {
  132. // We pack the 8x8 u-samples in the lower half of the uint8x16_t destination
  133. // and the v-samples on the higher half.
  134. *p3 = vcombine_u8(vld1_u8(u - 4 * stride), vld1_u8(v - 4 * stride));
  135. *p2 = vcombine_u8(vld1_u8(u - 3 * stride), vld1_u8(v - 3 * stride));
  136. *p1 = vcombine_u8(vld1_u8(u - 2 * stride), vld1_u8(v - 2 * stride));
  137. *p0 = vcombine_u8(vld1_u8(u - 1 * stride), vld1_u8(v - 1 * stride));
  138. *q0 = vcombine_u8(vld1_u8(u + 0 * stride), vld1_u8(v + 0 * stride));
  139. *q1 = vcombine_u8(vld1_u8(u + 1 * stride), vld1_u8(v + 1 * stride));
  140. *q2 = vcombine_u8(vld1_u8(u + 2 * stride), vld1_u8(v + 2 * stride));
  141. *q3 = vcombine_u8(vld1_u8(u + 3 * stride), vld1_u8(v + 3 * stride));
  142. }
  143. #if !defined(WORK_AROUND_GCC)
  144. #define LOAD_UV_8(ROW) \
  145. vcombine_u8(vld1_u8(u - 4 + (ROW) * stride), vld1_u8(v - 4 + (ROW) * stride))
  146. static WEBP_INLINE void Load8x8x2T_NEON(
  147. const uint8_t* const u, const uint8_t* const v, int stride,
  148. uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1,
  149. uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1,
  150. uint8x16_t* const q2, uint8x16_t* const q3) {
  151. // We pack the 8x8 u-samples in the lower half of the uint8x16_t destination
  152. // and the v-samples on the higher half.
  153. const uint8x16_t row0 = LOAD_UV_8(0);
  154. const uint8x16_t row1 = LOAD_UV_8(1);
  155. const uint8x16_t row2 = LOAD_UV_8(2);
  156. const uint8x16_t row3 = LOAD_UV_8(3);
  157. const uint8x16_t row4 = LOAD_UV_8(4);
  158. const uint8x16_t row5 = LOAD_UV_8(5);
  159. const uint8x16_t row6 = LOAD_UV_8(6);
  160. const uint8x16_t row7 = LOAD_UV_8(7);
  161. // Perform two side-by-side 8x8 transposes
  162. // u00 u01 u02 u03 u04 u05 u06 u07 | v00 v01 v02 v03 v04 v05 v06 v07
  163. // u10 u11 u12 u13 u14 u15 u16 u17 | v10 v11 v12 ...
  164. // u20 u21 u22 u23 u24 u25 u26 u27 | v20 v21 ...
  165. // u30 u31 u32 u33 u34 u35 u36 u37 | ...
  166. // u40 u41 u42 u43 u44 u45 u46 u47 | ...
  167. // u50 u51 u52 u53 u54 u55 u56 u57 | ...
  168. // u60 u61 u62 u63 u64 u65 u66 u67 | v60 ...
  169. // u70 u71 u72 u73 u74 u75 u76 u77 | v70 v71 v72 ...
  170. const uint8x16x2_t row01 = vtrnq_u8(row0, row1); // u00 u10 u02 u12 ...
  171. // u01 u11 u03 u13 ...
  172. const uint8x16x2_t row23 = vtrnq_u8(row2, row3); // u20 u30 u22 u32 ...
  173. // u21 u31 u23 u33 ...
  174. const uint8x16x2_t row45 = vtrnq_u8(row4, row5); // ...
  175. const uint8x16x2_t row67 = vtrnq_u8(row6, row7); // ...
  176. const uint16x8x2_t row02 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[0]),
  177. vreinterpretq_u16_u8(row23.val[0]));
  178. const uint16x8x2_t row13 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[1]),
  179. vreinterpretq_u16_u8(row23.val[1]));
  180. const uint16x8x2_t row46 = vtrnq_u16(vreinterpretq_u16_u8(row45.val[0]),
  181. vreinterpretq_u16_u8(row67.val[0]));
  182. const uint16x8x2_t row57 = vtrnq_u16(vreinterpretq_u16_u8(row45.val[1]),
  183. vreinterpretq_u16_u8(row67.val[1]));
  184. const uint32x4x2_t row04 = vtrnq_u32(vreinterpretq_u32_u16(row02.val[0]),
  185. vreinterpretq_u32_u16(row46.val[0]));
  186. const uint32x4x2_t row26 = vtrnq_u32(vreinterpretq_u32_u16(row02.val[1]),
  187. vreinterpretq_u32_u16(row46.val[1]));
  188. const uint32x4x2_t row15 = vtrnq_u32(vreinterpretq_u32_u16(row13.val[0]),
  189. vreinterpretq_u32_u16(row57.val[0]));
  190. const uint32x4x2_t row37 = vtrnq_u32(vreinterpretq_u32_u16(row13.val[1]),
  191. vreinterpretq_u32_u16(row57.val[1]));
  192. *p3 = vreinterpretq_u8_u32(row04.val[0]);
  193. *p2 = vreinterpretq_u8_u32(row15.val[0]);
  194. *p1 = vreinterpretq_u8_u32(row26.val[0]);
  195. *p0 = vreinterpretq_u8_u32(row37.val[0]);
  196. *q0 = vreinterpretq_u8_u32(row04.val[1]);
  197. *q1 = vreinterpretq_u8_u32(row15.val[1]);
  198. *q2 = vreinterpretq_u8_u32(row26.val[1]);
  199. *q3 = vreinterpretq_u8_u32(row37.val[1]);
  200. }
  201. #undef LOAD_UV_8
  202. #endif // !WORK_AROUND_GCC
  203. static WEBP_INLINE void Store2x8_NEON(const uint8x8x2_t v,
  204. uint8_t* const dst, int stride) {
  205. vst2_lane_u8(dst + 0 * stride, v, 0);
  206. vst2_lane_u8(dst + 1 * stride, v, 1);
  207. vst2_lane_u8(dst + 2 * stride, v, 2);
  208. vst2_lane_u8(dst + 3 * stride, v, 3);
  209. vst2_lane_u8(dst + 4 * stride, v, 4);
  210. vst2_lane_u8(dst + 5 * stride, v, 5);
  211. vst2_lane_u8(dst + 6 * stride, v, 6);
  212. vst2_lane_u8(dst + 7 * stride, v, 7);
  213. }
  214. static WEBP_INLINE void Store2x16_NEON(const uint8x16_t p0, const uint8x16_t q0,
  215. uint8_t* const dst, int stride) {
  216. uint8x8x2_t lo, hi;
  217. lo.val[0] = vget_low_u8(p0);
  218. lo.val[1] = vget_low_u8(q0);
  219. hi.val[0] = vget_high_u8(p0);
  220. hi.val[1] = vget_high_u8(q0);
  221. Store2x8_NEON(lo, dst - 1 + 0 * stride, stride);
  222. Store2x8_NEON(hi, dst - 1 + 8 * stride, stride);
  223. }
  224. #if !defined(WORK_AROUND_GCC)
  225. static WEBP_INLINE void Store4x8_NEON(const uint8x8x4_t v,
  226. uint8_t* const dst, int stride) {
  227. vst4_lane_u8(dst + 0 * stride, v, 0);
  228. vst4_lane_u8(dst + 1 * stride, v, 1);
  229. vst4_lane_u8(dst + 2 * stride, v, 2);
  230. vst4_lane_u8(dst + 3 * stride, v, 3);
  231. vst4_lane_u8(dst + 4 * stride, v, 4);
  232. vst4_lane_u8(dst + 5 * stride, v, 5);
  233. vst4_lane_u8(dst + 6 * stride, v, 6);
  234. vst4_lane_u8(dst + 7 * stride, v, 7);
  235. }
  236. static WEBP_INLINE void Store4x16_NEON(const uint8x16_t p1, const uint8x16_t p0,
  237. const uint8x16_t q0, const uint8x16_t q1,
  238. uint8_t* const dst, int stride) {
  239. uint8x8x4_t lo, hi;
  240. INIT_VECTOR4(lo,
  241. vget_low_u8(p1), vget_low_u8(p0),
  242. vget_low_u8(q0), vget_low_u8(q1));
  243. INIT_VECTOR4(hi,
  244. vget_high_u8(p1), vget_high_u8(p0),
  245. vget_high_u8(q0), vget_high_u8(q1));
  246. Store4x8_NEON(lo, dst - 2 + 0 * stride, stride);
  247. Store4x8_NEON(hi, dst - 2 + 8 * stride, stride);
  248. }
  249. #endif // !WORK_AROUND_GCC
  250. static WEBP_INLINE void Store16x2_NEON(const uint8x16_t p0, const uint8x16_t q0,
  251. uint8_t* const dst, int stride) {
  252. vst1q_u8(dst - stride, p0);
  253. vst1q_u8(dst, q0);
  254. }
  255. static WEBP_INLINE void Store16x4_NEON(const uint8x16_t p1, const uint8x16_t p0,
  256. const uint8x16_t q0, const uint8x16_t q1,
  257. uint8_t* const dst, int stride) {
  258. Store16x2_NEON(p1, p0, dst - stride, stride);
  259. Store16x2_NEON(q0, q1, dst + stride, stride);
  260. }
  261. static WEBP_INLINE void Store8x2x2_NEON(const uint8x16_t p0,
  262. const uint8x16_t q0,
  263. uint8_t* const u, uint8_t* const v,
  264. int stride) {
  265. // p0 and q0 contain the u+v samples packed in low/high halves.
  266. vst1_u8(u - stride, vget_low_u8(p0));
  267. vst1_u8(u, vget_low_u8(q0));
  268. vst1_u8(v - stride, vget_high_u8(p0));
  269. vst1_u8(v, vget_high_u8(q0));
  270. }
  271. static WEBP_INLINE void Store8x4x2_NEON(const uint8x16_t p1,
  272. const uint8x16_t p0,
  273. const uint8x16_t q0,
  274. const uint8x16_t q1,
  275. uint8_t* const u, uint8_t* const v,
  276. int stride) {
  277. // The p1...q1 registers contain the u+v samples packed in low/high halves.
  278. Store8x2x2_NEON(p1, p0, u - stride, v - stride, stride);
  279. Store8x2x2_NEON(q0, q1, u + stride, v + stride, stride);
  280. }
  281. #if !defined(WORK_AROUND_GCC)
  282. #define STORE6_LANE(DST, VAL0, VAL1, LANE) do { \
  283. vst3_lane_u8((DST) - 3, (VAL0), (LANE)); \
  284. vst3_lane_u8((DST) + 0, (VAL1), (LANE)); \
  285. (DST) += stride; \
  286. } while (0)
  287. static WEBP_INLINE void Store6x8x2_NEON(
  288. const uint8x16_t p2, const uint8x16_t p1, const uint8x16_t p0,
  289. const uint8x16_t q0, const uint8x16_t q1, const uint8x16_t q2,
  290. uint8_t* u, uint8_t* v, int stride) {
  291. uint8x8x3_t u0, u1, v0, v1;
  292. INIT_VECTOR3(u0, vget_low_u8(p2), vget_low_u8(p1), vget_low_u8(p0));
  293. INIT_VECTOR3(u1, vget_low_u8(q0), vget_low_u8(q1), vget_low_u8(q2));
  294. INIT_VECTOR3(v0, vget_high_u8(p2), vget_high_u8(p1), vget_high_u8(p0));
  295. INIT_VECTOR3(v1, vget_high_u8(q0), vget_high_u8(q1), vget_high_u8(q2));
  296. STORE6_LANE(u, u0, u1, 0);
  297. STORE6_LANE(u, u0, u1, 1);
  298. STORE6_LANE(u, u0, u1, 2);
  299. STORE6_LANE(u, u0, u1, 3);
  300. STORE6_LANE(u, u0, u1, 4);
  301. STORE6_LANE(u, u0, u1, 5);
  302. STORE6_LANE(u, u0, u1, 6);
  303. STORE6_LANE(u, u0, u1, 7);
  304. STORE6_LANE(v, v0, v1, 0);
  305. STORE6_LANE(v, v0, v1, 1);
  306. STORE6_LANE(v, v0, v1, 2);
  307. STORE6_LANE(v, v0, v1, 3);
  308. STORE6_LANE(v, v0, v1, 4);
  309. STORE6_LANE(v, v0, v1, 5);
  310. STORE6_LANE(v, v0, v1, 6);
  311. STORE6_LANE(v, v0, v1, 7);
  312. }
  313. #undef STORE6_LANE
  314. static WEBP_INLINE void Store4x8x2_NEON(const uint8x16_t p1,
  315. const uint8x16_t p0,
  316. const uint8x16_t q0,
  317. const uint8x16_t q1,
  318. uint8_t* const u, uint8_t* const v,
  319. int stride) {
  320. uint8x8x4_t u0, v0;
  321. INIT_VECTOR4(u0,
  322. vget_low_u8(p1), vget_low_u8(p0),
  323. vget_low_u8(q0), vget_low_u8(q1));
  324. INIT_VECTOR4(v0,
  325. vget_high_u8(p1), vget_high_u8(p0),
  326. vget_high_u8(q0), vget_high_u8(q1));
  327. vst4_lane_u8(u - 2 + 0 * stride, u0, 0);
  328. vst4_lane_u8(u - 2 + 1 * stride, u0, 1);
  329. vst4_lane_u8(u - 2 + 2 * stride, u0, 2);
  330. vst4_lane_u8(u - 2 + 3 * stride, u0, 3);
  331. vst4_lane_u8(u - 2 + 4 * stride, u0, 4);
  332. vst4_lane_u8(u - 2 + 5 * stride, u0, 5);
  333. vst4_lane_u8(u - 2 + 6 * stride, u0, 6);
  334. vst4_lane_u8(u - 2 + 7 * stride, u0, 7);
  335. vst4_lane_u8(v - 2 + 0 * stride, v0, 0);
  336. vst4_lane_u8(v - 2 + 1 * stride, v0, 1);
  337. vst4_lane_u8(v - 2 + 2 * stride, v0, 2);
  338. vst4_lane_u8(v - 2 + 3 * stride, v0, 3);
  339. vst4_lane_u8(v - 2 + 4 * stride, v0, 4);
  340. vst4_lane_u8(v - 2 + 5 * stride, v0, 5);
  341. vst4_lane_u8(v - 2 + 6 * stride, v0, 6);
  342. vst4_lane_u8(v - 2 + 7 * stride, v0, 7);
  343. }
  344. #endif // !WORK_AROUND_GCC
  345. // Zero extend 'v' to an int16x8_t.
  346. static WEBP_INLINE int16x8_t ConvertU8ToS16_NEON(uint8x8_t v) {
  347. return vreinterpretq_s16_u16(vmovl_u8(v));
  348. }
  349. // Performs unsigned 8b saturation on 'dst01' and 'dst23' storing the result
  350. // to the corresponding rows of 'dst'.
  351. static WEBP_INLINE void SaturateAndStore4x4_NEON(uint8_t* const dst,
  352. const int16x8_t dst01,
  353. const int16x8_t dst23) {
  354. // Unsigned saturate to 8b.
  355. const uint8x8_t dst01_u8 = vqmovun_s16(dst01);
  356. const uint8x8_t dst23_u8 = vqmovun_s16(dst23);
  357. // Store the results.
  358. vst1_lane_u32((uint32_t*)(dst + 0 * BPS), vreinterpret_u32_u8(dst01_u8), 0);
  359. vst1_lane_u32((uint32_t*)(dst + 1 * BPS), vreinterpret_u32_u8(dst01_u8), 1);
  360. vst1_lane_u32((uint32_t*)(dst + 2 * BPS), vreinterpret_u32_u8(dst23_u8), 0);
  361. vst1_lane_u32((uint32_t*)(dst + 3 * BPS), vreinterpret_u32_u8(dst23_u8), 1);
  362. }
  363. static WEBP_INLINE void Add4x4_NEON(const int16x8_t row01,
  364. const int16x8_t row23,
  365. uint8_t* const dst) {
  366. uint32x2_t dst01 = vdup_n_u32(0);
  367. uint32x2_t dst23 = vdup_n_u32(0);
  368. // Load the source pixels.
  369. dst01 = vld1_lane_u32((uint32_t*)(dst + 0 * BPS), dst01, 0);
  370. dst23 = vld1_lane_u32((uint32_t*)(dst + 2 * BPS), dst23, 0);
  371. dst01 = vld1_lane_u32((uint32_t*)(dst + 1 * BPS), dst01, 1);
  372. dst23 = vld1_lane_u32((uint32_t*)(dst + 3 * BPS), dst23, 1);
  373. {
  374. // Convert to 16b.
  375. const int16x8_t dst01_s16 = ConvertU8ToS16_NEON(vreinterpret_u8_u32(dst01));
  376. const int16x8_t dst23_s16 = ConvertU8ToS16_NEON(vreinterpret_u8_u32(dst23));
  377. // Descale with rounding.
  378. const int16x8_t out01 = vrsraq_n_s16(dst01_s16, row01, 3);
  379. const int16x8_t out23 = vrsraq_n_s16(dst23_s16, row23, 3);
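    // (vrsraq_n_s16(d, r, 3) computes d + ((r + 4) >> 3), i.e. the rounded
    // descale and the accumulation with the destination in one instruction.)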
  380. // Add the inverse transform.
  381. SaturateAndStore4x4_NEON(dst, out01, out23);
  382. }
  383. }
  384. //-----------------------------------------------------------------------------
  385. // Simple In-loop filtering (Paragraph 15.2)
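// NeedsFilter_NEON() evaluates the simple-filter threshold test on 16 pixels
// at once: a lane of the returned mask is 0xFF (filter this pixel) when
// 2 * abs(p0 - q0) + abs(p1 - q1) / 2 <= thresh, and 0x00 otherwise.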
  386. static uint8x16_t NeedsFilter_NEON(const uint8x16_t p1, const uint8x16_t p0,
  387. const uint8x16_t q0, const uint8x16_t q1,
  388. int thresh) {
  389. const uint8x16_t thresh_v = vdupq_n_u8((uint8_t)thresh);
  390. const uint8x16_t a_p0_q0 = vabdq_u8(p0, q0); // abs(p0-q0)
  391. const uint8x16_t a_p1_q1 = vabdq_u8(p1, q1); // abs(p1-q1)
  392. const uint8x16_t a_p0_q0_2 = vqaddq_u8(a_p0_q0, a_p0_q0); // 2 * abs(p0-q0)
  393. const uint8x16_t a_p1_q1_2 = vshrq_n_u8(a_p1_q1, 1); // abs(p1-q1) / 2
  394. const uint8x16_t sum = vqaddq_u8(a_p0_q0_2, a_p1_q1_2);
  395. const uint8x16_t mask = vcgeq_u8(thresh_v, sum);
  396. return mask;
  397. }
  398. static int8x16_t FlipSign_NEON(const uint8x16_t v) {
  399. const uint8x16_t sign_bit = vdupq_n_u8(0x80);
  400. return vreinterpretq_s8_u8(veorq_u8(v, sign_bit));
  401. }
  402. static uint8x16_t FlipSignBack_NEON(const int8x16_t v) {
  403. const int8x16_t sign_bit = vdupq_n_s8(0x80);
  404. return vreinterpretq_u8_s8(veorq_s8(v, sign_bit));
  405. }
  406. static int8x16_t GetBaseDelta_NEON(const int8x16_t p1, const int8x16_t p0,
  407. const int8x16_t q0, const int8x16_t q1) {
  408. const int8x16_t q0_p0 = vqsubq_s8(q0, p0); // (q0-p0)
  409. const int8x16_t p1_q1 = vqsubq_s8(p1, q1); // (p1-q1)
  410. const int8x16_t s1 = vqaddq_s8(p1_q1, q0_p0); // (p1-q1) + 1 * (q0 - p0)
  411. const int8x16_t s2 = vqaddq_s8(q0_p0, s1); // (p1-q1) + 2 * (q0 - p0)
  412. const int8x16_t s3 = vqaddq_s8(q0_p0, s2); // (p1-q1) + 3 * (q0 - p0)
  413. return s3;
  414. }
  415. static int8x16_t GetBaseDelta0_NEON(const int8x16_t p0, const int8x16_t q0) {
  416. const int8x16_t q0_p0 = vqsubq_s8(q0, p0); // (q0-p0)
  417. const int8x16_t s1 = vqaddq_s8(q0_p0, q0_p0); // 2 * (q0 - p0)
  418. const int8x16_t s2 = vqaddq_s8(q0_p0, s1); // 3 * (q0 - p0)
  419. return s2;
  420. }
  421. //------------------------------------------------------------------------------
  422. static void ApplyFilter2NoFlip_NEON(const int8x16_t p0s, const int8x16_t q0s,
  423. const int8x16_t delta,
  424. int8x16_t* const op0,
  425. int8x16_t* const oq0) {
  426. const int8x16_t kCst3 = vdupq_n_s8(0x03);
  427. const int8x16_t kCst4 = vdupq_n_s8(0x04);
  428. const int8x16_t delta_p3 = vqaddq_s8(delta, kCst3);
  429. const int8x16_t delta_p4 = vqaddq_s8(delta, kCst4);
  430. const int8x16_t delta3 = vshrq_n_s8(delta_p3, 3);
  431. const int8x16_t delta4 = vshrq_n_s8(delta_p4, 3);
  432. *op0 = vqaddq_s8(p0s, delta3);
  433. *oq0 = vqsubq_s8(q0s, delta4);
  434. }
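// The +3/+4 split above implements the VP8 rounding: p0 += (delta + 3) >> 3
// and q0 -= (delta + 4) >> 3, so the correction is shared between the two
// pixels with at most one unit of asymmetry.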
  435. #if defined(WEBP_USE_INTRINSICS)
  436. static void ApplyFilter2_NEON(const int8x16_t p0s, const int8x16_t q0s,
  437. const int8x16_t delta,
  438. uint8x16_t* const op0, uint8x16_t* const oq0) {
  439. const int8x16_t kCst3 = vdupq_n_s8(0x03);
  440. const int8x16_t kCst4 = vdupq_n_s8(0x04);
  441. const int8x16_t delta_p3 = vqaddq_s8(delta, kCst3);
  442. const int8x16_t delta_p4 = vqaddq_s8(delta, kCst4);
  443. const int8x16_t delta3 = vshrq_n_s8(delta_p3, 3);
  444. const int8x16_t delta4 = vshrq_n_s8(delta_p4, 3);
  445. const int8x16_t sp0 = vqaddq_s8(p0s, delta3);
  446. const int8x16_t sq0 = vqsubq_s8(q0s, delta4);
  447. *op0 = FlipSignBack_NEON(sp0);
  448. *oq0 = FlipSignBack_NEON(sq0);
  449. }
  450. static void DoFilter2_NEON(const uint8x16_t p1, const uint8x16_t p0,
  451. const uint8x16_t q0, const uint8x16_t q1,
  452. const uint8x16_t mask,
  453. uint8x16_t* const op0, uint8x16_t* const oq0) {
  454. const int8x16_t p1s = FlipSign_NEON(p1);
  455. const int8x16_t p0s = FlipSign_NEON(p0);
  456. const int8x16_t q0s = FlipSign_NEON(q0);
  457. const int8x16_t q1s = FlipSign_NEON(q1);
  458. const int8x16_t delta0 = GetBaseDelta_NEON(p1s, p0s, q0s, q1s);
  459. const int8x16_t delta1 = vandq_s8(delta0, vreinterpretq_s8_u8(mask));
  460. ApplyFilter2_NEON(p0s, q0s, delta1, op0, oq0);
  461. }
  462. static void SimpleVFilter16_NEON(uint8_t* p, int stride, int thresh) {
  463. uint8x16_t p1, p0, q0, q1, op0, oq0;
  464. Load16x4_NEON(p, stride, &p1, &p0, &q0, &q1);
  465. {
  466. const uint8x16_t mask = NeedsFilter_NEON(p1, p0, q0, q1, thresh);
  467. DoFilter2_NEON(p1, p0, q0, q1, mask, &op0, &oq0);
  468. }
  469. Store16x2_NEON(op0, oq0, p, stride);
  470. }
  471. static void SimpleHFilter16_NEON(uint8_t* p, int stride, int thresh) {
  472. uint8x16_t p1, p0, q0, q1, oq0, op0;
  473. Load4x16_NEON(p, stride, &p1, &p0, &q0, &q1);
  474. {
  475. const uint8x16_t mask = NeedsFilter_NEON(p1, p0, q0, q1, thresh);
  476. DoFilter2_NEON(p1, p0, q0, q1, mask, &op0, &oq0);
  477. }
  478. Store2x16_NEON(op0, oq0, p, stride);
  479. }
  480. #else
  481. // Load/Store vertical edge
  482. #define LOAD8x4(c1, c2, c3, c4, b1, b2, stride) \
  483. "vld4.8 {" #c1 "[0]," #c2 "[0]," #c3 "[0]," #c4 "[0]}," #b1 "," #stride "\n" \
  484. "vld4.8 {" #c1 "[1]," #c2 "[1]," #c3 "[1]," #c4 "[1]}," #b2 "," #stride "\n" \
  485. "vld4.8 {" #c1 "[2]," #c2 "[2]," #c3 "[2]," #c4 "[2]}," #b1 "," #stride "\n" \
  486. "vld4.8 {" #c1 "[3]," #c2 "[3]," #c3 "[3]," #c4 "[3]}," #b2 "," #stride "\n" \
  487. "vld4.8 {" #c1 "[4]," #c2 "[4]," #c3 "[4]," #c4 "[4]}," #b1 "," #stride "\n" \
  488. "vld4.8 {" #c1 "[5]," #c2 "[5]," #c3 "[5]," #c4 "[5]}," #b2 "," #stride "\n" \
  489. "vld4.8 {" #c1 "[6]," #c2 "[6]," #c3 "[6]," #c4 "[6]}," #b1 "," #stride "\n" \
  490. "vld4.8 {" #c1 "[7]," #c2 "[7]," #c3 "[7]," #c4 "[7]}," #b2 "," #stride "\n"
  491. #define STORE8x2(c1, c2, p, stride) \
  492. "vst2.8 {" #c1 "[0], " #c2 "[0]}," #p "," #stride " \n" \
  493. "vst2.8 {" #c1 "[1], " #c2 "[1]}," #p "," #stride " \n" \
  494. "vst2.8 {" #c1 "[2], " #c2 "[2]}," #p "," #stride " \n" \
  495. "vst2.8 {" #c1 "[3], " #c2 "[3]}," #p "," #stride " \n" \
  496. "vst2.8 {" #c1 "[4], " #c2 "[4]}," #p "," #stride " \n" \
  497. "vst2.8 {" #c1 "[5], " #c2 "[5]}," #p "," #stride " \n" \
  498. "vst2.8 {" #c1 "[6], " #c2 "[6]}," #p "," #stride " \n" \
  499. "vst2.8 {" #c1 "[7], " #c2 "[7]}," #p "," #stride " \n"
  500. #define QRegs "q0", "q1", "q2", "q3", \
  501. "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
  502. #define FLIP_SIGN_BIT2(a, b, s) \
  503. "veor " #a "," #a "," #s " \n" \
  504. "veor " #b "," #b "," #s " \n" \
  505. #define FLIP_SIGN_BIT4(a, b, c, d, s) \
  506. FLIP_SIGN_BIT2(a, b, s) \
  507. FLIP_SIGN_BIT2(c, d, s)
  508. #define NEEDS_FILTER(p1, p0, q0, q1, thresh, mask) \
  509. "vabd.u8 q15," #p0 "," #q0 " \n" /* abs(p0 - q0) */ \
  510. "vabd.u8 q14," #p1 "," #q1 " \n" /* abs(p1 - q1) */ \
  511. "vqadd.u8 q15, q15, q15 \n" /* abs(p0 - q0) * 2 */ \
  512. "vshr.u8 q14, q14, #1 \n" /* abs(p1 - q1) / 2 */ \
  513. "vqadd.u8 q15, q15, q14 \n" /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 */ \
  514. "vdup.8 q14, " #thresh " \n" \
  515. "vcge.u8 " #mask ", q14, q15 \n" /* mask <= thresh */
  516. #define GET_BASE_DELTA(p1, p0, q0, q1, o) \
  517. "vqsub.s8 q15," #q0 "," #p0 " \n" /* (q0 - p0) */ \
  518. "vqsub.s8 " #o "," #p1 "," #q1 " \n" /* (p1 - q1) */ \
  519. "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 1 * (p0 - q0) */ \
  520. "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 2 * (p0 - q0) */ \
  521. "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 3 * (p0 - q0) */
  522. #define DO_SIMPLE_FILTER(p0, q0, fl) \
  523. "vmov.i8 q15, #0x03 \n" \
  524. "vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 3 */ \
  525. "vshr.s8 q15, q15, #3 \n" /* filter1 >> 3 */ \
  526. "vqadd.s8 " #p0 "," #p0 ", q15 \n" /* p0 += filter1 */ \
  527. \
  528. "vmov.i8 q15, #0x04 \n" \
  529. "vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 4 */ \
  530. "vshr.s8 q15, q15, #3 \n" /* filter2 >> 3 */ \
  531. "vqsub.s8 " #q0 "," #q0 ", q15 \n" /* q0 -= filter2 */
  532. // Applies filter on 2 pixels (p0 and q0)
  533. #define DO_FILTER2(p1, p0, q0, q1, thresh) \
  534. NEEDS_FILTER(p1, p0, q0, q1, thresh, q9) /* filter mask in q9 */ \
  535. "vmov.i8 q10, #0x80 \n" /* sign bit */ \
  536. FLIP_SIGN_BIT4(p1, p0, q0, q1, q10) /* convert to signed value */ \
  537. GET_BASE_DELTA(p1, p0, q0, q1, q11) /* get filter level */ \
  538. "vand q9, q9, q11 \n" /* apply filter mask */ \
  539. DO_SIMPLE_FILTER(p0, q0, q9) /* apply filter */ \
  540. FLIP_SIGN_BIT2(p0, q0, q10)
  541. static void SimpleVFilter16_NEON(uint8_t* p, int stride, int thresh) {
  542. __asm__ volatile (
  543. "sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride
  544. "vld1.u8 {q1}, [%[p]], %[stride] \n" // p1
  545. "vld1.u8 {q2}, [%[p]], %[stride] \n" // p0
  546. "vld1.u8 {q3}, [%[p]], %[stride] \n" // q0
  547. "vld1.u8 {q12}, [%[p]] \n" // q1
  548. DO_FILTER2(q1, q2, q3, q12, %[thresh])
  549. "sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride
  550. "vst1.u8 {q2}, [%[p]], %[stride] \n" // store op0
  551. "vst1.u8 {q3}, [%[p]] \n" // store oq0
  552. : [p] "+r"(p)
  553. : [stride] "r"(stride), [thresh] "r"(thresh)
  554. : "memory", QRegs
  555. );
  556. }
  557. static void SimpleHFilter16_NEON(uint8_t* p, int stride, int thresh) {
  558. __asm__ volatile (
  559. "sub r4, %[p], #2 \n" // base1 = p - 2
  560. "lsl r6, %[stride], #1 \n" // r6 = 2 * stride
  561. "add r5, r4, %[stride] \n" // base2 = base1 + stride
  562. LOAD8x4(d2, d3, d4, d5, [r4], [r5], r6)
  563. LOAD8x4(d24, d25, d26, d27, [r4], [r5], r6)
  564. "vswp d3, d24 \n" // p1:q1 p0:q3
  565. "vswp d5, d26 \n" // q0:q2 q1:q4
  566. "vswp q2, q12 \n" // p1:q1 p0:q2 q0:q3 q1:q4
  567. DO_FILTER2(q1, q2, q12, q13, %[thresh])
  568. "sub %[p], %[p], #1 \n" // p - 1
  569. "vswp d5, d24 \n"
  570. STORE8x2(d4, d5, [%[p]], %[stride])
  571. STORE8x2(d24, d25, [%[p]], %[stride])
  572. : [p] "+r"(p)
  573. : [stride] "r"(stride), [thresh] "r"(thresh)
  574. : "memory", "r4", "r5", "r6", QRegs
  575. );
  576. }
  577. #undef LOAD8x4
  578. #undef STORE8x2
  579. #endif // WEBP_USE_INTRINSICS
  580. static void SimpleVFilter16i_NEON(uint8_t* p, int stride, int thresh) {
  581. uint32_t k;
  582. for (k = 3; k != 0; --k) {
  583. p += 4 * stride;
  584. SimpleVFilter16_NEON(p, stride, thresh);
  585. }
  586. }
  587. static void SimpleHFilter16i_NEON(uint8_t* p, int stride, int thresh) {
  588. uint32_t k;
  589. for (k = 3; k != 0; --k) {
  590. p += 4;
  591. SimpleHFilter16_NEON(p, stride, thresh);
  592. }
  593. }
  594. //------------------------------------------------------------------------------
  595. // Complex In-loop filtering (Paragraph 15.3)
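// Two masks drive the complex filter: NeedsFilter2() decides whether a pixel
// is filtered at all, and NeedsHev() flags 'high edge variance' pixels, i.e.
// max(abs(p1 - p0), abs(q1 - q0)) > hev_thresh. In DoFilter4/DoFilter6 below,
// hev pixels only get the simple 2-pixel adjustment of p0/q0, while the
// remaining masked pixels get the full 4- or 6-point filter.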
  596. static uint8x16_t NeedsHev_NEON(const uint8x16_t p1, const uint8x16_t p0,
  597. const uint8x16_t q0, const uint8x16_t q1,
  598. int hev_thresh) {
  599. const uint8x16_t hev_thresh_v = vdupq_n_u8((uint8_t)hev_thresh);
  600. const uint8x16_t a_p1_p0 = vabdq_u8(p1, p0); // abs(p1 - p0)
  601. const uint8x16_t a_q1_q0 = vabdq_u8(q1, q0); // abs(q1 - q0)
  602. const uint8x16_t a_max = vmaxq_u8(a_p1_p0, a_q1_q0);
  603. const uint8x16_t mask = vcgtq_u8(a_max, hev_thresh_v);
  604. return mask;
  605. }
  606. static uint8x16_t NeedsFilter2_NEON(const uint8x16_t p3, const uint8x16_t p2,
  607. const uint8x16_t p1, const uint8x16_t p0,
  608. const uint8x16_t q0, const uint8x16_t q1,
  609. const uint8x16_t q2, const uint8x16_t q3,
  610. int ithresh, int thresh) {
  611. const uint8x16_t ithresh_v = vdupq_n_u8((uint8_t)ithresh);
  612. const uint8x16_t a_p3_p2 = vabdq_u8(p3, p2); // abs(p3 - p2)
  613. const uint8x16_t a_p2_p1 = vabdq_u8(p2, p1); // abs(p2 - p1)
  614. const uint8x16_t a_p1_p0 = vabdq_u8(p1, p0); // abs(p1 - p0)
  615. const uint8x16_t a_q3_q2 = vabdq_u8(q3, q2); // abs(q3 - q2)
  616. const uint8x16_t a_q2_q1 = vabdq_u8(q2, q1); // abs(q2 - q1)
  617. const uint8x16_t a_q1_q0 = vabdq_u8(q1, q0); // abs(q1 - q0)
  618. const uint8x16_t max1 = vmaxq_u8(a_p3_p2, a_p2_p1);
  619. const uint8x16_t max2 = vmaxq_u8(a_p1_p0, a_q3_q2);
  620. const uint8x16_t max3 = vmaxq_u8(a_q2_q1, a_q1_q0);
  621. const uint8x16_t max12 = vmaxq_u8(max1, max2);
  622. const uint8x16_t max123 = vmaxq_u8(max12, max3);
  623. const uint8x16_t mask2 = vcgeq_u8(ithresh_v, max123);
  624. const uint8x16_t mask1 = NeedsFilter_NEON(p1, p0, q0, q1, thresh);
  625. const uint8x16_t mask = vandq_u8(mask1, mask2);
  626. return mask;
  627. }
  628. // 4-points filter
  629. static void ApplyFilter4_NEON(
  630. const int8x16_t p1, const int8x16_t p0,
  631. const int8x16_t q0, const int8x16_t q1,
  632. const int8x16_t delta0,
  633. uint8x16_t* const op1, uint8x16_t* const op0,
  634. uint8x16_t* const oq0, uint8x16_t* const oq1) {
  635. const int8x16_t kCst3 = vdupq_n_s8(0x03);
  636. const int8x16_t kCst4 = vdupq_n_s8(0x04);
  637. const int8x16_t delta1 = vqaddq_s8(delta0, kCst4);
  638. const int8x16_t delta2 = vqaddq_s8(delta0, kCst3);
  639. const int8x16_t a1 = vshrq_n_s8(delta1, 3);
  640. const int8x16_t a2 = vshrq_n_s8(delta2, 3);
  641. const int8x16_t a3 = vrshrq_n_s8(a1, 1); // a3 = (a1 + 1) >> 1
  642. *op0 = FlipSignBack_NEON(vqaddq_s8(p0, a2)); // clip(p0 + a2)
  643. *oq0 = FlipSignBack_NEON(vqsubq_s8(q0, a1)); // clip(q0 - a1)
  644. *op1 = FlipSignBack_NEON(vqaddq_s8(p1, a3)); // clip(p1 + a3)
  645. *oq1 = FlipSignBack_NEON(vqsubq_s8(q1, a3)); // clip(q1 - a3)
  646. }
  647. static void DoFilter4_NEON(
  648. const uint8x16_t p1, const uint8x16_t p0,
  649. const uint8x16_t q0, const uint8x16_t q1,
  650. const uint8x16_t mask, const uint8x16_t hev_mask,
  651. uint8x16_t* const op1, uint8x16_t* const op0,
  652. uint8x16_t* const oq0, uint8x16_t* const oq1) {
  653. // This is a fused version of DoFilter2() calling ApplyFilter2 directly
  654. const int8x16_t p1s = FlipSign_NEON(p1);
  655. int8x16_t p0s = FlipSign_NEON(p0);
  656. int8x16_t q0s = FlipSign_NEON(q0);
  657. const int8x16_t q1s = FlipSign_NEON(q1);
  658. const uint8x16_t simple_lf_mask = vandq_u8(mask, hev_mask);
  659. // do_filter2 part (simple loopfilter on pixels with hev)
  660. {
  661. const int8x16_t delta = GetBaseDelta_NEON(p1s, p0s, q0s, q1s);
  662. const int8x16_t simple_lf_delta =
  663. vandq_s8(delta, vreinterpretq_s8_u8(simple_lf_mask));
  664. ApplyFilter2NoFlip_NEON(p0s, q0s, simple_lf_delta, &p0s, &q0s);
  665. }
  666. // do_filter4 part (complex loopfilter on pixels without hev)
  667. {
  668. const int8x16_t delta0 = GetBaseDelta0_NEON(p0s, q0s);
  669. // we use: (mask & hev_mask) ^ mask = mask & !hev_mask
  670. const uint8x16_t complex_lf_mask = veorq_u8(simple_lf_mask, mask);
  671. const int8x16_t complex_lf_delta =
  672. vandq_s8(delta0, vreinterpretq_s8_u8(complex_lf_mask));
  673. ApplyFilter4_NEON(p1s, p0s, q0s, q1s, complex_lf_delta, op1, op0, oq0, oq1);
  674. }
  675. }
  676. // 6-points filter
  677. static void ApplyFilter6_NEON(
  678. const int8x16_t p2, const int8x16_t p1, const int8x16_t p0,
  679. const int8x16_t q0, const int8x16_t q1, const int8x16_t q2,
  680. const int8x16_t delta,
  681. uint8x16_t* const op2, uint8x16_t* const op1, uint8x16_t* const op0,
  682. uint8x16_t* const oq0, uint8x16_t* const oq1, uint8x16_t* const oq2) {
  683. // We have to compute: X = (9*a+63) >> 7, Y = (18*a+63)>>7, Z = (27*a+63) >> 7
  684. // Turns out, there's a common sub-expression S=9 * a - 1 that can be used
  685. // with the special vqrshrn_n_s16 rounding-shift-and-narrow instruction:
  686. // X = (S + 64) >> 7, Y = (S + 32) >> 6, Z = (18 * a + S + 64) >> 7
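// (Check: X = (S + 64) >> 7 = (9 * a + 63) >> 7 and
//  Z = (18 * a + S + 64) >> 7 = (27 * a + 63) >> 7, as required.)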
  687. const int8x8_t delta_lo = vget_low_s8(delta);
  688. const int8x8_t delta_hi = vget_high_s8(delta);
  689. const int8x8_t kCst9 = vdup_n_s8(9);
  690. const int16x8_t kCstm1 = vdupq_n_s16(-1);
  691. const int8x8_t kCst18 = vdup_n_s8(18);
  692. const int16x8_t S_lo = vmlal_s8(kCstm1, kCst9, delta_lo); // S = 9 * a - 1
  693. const int16x8_t S_hi = vmlal_s8(kCstm1, kCst9, delta_hi);
  694. const int16x8_t Z_lo = vmlal_s8(S_lo, kCst18, delta_lo); // S + 18 * a
  695. const int16x8_t Z_hi = vmlal_s8(S_hi, kCst18, delta_hi);
  696. const int8x8_t a3_lo = vqrshrn_n_s16(S_lo, 7); // (9 * a + 63) >> 7
  697. const int8x8_t a3_hi = vqrshrn_n_s16(S_hi, 7);
  698. const int8x8_t a2_lo = vqrshrn_n_s16(S_lo, 6); // (9 * a + 31) >> 6
  699. const int8x8_t a2_hi = vqrshrn_n_s16(S_hi, 6);
  700. const int8x8_t a1_lo = vqrshrn_n_s16(Z_lo, 7); // (27 * a + 63) >> 7
  701. const int8x8_t a1_hi = vqrshrn_n_s16(Z_hi, 7);
  702. const int8x16_t a1 = vcombine_s8(a1_lo, a1_hi);
  703. const int8x16_t a2 = vcombine_s8(a2_lo, a2_hi);
  704. const int8x16_t a3 = vcombine_s8(a3_lo, a3_hi);
  705. *op0 = FlipSignBack_NEON(vqaddq_s8(p0, a1)); // clip(p0 + a1)
  706. *oq0 = FlipSignBack_NEON(vqsubq_s8(q0, a1)); // clip(q0 - a1)
  707. *oq1 = FlipSignBack_NEON(vqsubq_s8(q1, a2)); // clip(q1 - a2)
  708. *op1 = FlipSignBack_NEON(vqaddq_s8(p1, a2)); // clip(p1 + a2)
  709. *oq2 = FlipSignBack_NEON(vqsubq_s8(q2, a3)); // clip(q2 - a3)
  710. *op2 = FlipSignBack_NEON(vqaddq_s8(p2, a3)); // clip(p2 + a3)
  711. }
  712. static void DoFilter6_NEON(
  713. const uint8x16_t p2, const uint8x16_t p1, const uint8x16_t p0,
  714. const uint8x16_t q0, const uint8x16_t q1, const uint8x16_t q2,
  715. const uint8x16_t mask, const uint8x16_t hev_mask,
  716. uint8x16_t* const op2, uint8x16_t* const op1, uint8x16_t* const op0,
  717. uint8x16_t* const oq0, uint8x16_t* const oq1, uint8x16_t* const oq2) {
  718. // This is a fused version of DoFilter2() calling ApplyFilter2 directly
  719. const int8x16_t p2s = FlipSign_NEON(p2);
  720. const int8x16_t p1s = FlipSign_NEON(p1);
  721. int8x16_t p0s = FlipSign_NEON(p0);
  722. int8x16_t q0s = FlipSign_NEON(q0);
  723. const int8x16_t q1s = FlipSign_NEON(q1);
  724. const int8x16_t q2s = FlipSign_NEON(q2);
  725. const uint8x16_t simple_lf_mask = vandq_u8(mask, hev_mask);
  726. const int8x16_t delta0 = GetBaseDelta_NEON(p1s, p0s, q0s, q1s);
  727. // do_filter2 part (simple loopfilter on pixels with hev)
  728. {
  729. const int8x16_t simple_lf_delta =
  730. vandq_s8(delta0, vreinterpretq_s8_u8(simple_lf_mask));
  731. ApplyFilter2NoFlip_NEON(p0s, q0s, simple_lf_delta, &p0s, &q0s);
  732. }
  733. // do_filter6 part (complex loopfilter on pixels without hev)
  734. {
  735. // we use: (mask & hev_mask) ^ mask = mask & !hev_mask
  736. const uint8x16_t complex_lf_mask = veorq_u8(simple_lf_mask, mask);
  737. const int8x16_t complex_lf_delta =
  738. vandq_s8(delta0, vreinterpretq_s8_u8(complex_lf_mask));
  739. ApplyFilter6_NEON(p2s, p1s, p0s, q0s, q1s, q2s, complex_lf_delta,
  740. op2, op1, op0, oq0, oq1, oq2);
  741. }
  742. }
  743. // on macroblock edges
  744. static void VFilter16_NEON(uint8_t* p, int stride,
  745. int thresh, int ithresh, int hev_thresh) {
  746. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  747. Load16x8_NEON(p, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  748. {
  749. const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
  750. ithresh, thresh);
  751. const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
  752. uint8x16_t op2, op1, op0, oq0, oq1, oq2;
  753. DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask,
  754. &op2, &op1, &op0, &oq0, &oq1, &oq2);
  755. Store16x2_NEON(op2, op1, p - 2 * stride, stride);
  756. Store16x2_NEON(op0, oq0, p + 0 * stride, stride);
  757. Store16x2_NEON(oq1, oq2, p + 2 * stride, stride);
  758. }
  759. }
  760. static void HFilter16_NEON(uint8_t* p, int stride,
  761. int thresh, int ithresh, int hev_thresh) {
  762. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  763. Load8x16_NEON(p, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  764. {
  765. const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
  766. ithresh, thresh);
  767. const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
  768. uint8x16_t op2, op1, op0, oq0, oq1, oq2;
  769. DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask,
  770. &op2, &op1, &op0, &oq0, &oq1, &oq2);
  771. Store2x16_NEON(op2, op1, p - 2, stride);
  772. Store2x16_NEON(op0, oq0, p + 0, stride);
  773. Store2x16_NEON(oq1, oq2, p + 2, stride);
  774. }
  775. }
  776. // on three inner edges
  777. static void VFilter16i_NEON(uint8_t* p, int stride,
  778. int thresh, int ithresh, int hev_thresh) {
  779. uint32_t k;
  780. uint8x16_t p3, p2, p1, p0;
  781. Load16x4_NEON(p + 2 * stride, stride, &p3, &p2, &p1, &p0);
  782. for (k = 3; k != 0; --k) {
  783. uint8x16_t q0, q1, q2, q3;
  784. p += 4 * stride;
  785. Load16x4_NEON(p + 2 * stride, stride, &q0, &q1, &q2, &q3);
  786. {
  787. const uint8x16_t mask =
  788. NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, ithresh, thresh);
  789. const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
  790. // p3 and p2 are not just temporary variables here: they will be
  791. // re-used for the next span. And q2/q3 will become p1/p0 accordingly.
  792. DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &p1, &p0, &p3, &p2);
  793. Store16x4_NEON(p1, p0, p3, p2, p, stride);
  794. p1 = q2;
  795. p0 = q3;
  796. }
  797. }
  798. }
  799. #if !defined(WORK_AROUND_GCC)
  800. static void HFilter16i_NEON(uint8_t* p, int stride,
  801. int thresh, int ithresh, int hev_thresh) {
  802. uint32_t k;
  803. uint8x16_t p3, p2, p1, p0;
  804. Load4x16_NEON(p + 2, stride, &p3, &p2, &p1, &p0);
  805. for (k = 3; k != 0; --k) {
  806. uint8x16_t q0, q1, q2, q3;
  807. p += 4;
  808. Load4x16_NEON(p + 2, stride, &q0, &q1, &q2, &q3);
  809. {
  810. const uint8x16_t mask =
  811. NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, ithresh, thresh);
  812. const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
  813. DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &p1, &p0, &p3, &p2);
  814. Store4x16_NEON(p1, p0, p3, p2, p, stride);
  815. p1 = q2;
  816. p0 = q3;
  817. }
  818. }
  819. }
  820. #endif // !WORK_AROUND_GCC
  821. // 8-pixels wide variant, for chroma filtering
  822. static void VFilter8_NEON(uint8_t* u, uint8_t* v, int stride,
  823. int thresh, int ithresh, int hev_thresh) {
  824. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  825. Load8x8x2_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  826. {
  827. const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
  828. ithresh, thresh);
  829. const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
  830. uint8x16_t op2, op1, op0, oq0, oq1, oq2;
  831. DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask,
  832. &op2, &op1, &op0, &oq0, &oq1, &oq2);
  833. Store8x2x2_NEON(op2, op1, u - 2 * stride, v - 2 * stride, stride);
  834. Store8x2x2_NEON(op0, oq0, u + 0 * stride, v + 0 * stride, stride);
  835. Store8x2x2_NEON(oq1, oq2, u + 2 * stride, v + 2 * stride, stride);
  836. }
  837. }
  838. static void VFilter8i_NEON(uint8_t* u, uint8_t* v, int stride,
  839. int thresh, int ithresh, int hev_thresh) {
  840. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  841. u += 4 * stride;
  842. v += 4 * stride;
  843. Load8x8x2_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  844. {
  845. const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
  846. ithresh, thresh);
  847. const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
  848. uint8x16_t op1, op0, oq0, oq1;
  849. DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &op1, &op0, &oq0, &oq1);
  850. Store8x4x2_NEON(op1, op0, oq0, oq1, u, v, stride);
  851. }
  852. }
  853. #if !defined(WORK_AROUND_GCC)
  854. static void HFilter8_NEON(uint8_t* u, uint8_t* v, int stride,
  855. int thresh, int ithresh, int hev_thresh) {
  856. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  857. Load8x8x2T_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  858. {
  859. const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
  860. ithresh, thresh);
  861. const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
  862. uint8x16_t op2, op1, op0, oq0, oq1, oq2;
  863. DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask,
  864. &op2, &op1, &op0, &oq0, &oq1, &oq2);
  865. Store6x8x2_NEON(op2, op1, op0, oq0, oq1, oq2, u, v, stride);
  866. }
  867. }
  868. static void HFilter8i_NEON(uint8_t* u, uint8_t* v, int stride,
  869. int thresh, int ithresh, int hev_thresh) {
  870. uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
  871. u += 4;
  872. v += 4;
  873. Load8x8x2T_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
  874. {
  875. const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3,
  876. ithresh, thresh);
  877. const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh);
  878. uint8x16_t op1, op0, oq0, oq1;
  879. DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &op1, &op0, &oq0, &oq1);
  880. Store4x8x2_NEON(op1, op0, oq0, oq1, u, v, stride);
  881. }
  882. }
  883. #endif // !WORK_AROUND_GCC
  884. //-----------------------------------------------------------------------------
  885. // Inverse transforms (Paragraph 14.4)
  886. // Technically these are unsigned but vqdmulh is only available in signed.
  887. // vqdmulh returns high half (effectively >> 16) but also doubles the value,
  888. // changing the >> 16 to >> 15 and requiring an additional >> 1.
  889. // We use this to our advantage with kC2. The canonical value is 35468.
  890. // However, the high bit is set so treating it as signed will give incorrect
  891. // results. We avoid this by down shifting by 1 here to clear the highest bit.
  892. // Combined with the doubling effect of vqdmulh we get >> 16.
  893. // This cannot be applied to kC1 because the lowest bit is set. Down shifting
  894. // the constant would reduce precision.
  895. // libwebp uses a trick to avoid some extra addition that libvpx does.
  896. // Instead of:
  897. // temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
  898. // libwebp adds 1 << 16 to cospi8sqrt2minus1 (kC1). However, this causes the
  899. // same issue with kC1 and vqdmulh that we work around by down shifting kC2
  900. static const int16_t kC1 = 20091;
  901. static const int16_t kC2 = 17734; // half of kC2, actually. See comment above.
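// Arithmetic check: vqdmulhq_n_s16(x, 17734) returns (2 * x * 17734) >> 16,
// i.e. (x * 35468) >> 16, the canonical kC2 product. For kC1 the doubling is
// undone by an extra right shift by 1 (vsraq_n_s16(..., 1) in the intrinsics
// path, 'vshr.s16 #1' in the assembly path).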
  902. #if defined(WEBP_USE_INTRINSICS)
  903. static WEBP_INLINE void Transpose8x2_NEON(const int16x8_t in0,
  904. const int16x8_t in1,
  905. int16x8x2_t* const out) {
  906. // a0 a1 a2 a3 | b0 b1 b2 b3 => a0 b0 c0 d0 | a1 b1 c1 d1
  907. // c0 c1 c2 c3 | d0 d1 d2 d3 a2 b2 c2 d2 | a3 b3 c3 d3
  908. const int16x8x2_t tmp0 = vzipq_s16(in0, in1); // a0 c0 a1 c1 a2 c2 ...
  909. // b0 d0 b1 d1 b2 d2 ...
  910. *out = vzipq_s16(tmp0.val[0], tmp0.val[1]);
  911. }
  912. static WEBP_INLINE void TransformPass_NEON(int16x8x2_t* const rows) {
  913. // {rows} = in0 | in4
  914. // in8 | in12
  915. // B1 = in4 | in12
  916. const int16x8_t B1 =
  917. vcombine_s16(vget_high_s16(rows->val[0]), vget_high_s16(rows->val[1]));
  918. // C0 = kC1 * in4 | kC1 * in12
  919. // C1 = kC2 * in4 | kC2 * in12
  920. const int16x8_t C0 = vsraq_n_s16(B1, vqdmulhq_n_s16(B1, kC1), 1);
  921. const int16x8_t C1 = vqdmulhq_n_s16(B1, kC2);
  922. const int16x4_t a = vqadd_s16(vget_low_s16(rows->val[0]),
  923. vget_low_s16(rows->val[1])); // in0 + in8
  924. const int16x4_t b = vqsub_s16(vget_low_s16(rows->val[0]),
  925. vget_low_s16(rows->val[1])); // in0 - in8
  926. // c = kC2 * in4 - kC1 * in12
  927. // d = kC1 * in4 + kC2 * in12
  928. const int16x4_t c = vqsub_s16(vget_low_s16(C1), vget_high_s16(C0));
  929. const int16x4_t d = vqadd_s16(vget_low_s16(C0), vget_high_s16(C1));
  930. const int16x8_t D0 = vcombine_s16(a, b); // D0 = a | b
  931. const int16x8_t D1 = vcombine_s16(d, c); // D1 = d | c
  932. const int16x8_t E0 = vqaddq_s16(D0, D1); // a+d | b+c
  933. const int16x8_t E_tmp = vqsubq_s16(D0, D1); // a-d | b-c
  934. const int16x8_t E1 = vcombine_s16(vget_high_s16(E_tmp), vget_low_s16(E_tmp));
  935. Transpose8x2_NEON(E0, E1, rows);
  936. }
  937. static void TransformOne_NEON(const int16_t* in, uint8_t* dst) {
  938. int16x8x2_t rows;
  939. INIT_VECTOR2(rows, vld1q_s16(in + 0), vld1q_s16(in + 8));
  940. TransformPass_NEON(&rows);
  941. TransformPass_NEON(&rows);
  942. Add4x4_NEON(rows.val[0], rows.val[1], dst);
  943. }
  944. #else
  945. static void TransformOne_NEON(const int16_t* in, uint8_t* dst) {
  946. const int kBPS = BPS;
  947. // kC1, kC2. Padded because vld1.16 loads 8 bytes
  948. const int16_t constants[4] = { kC1, kC2, 0, 0 };
  949. /* Adapted from libvpx: vp8/common/arm/neon/shortidct4x4llm_neon.asm */
  950. __asm__ volatile (
  951. "vld1.16 {q1, q2}, [%[in]] \n"
  952. "vld1.16 {d0}, [%[constants]] \n"
  953. /* d2: in[0]
  954. * d3: in[8]
  955. * d4: in[4]
  956. * d5: in[12]
  957. */
  958. "vswp d3, d4 \n"
  959. /* q8 = {in[4], in[12]} * kC1 * 2 >> 16
  960. * q9 = {in[4], in[12]} * kC2 >> 16
  961. */
  962. "vqdmulh.s16 q8, q2, d0[0] \n"
  963. "vqdmulh.s16 q9, q2, d0[1] \n"
  964. /* d22 = a = in[0] + in[8]
  965. * d23 = b = in[0] - in[8]
  966. */
  967. "vqadd.s16 d22, d2, d3 \n"
  968. "vqsub.s16 d23, d2, d3 \n"
  969. /* The multiplication should be x * kC1 >> 16
  970. * However, with vqdmulh we get x * kC1 * 2 >> 16
  971. * (multiply, double, return high half)
  972. * We avoided this in kC2 by pre-shifting the constant.
  973. * q8 = in[4]/[12] * kC1 >> 16
  974. */
  975. "vshr.s16 q8, q8, #1 \n"
  976. /* Add {in[4], in[12]} back after the multiplication. This is handled by
  977. * adding 1 << 16 to kC1 in the libwebp C code.
  978. */
  979. "vqadd.s16 q8, q2, q8 \n"
  980. /* d20 = c = in[4]*kC2 - in[12]*kC1
  981. * d21 = d = in[4]*kC1 + in[12]*kC2
  982. */
  983. "vqsub.s16 d20, d18, d17 \n"
  984. "vqadd.s16 d21, d19, d16 \n"
  985. /* d2 = tmp[0] = a + d
  986. * d3 = tmp[1] = b + c
  987. * d4 = tmp[2] = b - c
  988. * d5 = tmp[3] = a - d
  989. */
  990. "vqadd.s16 d2, d22, d21 \n"
  991. "vqadd.s16 d3, d23, d20 \n"
  992. "vqsub.s16 d4, d23, d20 \n"
  993. "vqsub.s16 d5, d22, d21 \n"
  994. "vzip.16 q1, q2 \n"
  995. "vzip.16 q1, q2 \n"
  996. "vswp d3, d4 \n"
  997. /* q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16
  998. * q9 = {tmp[4], tmp[12]} * kC2 >> 16
  999. */
  1000. "vqdmulh.s16 q8, q2, d0[0] \n"
  1001. "vqdmulh.s16 q9, q2, d0[1] \n"
  1002. /* d22 = a = tmp[0] + tmp[8]
  1003. * d23 = b = tmp[0] - tmp[8]
  1004. */
  1005. "vqadd.s16 d22, d2, d3 \n"
  1006. "vqsub.s16 d23, d2, d3 \n"
  1007. /* See long winded explanations prior */
  1008. "vshr.s16 q8, q8, #1 \n"
  1009. "vqadd.s16 q8, q2, q8 \n"
  1010. /* d20 = c = tmp[4]*kC2 - tmp[12]*kC1
  1011. * d21 = d = tmp[4]*kC1 + tmp[12]*kC2
  1012. */
  1013. "vqsub.s16 d20, d18, d17 \n"
  1014. "vqadd.s16 d21, d19, d16 \n"
  1015. /* d2 = tmp[0] = a + d
  1016. * d3 = tmp[1] = b + c
  1017. * d4 = tmp[2] = b - c
  1018. * d5 = tmp[3] = a - d
  1019. */
  1020. "vqadd.s16 d2, d22, d21 \n"
  1021. "vqadd.s16 d3, d23, d20 \n"
  1022. "vqsub.s16 d4, d23, d20 \n"
  1023. "vqsub.s16 d5, d22, d21 \n"
  1024. "vld1.32 d6[0], [%[dst]], %[kBPS] \n"
  1025. "vld1.32 d6[1], [%[dst]], %[kBPS] \n"
  1026. "vld1.32 d7[0], [%[dst]], %[kBPS] \n"
  1027. "vld1.32 d7[1], [%[dst]], %[kBPS] \n"
  1028. "sub %[dst], %[dst], %[kBPS], lsl #2 \n"
  1029. /* (val) + 4 >> 3 */
  1030. "vrshr.s16 d2, d2, #3 \n"
  1031. "vrshr.s16 d3, d3, #3 \n"
  1032. "vrshr.s16 d4, d4, #3 \n"
  1033. "vrshr.s16 d5, d5, #3 \n"
  1034. "vzip.16 q1, q2 \n"
  1035. "vzip.16 q1, q2 \n"
  1036. /* Must accumulate before saturating */
  1037. "vmovl.u8 q8, d6 \n"
  1038. "vmovl.u8 q9, d7 \n"
  1039. "vqadd.s16 q1, q1, q8 \n"
  1040. "vqadd.s16 q2, q2, q9 \n"
  1041. "vqmovun.s16 d0, q1 \n"
  1042. "vqmovun.s16 d1, q2 \n"
  1043. "vst1.32 d0[0], [%[dst]], %[kBPS] \n"
  1044. "vst1.32 d0[1], [%[dst]], %[kBPS] \n"
  1045. "vst1.32 d1[0], [%[dst]], %[kBPS] \n"
  1046. "vst1.32 d1[1], [%[dst]] \n"
  1047. : [in] "+r"(in), [dst] "+r"(dst) /* modified registers */
  1048. : [kBPS] "r"(kBPS), [constants] "r"(constants) /* constants */
  1049. : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11" /* clobbered */
  1050. );
  1051. }
  1052. #endif // WEBP_USE_INTRINSICS
  1053. static void TransformTwo_NEON(const int16_t* in, uint8_t* dst, int do_two) {
  1054. TransformOne_NEON(in, dst);
  1055. if (do_two) {
  1056. TransformOne_NEON(in + 16, dst + 4);
  1057. }
  1058. }
  1059. static void TransformDC_NEON(const int16_t* in, uint8_t* dst) {
  1060. const int16x8_t DC = vdupq_n_s16(in[0]);
  1061. Add4x4_NEON(DC, DC, dst);
  1062. }
  1063. //------------------------------------------------------------------------------
  1064. #define STORE_WHT(dst, col, rows) do { \
  1065. *dst = vgetq_lane_s32(rows.val[0], col); (dst) += 16; \
  1066. *dst = vgetq_lane_s32(rows.val[1], col); (dst) += 16; \
  1067. *dst = vgetq_lane_s32(rows.val[2], col); (dst) += 16; \
  1068. *dst = vgetq_lane_s32(rows.val[3], col); (dst) += 16; \
  1069. } while (0)
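// 'out' holds the coefficients of the 16 luma sub-blocks back to back
// (16 int16_t per block); the inverse WHT only produces each block's DC
// term, hence the store stride of 16 in STORE_WHT.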
  1070. static void TransformWHT_NEON(const int16_t* in, int16_t* out) {
  1071. int32x4x4_t tmp;
  1072. {
  1073. // Load the source.
  1074. const int16x4_t in00_03 = vld1_s16(in + 0);
  1075. const int16x4_t in04_07 = vld1_s16(in + 4);
  1076. const int16x4_t in08_11 = vld1_s16(in + 8);
  1077. const int16x4_t in12_15 = vld1_s16(in + 12);
  1078. const int32x4_t a0 = vaddl_s16(in00_03, in12_15); // in[0..3] + in[12..15]
  1079. const int32x4_t a1 = vaddl_s16(in04_07, in08_11); // in[4..7] + in[8..11]
  1080. const int32x4_t a2 = vsubl_s16(in04_07, in08_11); // in[4..7] - in[8..11]
  1081. const int32x4_t a3 = vsubl_s16(in00_03, in12_15); // in[0..3] - in[12..15]
  1082. tmp.val[0] = vaddq_s32(a0, a1);
  1083. tmp.val[1] = vaddq_s32(a3, a2);
  1084. tmp.val[2] = vsubq_s32(a0, a1);
  1085. tmp.val[3] = vsubq_s32(a3, a2);
  1086. // Arrange the temporary results column-wise.
  1087. tmp = Transpose4x4_NEON(tmp);
  1088. }
  1089. {
  1090. const int32x4_t kCst3 = vdupq_n_s32(3);
  1091. const int32x4_t dc = vaddq_s32(tmp.val[0], kCst3); // add rounder
  1092. const int32x4_t a0 = vaddq_s32(dc, tmp.val[3]);
  1093. const int32x4_t a1 = vaddq_s32(tmp.val[1], tmp.val[2]);
  1094. const int32x4_t a2 = vsubq_s32(tmp.val[1], tmp.val[2]);
  1095. const int32x4_t a3 = vsubq_s32(dc, tmp.val[3]);
  1096. tmp.val[0] = vaddq_s32(a0, a1);
  1097. tmp.val[1] = vaddq_s32(a3, a2);
  1098. tmp.val[2] = vsubq_s32(a0, a1);
  1099. tmp.val[3] = vsubq_s32(a3, a2);
  1100. // right shift the results by 3.
  1101. tmp.val[0] = vshrq_n_s32(tmp.val[0], 3);
  1102. tmp.val[1] = vshrq_n_s32(tmp.val[1], 3);
  1103. tmp.val[2] = vshrq_n_s32(tmp.val[2], 3);
  1104. tmp.val[3] = vshrq_n_s32(tmp.val[3], 3);
  1105. STORE_WHT(out, 0, tmp);
  1106. STORE_WHT(out, 1, tmp);
  1107. STORE_WHT(out, 2, tmp);
  1108. STORE_WHT(out, 3, tmp);
  1109. }
  1110. }
  1111. #undef STORE_WHT
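
// Illustrative only: a scalar sketch of the second WHT stage above for one
// lane of the transposed data (name ours, not called anywhere). The rounder 3
// is folded into the DC term so that the final >> 3 rounds, and the four
// results land 16 int16_t apart, matching the "(dst) += 16" steps in the
// STORE_WHT macro above.
static WEBP_INLINE void TransformWHTColumnSketch(const int32_t t[4],
                                                 int16_t* out) {
  const int dc = t[0] + 3;              // rounder folded into the DC term
  const int a0 = dc + t[3];
  const int a1 = t[1] + t[2];
  const int a2 = t[1] - t[2];
  const int a3 = dc - t[3];
  out[0 * 16] = (int16_t)((a0 + a1) >> 3);
  out[1 * 16] = (int16_t)((a3 + a2) >> 3);
  out[2 * 16] = (int16_t)((a0 - a1) >> 3);
  out[3 * 16] = (int16_t)((a3 - a2) >> 3);
}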

//------------------------------------------------------------------------------

#define MUL(a, b) (((a) * (b)) >> 16)
static void TransformAC3_NEON(const int16_t* in, uint8_t* dst) {
  static const int kC1_full = 20091 + (1 << 16);
  static const int kC2_full = 35468;
  const int16x4_t A = vld1_dup_s16(in);
  const int16x4_t c4 = vdup_n_s16(MUL(in[4], kC2_full));
  const int16x4_t d4 = vdup_n_s16(MUL(in[4], kC1_full));
  const int c1 = MUL(in[1], kC2_full);
  const int d1 = MUL(in[1], kC1_full);
  const uint64_t cd = (uint64_t)( d1 & 0xffff) <<  0 |
                      (uint64_t)( c1 & 0xffff) << 16 |
                      (uint64_t)(-c1 & 0xffff) << 32 |
                      (uint64_t)(-d1 & 0xffff) << 48;
  const int16x4_t CD = vcreate_s16(cd);
  const int16x4_t B = vqadd_s16(A, CD);
  const int16x8_t m0_m1 = vcombine_s16(vqadd_s16(B, d4), vqadd_s16(B, c4));
  const int16x8_t m2_m3 = vcombine_s16(vqsub_s16(B, c4), vqsub_s16(B, d4));
  Add4x4_NEON(m0_m1, m2_m3, dst);
}
#undef MUL
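
// Illustrative only: what the AC3 path above (only in[0], in[1] and in[4]
// non-zero) computes per pixel, as a scalar sketch (name ours, not called
// anywhere). The multiplies are written in the non-widening form equivalent
// to MUL(x, kC1_full) / MUL(x, kC2_full). The final (v + 4) >> 3 descale,
// accumulation into dst and saturation are left to Add4x4_NEON, as with the
// other transforms.
static WEBP_INLINE int TransformAC3PixelSketch(const int16_t* in,
                                               int x, int y) {
  const int c1 = (in[1] * 35468) >> 16;
  const int d1 = ((in[1] * 20091) >> 16) + in[1];
  const int c4 = (in[4] * 35468) >> 16;
  const int d4 = ((in[4] * 20091) >> 16) + in[4];
  const int col[4] = { d1, c1, -c1, -d1 };   // the packed CD vector above
  const int row[4] = { d4, c4, -c4, -d4 };   // the +/-d4, +/-c4 adds above
  return in[0] + col[x] + row[y];            // pixel (x, y) of the 4x4 block
}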

//------------------------------------------------------------------------------
// 4x4

static void DC4_NEON(uint8_t* dst) {    // DC
  const uint8x8_t A = vld1_u8(dst - BPS);  // top row
  const uint16x4_t p0 = vpaddl_u8(A);  // cascading summation of the top
  const uint16x4_t p1 = vpadd_u16(p0, p0);
  const uint8x8_t L0 = vld1_u8(dst + 0 * BPS - 1);
  const uint8x8_t L1 = vld1_u8(dst + 1 * BPS - 1);
  const uint8x8_t L2 = vld1_u8(dst + 2 * BPS - 1);
  const uint8x8_t L3 = vld1_u8(dst + 3 * BPS - 1);
  const uint16x8_t s0 = vaddl_u8(L0, L1);
  const uint16x8_t s1 = vaddl_u8(L2, L3);
  const uint16x8_t s01 = vaddq_u16(s0, s1);
  const uint16x8_t sum = vaddq_u16(s01, vcombine_u16(p1, p1));
  const uint8x8_t dc0 = vrshrn_n_u16(sum, 3);  // (sum + 4) >> 3
  const uint8x8_t dc = vdup_lane_u8(dc0, 0);
  int i;
  for (i = 0; i < 4; ++i) {
    vst1_lane_u32((uint32_t*)(dst + i * BPS), vreinterpret_u32_u8(dc), 0);
  }
}
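
// Illustrative only: the scalar value DC4_NEON replicates over the 4x4 block
// (name ours, not called anywhere). Only lane 0 of the vector sums above
// matters: the four pixels above and the four pixels to the left are summed
// and the rounded average of the eight is used for every output pixel.
static WEBP_INLINE uint8_t DC4ValueSketch(const uint8_t* dst) {
  int sum = 0, i;
  for (i = 0; i < 4; ++i) sum += dst[i - BPS];        // top row
  for (i = 0; i < 4; ++i) sum += dst[-1 + i * BPS];   // left column
  return (uint8_t)((sum + 4) >> 3);                   // same as vrshrn by 3
}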

// TrueMotion (4x4 + 8x8)
static WEBP_INLINE void TrueMotion_NEON(uint8_t* dst, int size) {
  const uint8x8_t TL = vld1_dup_u8(dst - BPS - 1);  // top-left pixel 'A[-1]'
  const uint8x8_t T = vld1_u8(dst - BPS);           // top row 'A[0..3]'
  const int16x8_t d = vreinterpretq_s16_u16(vsubl_u8(T, TL));  // A[c] - A[-1]
  int y;
  for (y = 0; y < size; y += 4) {
    // left edge
    const int16x8_t L0 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 0 * BPS - 1));
    const int16x8_t L1 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 1 * BPS - 1));
    const int16x8_t L2 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 2 * BPS - 1));
    const int16x8_t L3 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 3 * BPS - 1));
    const int16x8_t r0 = vaddq_s16(L0, d);  // L[r] + A[c] - A[-1]
    const int16x8_t r1 = vaddq_s16(L1, d);
    const int16x8_t r2 = vaddq_s16(L2, d);
    const int16x8_t r3 = vaddq_s16(L3, d);
    // Saturate and store the result.
    const uint32x2_t r0_u32 = vreinterpret_u32_u8(vqmovun_s16(r0));
    const uint32x2_t r1_u32 = vreinterpret_u32_u8(vqmovun_s16(r1));
    const uint32x2_t r2_u32 = vreinterpret_u32_u8(vqmovun_s16(r2));
    const uint32x2_t r3_u32 = vreinterpret_u32_u8(vqmovun_s16(r3));
    if (size == 4) {
      vst1_lane_u32((uint32_t*)(dst + 0 * BPS), r0_u32, 0);
      vst1_lane_u32((uint32_t*)(dst + 1 * BPS), r1_u32, 0);
      vst1_lane_u32((uint32_t*)(dst + 2 * BPS), r2_u32, 0);
      vst1_lane_u32((uint32_t*)(dst + 3 * BPS), r3_u32, 0);
    } else {
      vst1_u32((uint32_t*)(dst + 0 * BPS), r0_u32);
      vst1_u32((uint32_t*)(dst + 1 * BPS), r1_u32);
      vst1_u32((uint32_t*)(dst + 2 * BPS), r2_u32);
      vst1_u32((uint32_t*)(dst + 3 * BPS), r3_u32);
    }
    dst += 4 * BPS;
  }
}
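
// Illustrative only: the per-pixel rule the TrueMotion code above implements
// (name ours, not called anywhere). Each pixel is predicted from its row's
// left neighbour plus its column's top neighbour minus the shared top-left
// corner, then clamped to 8 bits (the vqmovun step above).
static WEBP_INLINE void TrueMotionSketch(uint8_t* dst, int size) {
  const uint8_t* const top = dst - BPS;
  const int top_left = dst[-BPS - 1];
  int x, y;
  for (y = 0; y < size; ++y) {
    const int left = dst[y * BPS - 1];
    for (x = 0; x < size; ++x) {
      const int v = left + top[x] - top_left;
      dst[x + y * BPS] = (v < 0) ? 0 : (v > 255) ? 255 : (uint8_t)v;
    }
  }
}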

static void TM4_NEON(uint8_t* dst) { TrueMotion_NEON(dst, 4); }

static void VE4_NEON(uint8_t* dst) {    // vertical
  // NB: avoid vld1_u64 here as an alignment hint may be added -> SIGBUS.
  const uint64x1_t A0 = vreinterpret_u64_u8(vld1_u8(dst - BPS - 1));  // top row
  const uint64x1_t A1 = vshr_n_u64(A0, 8);
  const uint64x1_t A2 = vshr_n_u64(A0, 16);
  const uint8x8_t ABCDEFGH = vreinterpret_u8_u64(A0);
  const uint8x8_t BCDEFGH0 = vreinterpret_u8_u64(A1);
  const uint8x8_t CDEFGH00 = vreinterpret_u8_u64(A2);
  const uint8x8_t b = vhadd_u8(ABCDEFGH, CDEFGH00);
  const uint8x8_t avg = vrhadd_u8(b, BCDEFGH0);
  int i;
  for (i = 0; i < 4; ++i) {
    vst1_lane_u32((uint32_t*)(dst + i * BPS), vreinterpret_u32_u8(avg), 0);
  }
}
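
// Illustrative only: the smoothing VE4_NEON applies (name ours, not called
// anywhere). Each of the four predicted columns is the rounded [1 2 1]
// average of the pixel above it and that pixel's two horizontal neighbours;
// the vhadd/vrhadd pair above computes the same value without leaving 8 bits.
static WEBP_INLINE uint8_t VE4TapSketch(const uint8_t* top, int x) {
  // 'top' points at dst - BPS, so top[-1] is the top-left corner pixel.
  return (uint8_t)((top[x - 1] + 2 * top[x] + top[x + 1] + 2) >> 2);
}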

static void RD4_NEON(uint8_t* dst) {    // Down-right
  const uint8x8_t XABCD_u8 = vld1_u8(dst - BPS - 1);
  const uint64x1_t XABCD = vreinterpret_u64_u8(XABCD_u8);
  const uint64x1_t ____XABC = vshl_n_u64(XABCD, 32);
  const uint32_t I = dst[-1 + 0 * BPS];
  const uint32_t J = dst[-1 + 1 * BPS];
  const uint32_t K = dst[-1 + 2 * BPS];
  const uint32_t L = dst[-1 + 3 * BPS];
  const uint64x1_t LKJI____ =
      vcreate_u64((uint64_t)L | (K << 8) | (J << 16) | (I << 24));
  const uint64x1_t LKJIXABC = vorr_u64(LKJI____, ____XABC);
  const uint8x8_t KJIXABC_ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 8));
  const uint8x8_t JIXABC__ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 16));
  const uint8_t D = vget_lane_u8(XABCD_u8, 4);
  const uint8x8_t JIXABCD_ = vset_lane_u8(D, JIXABC__, 6);
  const uint8x8_t LKJIXABC_u8 = vreinterpret_u8_u64(LKJIXABC);
  const uint8x8_t avg1 = vhadd_u8(JIXABCD_, LKJIXABC_u8);
  const uint8x8_t avg2 = vrhadd_u8(avg1, KJIXABC_);
  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
  const uint32x2_t r3 = vreinterpret_u32_u8(avg2);
  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64,  8));
  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
  const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
  vst1_lane_u32((uint32_t*)(dst + 0 * BPS), r0, 0);
  vst1_lane_u32((uint32_t*)(dst + 1 * BPS), r1, 0);
  vst1_lane_u32((uint32_t*)(dst + 2 * BPS), r2, 0);
  vst1_lane_u32((uint32_t*)(dst + 3 * BPS), r3, 0);
}

static void LD4_NEON(uint8_t* dst) {    // Down-left
  // Note using the same shift trick as VE4() is slower here.
  const uint8x8_t ABCDEFGH = vld1_u8(dst - BPS + 0);
  const uint8x8_t BCDEFGH0 = vld1_u8(dst - BPS + 1);
  const uint8x8_t CDEFGH00 = vld1_u8(dst - BPS + 2);
  const uint8x8_t CDEFGHH0 = vset_lane_u8(dst[-BPS + 7], CDEFGH00, 6);
  const uint8x8_t avg1 = vhadd_u8(ABCDEFGH, CDEFGHH0);
  const uint8x8_t avg2 = vrhadd_u8(avg1, BCDEFGH0);
  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
  const uint32x2_t r0 = vreinterpret_u32_u8(avg2);
  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64,  8));
  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
  const uint32x2_t r3 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
  vst1_lane_u32((uint32_t*)(dst + 0 * BPS), r0, 0);
  vst1_lane_u32((uint32_t*)(dst + 1 * BPS), r1, 0);
  vst1_lane_u32((uint32_t*)(dst + 2 * BPS), r2, 0);
  vst1_lane_u32((uint32_t*)(dst + 3 * BPS), r3, 0);
}
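
// Illustrative only: both diagonal predictors above assemble an 8-pixel edge
// and run the same rounded [1 2 1] filter over it (the vhadd/vrhadd pair);
// each predicted row is then a 4-byte window into the filtered edge, shifted
// by one byte per row via the vshr_n_u64 calls. A sketch of the shared filter
// step (name ours, not called anywhere):
static WEBP_INLINE uint8_t DiagonalTapSketch(uint8_t a, uint8_t b, uint8_t c) {
  return (uint8_t)((a + 2 * b + c + 2) >> 2);   // rounded 3-tap average
}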

//------------------------------------------------------------------------------
// Chroma

static void VE8uv_NEON(uint8_t* dst) {    // vertical
  const uint8x8_t top = vld1_u8(dst - BPS);
  int j;
  for (j = 0; j < 8; ++j) {
    vst1_u8(dst + j * BPS, top);
  }
}

static void HE8uv_NEON(uint8_t* dst) {    // horizontal
  int j;
  for (j = 0; j < 8; ++j) {
    const uint8x8_t left = vld1_dup_u8(dst - 1);
    vst1_u8(dst, left);
    dst += BPS;
  }
}

static WEBP_INLINE void DC8_NEON(uint8_t* dst, int do_top, int do_left) {
  uint16x8_t sum_top;
  uint16x8_t sum_left;
  uint8x8_t dc0;

  if (do_top) {
    const uint8x8_t A = vld1_u8(dst - BPS);  // top row
#if defined(__aarch64__)
    const uint16_t p2 = vaddlv_u8(A);
    sum_top = vdupq_n_u16(p2);
#else
    const uint16x4_t p0 = vpaddl_u8(A);  // cascading summation of the top
    const uint16x4_t p1 = vpadd_u16(p0, p0);
    const uint16x4_t p2 = vpadd_u16(p1, p1);
    sum_top = vcombine_u16(p2, p2);
#endif
  }

  if (do_left) {
    const uint8x8_t L0 = vld1_u8(dst + 0 * BPS - 1);
    const uint8x8_t L1 = vld1_u8(dst + 1 * BPS - 1);
    const uint8x8_t L2 = vld1_u8(dst + 2 * BPS - 1);
    const uint8x8_t L3 = vld1_u8(dst + 3 * BPS - 1);
    const uint8x8_t L4 = vld1_u8(dst + 4 * BPS - 1);
    const uint8x8_t L5 = vld1_u8(dst + 5 * BPS - 1);
    const uint8x8_t L6 = vld1_u8(dst + 6 * BPS - 1);
    const uint8x8_t L7 = vld1_u8(dst + 7 * BPS - 1);
    const uint16x8_t s0 = vaddl_u8(L0, L1);
    const uint16x8_t s1 = vaddl_u8(L2, L3);
    const uint16x8_t s2 = vaddl_u8(L4, L5);
    const uint16x8_t s3 = vaddl_u8(L6, L7);
    const uint16x8_t s01 = vaddq_u16(s0, s1);
    const uint16x8_t s23 = vaddq_u16(s2, s3);
    sum_left = vaddq_u16(s01, s23);
  }

  if (do_top && do_left) {
    const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
    dc0 = vrshrn_n_u16(sum, 4);
  } else if (do_top) {
    dc0 = vrshrn_n_u16(sum_top, 3);
  } else if (do_left) {
    dc0 = vrshrn_n_u16(sum_left, 3);
  } else {
    dc0 = vdup_n_u8(0x80);
  }

  {
    const uint8x8_t dc = vdup_lane_u8(dc0, 0);
    int i;
    for (i = 0; i < 8; ++i) {
      vst1_u32((uint32_t*)(dst + i * BPS), vreinterpret_u32_u8(dc));
    }
  }
}
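
// Illustrative only: the rounding encoded by the vrshrn_n_u16 calls in
// DC8_NEON (name ours, not called anywhere). With both edges there are 16
// border samples, so the divisor is 16; with a single edge, 8; with neither,
// the predictor falls back to the mid-grey value 0x80.
static WEBP_INLINE uint8_t DC8AverageSketch(int sum, int num_samples) {
  if (num_samples == 0) return 0x80;   // no top row, no left column
  return (uint8_t)((sum + num_samples / 2) / num_samples);   // rounded mean
}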

static void DC8uv_NEON(uint8_t* dst) { DC8_NEON(dst, 1, 1); }
static void DC8uvNoTop_NEON(uint8_t* dst) { DC8_NEON(dst, 0, 1); }
static void DC8uvNoLeft_NEON(uint8_t* dst) { DC8_NEON(dst, 1, 0); }
static void DC8uvNoTopLeft_NEON(uint8_t* dst) { DC8_NEON(dst, 0, 0); }

static void TM8uv_NEON(uint8_t* dst) { TrueMotion_NEON(dst, 8); }

//------------------------------------------------------------------------------
// 16x16

static void VE16_NEON(uint8_t* dst) {    // vertical
  const uint8x16_t top = vld1q_u8(dst - BPS);
  int j;
  for (j = 0; j < 16; ++j) {
    vst1q_u8(dst + j * BPS, top);
  }
}

static void HE16_NEON(uint8_t* dst) {    // horizontal
  int j;
  for (j = 0; j < 16; ++j) {
    const uint8x16_t left = vld1q_dup_u8(dst - 1);
    vst1q_u8(dst, left);
    dst += BPS;
  }
}

static WEBP_INLINE void DC16_NEON(uint8_t* dst, int do_top, int do_left) {
  uint16x8_t sum_top;
  uint16x8_t sum_left;
  uint8x8_t dc0;

  if (do_top) {
    const uint8x16_t A = vld1q_u8(dst - BPS);  // top row
#if defined(__aarch64__)
    const uint16_t p3 = vaddlvq_u8(A);
    sum_top = vdupq_n_u16(p3);
#else
    const uint16x8_t p0 = vpaddlq_u8(A);  // cascading summation of the top
    const uint16x4_t p1 = vadd_u16(vget_low_u16(p0), vget_high_u16(p0));
    const uint16x4_t p2 = vpadd_u16(p1, p1);
    const uint16x4_t p3 = vpadd_u16(p2, p2);
    sum_top = vcombine_u16(p3, p3);
#endif
  }

  if (do_left) {
    int i;
    sum_left = vdupq_n_u16(0);
    for (i = 0; i < 16; i += 8) {
      const uint8x8_t L0 = vld1_u8(dst + (i + 0) * BPS - 1);
      const uint8x8_t L1 = vld1_u8(dst + (i + 1) * BPS - 1);
      const uint8x8_t L2 = vld1_u8(dst + (i + 2) * BPS - 1);
      const uint8x8_t L3 = vld1_u8(dst + (i + 3) * BPS - 1);
      const uint8x8_t L4 = vld1_u8(dst + (i + 4) * BPS - 1);
      const uint8x8_t L5 = vld1_u8(dst + (i + 5) * BPS - 1);
      const uint8x8_t L6 = vld1_u8(dst + (i + 6) * BPS - 1);
      const uint8x8_t L7 = vld1_u8(dst + (i + 7) * BPS - 1);
      const uint16x8_t s0 = vaddl_u8(L0, L1);
      const uint16x8_t s1 = vaddl_u8(L2, L3);
      const uint16x8_t s2 = vaddl_u8(L4, L5);
      const uint16x8_t s3 = vaddl_u8(L6, L7);
      const uint16x8_t s01 = vaddq_u16(s0, s1);
      const uint16x8_t s23 = vaddq_u16(s2, s3);
      const uint16x8_t sum = vaddq_u16(s01, s23);
      sum_left = vaddq_u16(sum_left, sum);
    }
  }

  if (do_top && do_left) {
    const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
    dc0 = vrshrn_n_u16(sum, 5);
  } else if (do_top) {
    dc0 = vrshrn_n_u16(sum_top, 4);
  } else if (do_left) {
    dc0 = vrshrn_n_u16(sum_left, 4);
  } else {
    dc0 = vdup_n_u8(0x80);
  }

  {
    const uint8x16_t dc = vdupq_lane_u8(dc0, 0);
    int i;
    for (i = 0; i < 16; ++i) {
      vst1q_u8(dst + i * BPS, dc);
    }
  }
}

static void DC16TopLeft_NEON(uint8_t* dst) { DC16_NEON(dst, 1, 1); }
static void DC16NoTop_NEON(uint8_t* dst) { DC16_NEON(dst, 0, 1); }
static void DC16NoLeft_NEON(uint8_t* dst) { DC16_NEON(dst, 1, 0); }
static void DC16NoTopLeft_NEON(uint8_t* dst) { DC16_NEON(dst, 0, 0); }

static void TM16_NEON(uint8_t* dst) {
  const uint8x8_t TL = vld1_dup_u8(dst - BPS - 1);  // top-left pixel 'A[-1]'
  const uint8x16_t T = vld1q_u8(dst - BPS);         // top row 'A[0..15]'
  // A[c] - A[-1]
  const int16x8_t d_lo = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(T), TL));
  const int16x8_t d_hi = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(T), TL));
  int y;
  for (y = 0; y < 16; y += 4) {
    // left edge
    const int16x8_t L0 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 0 * BPS - 1));
    const int16x8_t L1 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 1 * BPS - 1));
    const int16x8_t L2 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 2 * BPS - 1));
    const int16x8_t L3 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 3 * BPS - 1));
    const int16x8_t r0_lo = vaddq_s16(L0, d_lo);  // L[r] + A[c] - A[-1]
    const int16x8_t r1_lo = vaddq_s16(L1, d_lo);
    const int16x8_t r2_lo = vaddq_s16(L2, d_lo);
    const int16x8_t r3_lo = vaddq_s16(L3, d_lo);
    const int16x8_t r0_hi = vaddq_s16(L0, d_hi);
    const int16x8_t r1_hi = vaddq_s16(L1, d_hi);
    const int16x8_t r2_hi = vaddq_s16(L2, d_hi);
    const int16x8_t r3_hi = vaddq_s16(L3, d_hi);
    // Saturate and store the result.
    const uint8x16_t row0 = vcombine_u8(vqmovun_s16(r0_lo), vqmovun_s16(r0_hi));
    const uint8x16_t row1 = vcombine_u8(vqmovun_s16(r1_lo), vqmovun_s16(r1_hi));
    const uint8x16_t row2 = vcombine_u8(vqmovun_s16(r2_lo), vqmovun_s16(r2_hi));
    const uint8x16_t row3 = vcombine_u8(vqmovun_s16(r3_lo), vqmovun_s16(r3_hi));
    vst1q_u8(dst + 0 * BPS, row0);
    vst1q_u8(dst + 1 * BPS, row1);
    vst1q_u8(dst + 2 * BPS, row2);
    vst1q_u8(dst + 3 * BPS, row3);
    dst += 4 * BPS;
  }
}

//------------------------------------------------------------------------------
// Entry point

extern void VP8DspInitNEON(void);

WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitNEON(void) {
  VP8Transform = TransformTwo_NEON;
  VP8TransformAC3 = TransformAC3_NEON;
  VP8TransformDC = TransformDC_NEON;
  VP8TransformWHT = TransformWHT_NEON;

  VP8VFilter16 = VFilter16_NEON;
  VP8VFilter16i = VFilter16i_NEON;
  VP8HFilter16 = HFilter16_NEON;
#if !defined(WORK_AROUND_GCC)
  VP8HFilter16i = HFilter16i_NEON;
#endif
  VP8VFilter8 = VFilter8_NEON;
  VP8VFilter8i = VFilter8i_NEON;
#if !defined(WORK_AROUND_GCC)
  VP8HFilter8 = HFilter8_NEON;
  VP8HFilter8i = HFilter8i_NEON;
#endif
  VP8SimpleVFilter16 = SimpleVFilter16_NEON;
  VP8SimpleHFilter16 = SimpleHFilter16_NEON;
  VP8SimpleVFilter16i = SimpleVFilter16i_NEON;
  VP8SimpleHFilter16i = SimpleHFilter16i_NEON;

  VP8PredLuma4[0] = DC4_NEON;
  VP8PredLuma4[1] = TM4_NEON;
  VP8PredLuma4[2] = VE4_NEON;
  VP8PredLuma4[4] = RD4_NEON;
  VP8PredLuma4[6] = LD4_NEON;

  VP8PredLuma16[0] = DC16TopLeft_NEON;
  VP8PredLuma16[1] = TM16_NEON;
  VP8PredLuma16[2] = VE16_NEON;
  VP8PredLuma16[3] = HE16_NEON;
  VP8PredLuma16[4] = DC16NoTop_NEON;
  VP8PredLuma16[5] = DC16NoLeft_NEON;
  VP8PredLuma16[6] = DC16NoTopLeft_NEON;

  VP8PredChroma8[0] = DC8uv_NEON;
  VP8PredChroma8[1] = TM8uv_NEON;
  VP8PredChroma8[2] = VE8uv_NEON;
  VP8PredChroma8[3] = HE8uv_NEON;
  VP8PredChroma8[4] = DC8uvNoTop_NEON;
  VP8PredChroma8[5] = DC8uvNoLeft_NEON;
  VP8PredChroma8[6] = DC8uvNoTopLeft_NEON;
}

#else  // !WEBP_USE_NEON

WEBP_DSP_INIT_STUB(VP8DspInitNEON)

#endif  // WEBP_USE_NEON