#pragma once

/*
  This header contains inline code that translates SSE intrinsics
  to NEON intrinsics or to software emulation.

  Contributions are welcome: add missing intrinsics, add unit tests,
  clean up the implementation, merge and simplify templates.

  Warning: this code was written late at night, so it surely contains
  bugs, imperfections, flaws and all other kinds of errors and mistakes.
*/
/* Author: Vitaliy Manushkin <agri@yandex-team.ru> */

#include <util/system/platform.h>
#include <util/system/compiler.h>
#include <util/system/types.h>

#if !defined(_arm64_)
#error "This header is for ARM64 (aarch64) platform only. " \
       "Include sse.h instead of including this header directly."
#endif

#include <arm_neon.h>
union __m128i {
    uint64x2_t AsUi64x2;
    int64x2_t AsSi64x2;
    uint32x4_t AsUi32x4;
    int32x4_t AsSi32x4;
    uint16x8_t AsUi16x8;
    int16x8_t AsSi16x8;
    uint8x16_t AsUi8x16;
    int8x16_t AsSi8x16;
    float32x4_t AsFloat32x4;
    float64x2_t AsFloat64x2;
};

union __m128 {
    float32x4_t AsFloat32x4;
    float64x2_t AsFloat64x2;
    uint32x4_t AsUi32x4;
    int32x4_t AsSi32x4;
    uint64x2_t AsUi64x2;
    int64x2_t AsSi64x2;
    uint8x16_t AsUi8x16;
    int8x16_t AsSi8x16;
    __m128i As128i;
};

typedef float64x2_t __m128d;
enum _mm_hint
{
    /* _MM_HINT_ET is _MM_HINT_T with the 3rd bit set. */
    _MM_HINT_ET0 = 7,
    _MM_HINT_ET1 = 6,
    _MM_HINT_T0 = 3,
    _MM_HINT_T1 = 2,
    _MM_HINT_T2 = 1,
    _MM_HINT_NTA = 0
};
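/* All locality hints collapse to a plain __builtin_prefetch(p), which
   defaults to a read prefetch with maximal temporal locality; the hint
   argument is accepted only for source compatibility and is ignored. */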
Y_FORCE_INLINE void _mm_prefetch(const void* p, enum _mm_hint) {
    __builtin_prefetch(p);
}
template <typename TType>
struct TQType;

template <>
struct TQType<uint8x16_t> {
    static inline uint8x16_t& As(__m128i& value) {
        return value.AsUi8x16;
    }
    static inline const uint8x16_t& As(const __m128i& value) {
        return value.AsUi8x16;
    }
};

template <>
struct TQType<int8x16_t> {
    static inline int8x16_t& As(__m128i& value) {
        return value.AsSi8x16;
    }
    static inline const int8x16_t& As(const __m128i& value) {
        return value.AsSi8x16;
    }
};

template <>
struct TQType<uint16x8_t> {
    static inline uint16x8_t& As(__m128i& value) {
        return value.AsUi16x8;
    }
    static inline const uint16x8_t& As(const __m128i& value) {
        return value.AsUi16x8;
    }
};

template <>
struct TQType<int16x8_t> {
    static inline int16x8_t& As(__m128i& value) {
        return value.AsSi16x8;
    }
    static inline const int16x8_t& As(const __m128i& value) {
        return value.AsSi16x8;
    }
};

template <>
struct TQType<uint32x4_t> {
    static inline uint32x4_t& As(__m128i& value) {
        return value.AsUi32x4;
    }
    static inline const uint32x4_t& As(const __m128i& value) {
        return value.AsUi32x4;
    }
};

template <>
struct TQType<int32x4_t> {
    static inline int32x4_t& As(__m128i& value) {
        return value.AsSi32x4;
    }
    static inline const int32x4_t& As(const __m128i& value) {
        return value.AsSi32x4;
    }
};

template <>
struct TQType<uint64x2_t> {
    static inline uint64x2_t& As(__m128i& value) {
        return value.AsUi64x2;
    }
    static inline const uint64x2_t& As(const __m128i& value) {
        return value.AsUi64x2;
    }
    static inline uint64x2_t& As(__m128& value) {
        return value.AsUi64x2;
    }
    static inline const uint64x2_t& As(const __m128& value) {
        return value.AsUi64x2;
    }
};

template <>
struct TQType<int64x2_t> {
    static inline int64x2_t& As(__m128i& value) {
        return value.AsSi64x2;
    }
    static inline const int64x2_t& As(const __m128i& value) {
        return value.AsSi64x2;
    }
};
template <typename TValue>
struct TBaseWrapper {
    TValue Value;

    Y_FORCE_INLINE
    operator TValue&() {
        return Value;
    }

    Y_FORCE_INLINE
    operator const TValue&() const {
        return Value;
    }
};
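/* Many intrinsics below are emulated with types rather than functions:
   the constructor performs the operation and the implicit conversion back
   to the underlying vector type returns the result, so call sites read
   like ordinary intrinsic calls, e.g. (illustrative usage only):

       __m128i sum = _mm_add_epi16(a, b); // constructs a wrapper, converts to __m128i
*/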
template <typename TOp, typename TFunc, TFunc* func,
          typename TDup, TDup* dupfunc>
struct TWrapperSingleDup: public TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    TWrapperSingleDup(const __m128i& op, const int shift) {
        TQType<TOp>::As(Value) = func(TQType<TOp>::As(op), dupfunc(shift));
    }
};

template <typename TOp, typename TFunc, TFunc* func,
          typename TDup, TDup* dupfunc>
struct TWrapperSingleNegDup: public TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    TWrapperSingleNegDup(const __m128i& op, const int shift) {
        TQType<TOp>::As(Value) = func(TQType<TOp>::As(op), dupfunc(-shift));
    }
};
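/* NEON has no right-shift-by-register instruction; shifting left by a
   negated count (vshlq with a negative shift vector) is the standard
   substitute, used throughout the shift emulations below. */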
inline __m128i _mm_srl_epi16(__m128i a, __m128i count) {
    __m128i res;
    res.AsUi16x8 = vshlq_u16(a.AsUi16x8, vdupq_n_s16(-count.AsUi16x8[0]));
    return res;
}

inline __m128i _mm_srl_epi32(__m128i a, __m128i count) {
    __m128i res;
    res.AsUi32x4 = vshlq_u32(a.AsUi32x4, vdupq_n_s32(-count.AsUi32x4[0]));
    return res;
}

inline __m128i _mm_srl_epi64(__m128i a, __m128i count) {
    __m128i res;
    res.AsUi64x2 = vshlq_u64(a.AsUi64x2, vdupq_n_s64(-count.AsUi64x2[0]));
    return res;
}

inline __m128i _mm_srai_epi16(__m128i a, int count) {
    __m128i res;
    res.AsSi16x8 = vqshlq_s16(a.AsSi16x8, vdupq_n_s16(-count));
    return res;
}

inline __m128i _mm_srai_epi32(__m128i a, int count) {
    __m128i res;
    res.AsSi32x4 = vqshlq_s32(a.AsSi32x4, vdupq_n_s32(-count));
    return res;
}

using _mm_srli_epi16 =
    TWrapperSingleNegDup<uint16x8_t, decltype(vshlq_u16), vshlq_u16,
                         decltype(vdupq_n_s16), vdupq_n_s16>;
using _mm_srli_epi32 =
    TWrapperSingleNegDup<uint32x4_t, decltype(vshlq_u32), vshlq_u32,
                         decltype(vdupq_n_s32), vdupq_n_s32>;
using _mm_srli_epi64 =
    TWrapperSingleNegDup<uint64x2_t, decltype(vshlq_u64), vshlq_u64,
                         decltype(vdupq_n_s64), vdupq_n_s64>;

inline __m128i _mm_sll_epi16(__m128i a, __m128i count) {
    __m128i res;
    res.AsUi16x8 = vshlq_u16(a.AsUi16x8, vdupq_n_s16(count.AsUi16x8[0]));
    return res;
}

inline __m128i _mm_sll_epi32(__m128i a, __m128i count) {
    __m128i res;
    res.AsUi32x4 = vshlq_u32(a.AsUi32x4, vdupq_n_s32(count.AsUi32x4[0]));
    return res;
}

inline __m128i _mm_sll_epi64(__m128i a, __m128i count) {
    __m128i res;
    res.AsUi64x2 = vshlq_u64(a.AsUi64x2, vdupq_n_s64(count.AsUi64x2[0]));
    return res;
}

using _mm_slli_epi16 =
    TWrapperSingleDup<uint16x8_t, decltype(vshlq_u16), vshlq_u16,
                      decltype(vdupq_n_s16), vdupq_n_s16>;
using _mm_slli_epi32 =
    TWrapperSingleDup<uint32x4_t, decltype(vshlq_u32), vshlq_u32,
                      decltype(vdupq_n_s32), vdupq_n_s32>;
using _mm_slli_epi64 =
    TWrapperSingleDup<uint64x2_t, decltype(vshlq_u64), vshlq_u64,
                      decltype(vdupq_n_s64), vdupq_n_s64>;
template <typename TOp, typename TFunc, TFunc* func, typename... TParams>
struct TWrapperDual : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    TWrapperDual(const __m128i& op1, const __m128i& op2, TParams... params) {
        TQType<TOp>::As(Value) = (TOp)
            func(TQType<TOp>::As(op1),
                 TQType<TOp>::As(op2),
                 params...);
    }
};

template <typename TOp, typename TFunc, TFunc* func, typename... TParams>
struct TWrapperDualSwap : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    TWrapperDualSwap(const __m128i& op1, const __m128i& op2, TParams... params) {
        TQType<TOp>::As(Value) =
            func(TQType<TOp>::As(op2),
                 TQType<TOp>::As(op1),
                 params...);
    }
};

template <typename TOp, typename TFunc, TFunc* func, typename TArgument = __m128>
struct TWrapperDualF : TBaseWrapper<TArgument> {
    Y_FORCE_INLINE
    TWrapperDualF(const TArgument& op1, const TArgument& op2) {
        TQType<TOp>::As(TBaseWrapper<TArgument>::Value) = (TOp) func(TQType<TOp>::As(op1), TQType<TOp>::As(op2));
    }
};
using _mm_or_si128 = TWrapperDual<uint64x2_t, decltype(vorrq_u64), vorrq_u64>;
using _mm_and_si128 = TWrapperDual<uint64x2_t, decltype(vandq_u64), vandq_u64>;
using _mm_andnot_si128 =
    TWrapperDualSwap<uint64x2_t, decltype(vbicq_u64), vbicq_u64>;
using _mm_xor_si128 = TWrapperDual<uint64x2_t, decltype(veorq_u64), veorq_u64>;

using _mm_add_epi8 = TWrapperDual<uint8x16_t, decltype(vaddq_u8), vaddq_u8>;
using _mm_add_epi16 = TWrapperDual<uint16x8_t, decltype(vaddq_u16), vaddq_u16>;
using _mm_add_epi32 = TWrapperDual<uint32x4_t, decltype(vaddq_u32), vaddq_u32>;
using _mm_add_epi64 = TWrapperDual<uint64x2_t, decltype(vaddq_u64), vaddq_u64>;
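/* _mm_madd_epi16 (pmaddwd): multiplies eight pairs of signed 16-bit values
   and horizontally adds each pair of adjacent 32-bit products, yielding
   four 32-bit sums: res[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1]. */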
inline __m128i _mm_madd_epi16(__m128i a, __m128i b) {
    int32x4_t aLow;
    int32x4_t aHigh;
    int32x4_t bLow;
    int32x4_t bHigh;
#ifdef __LITTLE_ENDIAN__
    aLow[0] = a.AsSi16x8[0]; //!< I couldn't find vector instructions to do that. Feel free to fix this code.
    aLow[1] = a.AsSi16x8[2];
    aLow[2] = a.AsSi16x8[4];
    aLow[3] = a.AsSi16x8[6];
    aHigh[0] = a.AsSi16x8[1];
    aHigh[1] = a.AsSi16x8[3];
    aHigh[2] = a.AsSi16x8[5];
    aHigh[3] = a.AsSi16x8[7];
    bLow[0] = b.AsSi16x8[0];
    bLow[1] = b.AsSi16x8[2];
    bLow[2] = b.AsSi16x8[4];
    bLow[3] = b.AsSi16x8[6];
    bHigh[0] = b.AsSi16x8[1];
    bHigh[1] = b.AsSi16x8[3];
    bHigh[2] = b.AsSi16x8[5];
    bHigh[3] = b.AsSi16x8[7];
#else
#error Not implemented yet. Do it yourself.
#endif
    const int32x4_t lowMul = vmulq_s32(aLow, bLow);
    const int32x4_t highMul = vmulq_s32(aHigh, bHigh);
    __m128i res;
    res.AsSi32x4 = vaddq_s32(lowMul, highMul);
    return res;
}
using _mm_sub_epi8 = TWrapperDual<uint8x16_t, decltype(vsubq_u8), vsubq_u8>;
using _mm_sub_epi16 = TWrapperDual<uint16x8_t, decltype(vsubq_u16), vsubq_u16>;
using _mm_sub_epi32 = TWrapperDual<uint32x4_t, decltype(vsubq_u32), vsubq_u32>;
using _mm_sub_epi64 = TWrapperDual<uint64x2_t, decltype(vsubq_u64), vsubq_u64>;

using _mm_unpacklo_epi8 =
    TWrapperDual<uint8x16_t, decltype(vzip1q_u8), vzip1q_u8>;
using _mm_unpackhi_epi8 =
    TWrapperDual<uint8x16_t, decltype(vzip2q_u8), vzip2q_u8>;
using _mm_unpacklo_epi16 =
    TWrapperDual<uint16x8_t, decltype(vzip1q_u16), vzip1q_u16>;
using _mm_unpackhi_epi16 =
    TWrapperDual<uint16x8_t, decltype(vzip2q_u16), vzip2q_u16>;
using _mm_unpacklo_epi32 =
    TWrapperDual<uint32x4_t, decltype(vzip1q_u32), vzip1q_u32>;
using _mm_unpackhi_epi32 =
    TWrapperDual<uint32x4_t, decltype(vzip2q_u32), vzip2q_u32>;
using _mm_unpacklo_epi64 =
    TWrapperDual<uint64x2_t, decltype(vzip1q_u64), vzip1q_u64>;
using _mm_unpackhi_epi64 =
    TWrapperDual<uint64x2_t, decltype(vzip2q_u64), vzip2q_u64>;

using _mm_cmpeq_epi8 =
    TWrapperDual<uint8x16_t, decltype(vceqq_u8), vceqq_u8>;
using _mm_cmpeq_epi16 =
    TWrapperDual<uint16x8_t, decltype(vceqq_u16), vceqq_u16>;
using _mm_cmpeq_epi32 =
    TWrapperDual<uint32x4_t, decltype(vceqq_u32), vceqq_u32>;

using _mm_cmpgt_epi8 =
    TWrapperDual<int8x16_t, decltype(vcgtq_s8), vcgtq_s8>;
using _mm_cmpgt_epi16 =
    TWrapperDual<int16x8_t, decltype(vcgtq_s16), vcgtq_s16>;
using _mm_cmpgt_epi32 =
    TWrapperDual<int32x4_t, decltype(vcgtq_s32), vcgtq_s32>;

using _mm_cmplt_epi8 =
    TWrapperDual<int8x16_t, decltype(vcltq_s8), vcltq_s8>;
using _mm_cmplt_epi16 =
    TWrapperDual<int16x8_t, decltype(vcltq_s16), vcltq_s16>;
using _mm_cmplt_epi32 =
    TWrapperDual<int32x4_t, decltype(vcltq_s32), vcltq_s32>;
Y_FORCE_INLINE __m128i _mm_load_si128(const __m128i* ptr) {
    __m128i result;
    result.AsUi64x2 = vld1q_u64((const uint64_t*)ptr);
    return result;
}

Y_FORCE_INLINE __m128i _mm_loadu_si128(const __m128i* ptr) {
    __m128i result;
    result.AsUi64x2 = vld1q_u64((const uint64_t*)ptr);
    return result;
}

Y_FORCE_INLINE __m128i _mm_lddqu_si128(const __m128i* ptr) {
    return _mm_loadu_si128(ptr);
}

Y_FORCE_INLINE void _mm_storeu_si128(__m128i* ptr, const __m128i& op) {
    vst1q_u64((uint64_t*)ptr, op.AsUi64x2);
}

Y_FORCE_INLINE void
_mm_store_si128(__m128i* ptr, const __m128i& op) {
    vst1q_u64((uint64_t*)ptr, op.AsUi64x2);
}
template <typename TOp, typename TFunc, TFunc* func, typename... TParams>
struct TWrapperSimple : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    TWrapperSimple(TParams... params) {
        TQType<TOp>::As(Value) = func(params...);
    }
};

template <typename TOp, typename TFunc, TFunc* func, typename... TParams>
struct TWrapperSimpleF : TBaseWrapper<__m128> {
    Y_FORCE_INLINE
    TWrapperSimpleF(TParams... params) {
        TQType<TOp>::As(Value) = func(params...);
    }
};
using _mm_set1_epi8 =
    TWrapperSimple<int8x16_t, decltype(vdupq_n_s8), vdupq_n_s8, const char>;
using _mm_set1_epi16 =
    TWrapperSimple<int16x8_t, decltype(vdupq_n_s16), vdupq_n_s16, const ui16>;
using _mm_set1_epi32 =
    TWrapperSimple<int32x4_t, decltype(vdupq_n_s32), vdupq_n_s32, const ui32>;

struct _mm_setzero_si128 : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    _mm_setzero_si128() {
        TQType<uint64x2_t>::As(Value) = vdupq_n_u64(0);
    }
};

struct _mm_loadl_epi64 : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    _mm_loadl_epi64(const __m128i* p) {
        uint64x1_t im = vld1_u64((const uint64_t*)p);
        TQType<uint64x2_t>::As(Value) = vcombine_u64(im, vdup_n_u64(0));
    }
};

struct _mm_storel_epi64 : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    _mm_storel_epi64(__m128i* a, __m128i op) {
        vst1_u64((uint64_t*)a, vget_low_u64(op.AsUi64x2));
    }
};
struct ShuffleStruct4 {
    ui8 x[4];
};

Y_FORCE_INLINE ShuffleStruct4
_MM_SHUFFLE(ui8 x4, ui8 x3, ui8 x2, ui8 x1) {
    ShuffleStruct4 result;
    result.x[0] = x1;
    result.x[1] = x2;
    result.x[2] = x3;
    result.x[3] = x4;
    return result;
}
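/* Unlike the x86 _MM_SHUFFLE macro, which yields an 8-bit immediate, this
   version returns a small struct of lane selectors. _mm_shuffle_epi32 then
   expands each 32-bit selector into four consecutive byte indices and
   permutes with a single vqtbl1q_u8 table lookup. */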
Y_FORCE_INLINE __m128i
_mm_shuffle_epi32(const __m128i& op1, const ShuffleStruct4& op2) {
    __m128i result;
    const ui8 xi[4] = {
        ui8(op2.x[0] * 4), ui8(op2.x[1] * 4),
        ui8(op2.x[2] * 4), ui8(op2.x[3] * 4)
    };
    const uint8x16_t transform = {
        ui8(xi[0]), ui8(xi[0] + 1), ui8(xi[0] + 2), ui8(xi[0] + 3),
        ui8(xi[1]), ui8(xi[1] + 1), ui8(xi[1] + 2), ui8(xi[1] + 3),
        ui8(xi[2]), ui8(xi[2] + 1), ui8(xi[2] + 2), ui8(xi[2] + 3),
        ui8(xi[3]), ui8(xi[3] + 1), ui8(xi[3] + 2), ui8(xi[3] + 3)
    };
    result.AsUi8x16 = vqtbl1q_u8(op1.AsUi8x16, transform);
    return result;
}
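/* Gathers the sign bit of each byte into a 16-bit mask: isolate the MSBs,
   shift each byte's bit to a distinct position inside its 16-bit lane
   (a per-byte shift, then a per-lane shift), and sum all lanes with
   vaddvq_u16 to assemble the result. */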
Y_FORCE_INLINE int
_mm_movemask_epi8(const __m128i& op) {
    uint8x16_t mask = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
                       0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80};
    uint8x16_t opmasked = vandq_u8(op.AsUi8x16, mask);
    int8x16_t byteshifter = {
        0, -7, 0, -7, 0, -7, 0, -7, 0, -7, 0, -7, 0, -7, 0, -7};
    uint8x16_t opshifted = vshlq_u8(opmasked, byteshifter);
    int16x8_t wordshifter = {-7, -5, -3, -1, 1, 3, 5, 7};
    uint16x8_t wordshifted =
        vshlq_u16(vreinterpretq_u16_u8(opshifted), wordshifter);
    return vaddvq_u16(wordshifted);
}
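/* vextq_u8 demands a compile-time immediate, so the whole-register byte
   shifts are templates dispatched through macros; the explicit <16>
   specializations cover shifting out the entire register, where an
   immediate of 16 would be out of range. */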
template <int imm>
struct THelper_mm_srli_si128 : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    THelper_mm_srli_si128(const __m128i a) {
        const auto zero = vdupq_n_u8(0);
        TQType<uint8x16_t>::As(Value) = vextq_u8(a.AsUi8x16, zero, imm);
    }
};

template <>
struct THelper_mm_srli_si128<16> : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    THelper_mm_srli_si128(const __m128i /* a */) {
        const auto zero = vdupq_n_u8(0);
        TQType<uint8x16_t>::As(Value) = zero;
    }
};

#define _mm_srli_si128(a, imm) THelper_mm_srli_si128<imm>(a)

template <int imm>
inline uint8x16_t vextq_u8_function(uint8x16_t a, uint8x16_t b) {
    return vextq_u8(a, b, imm);
}

template <>
inline uint8x16_t vextq_u8_function<16>(uint8x16_t /* a */, uint8x16_t b) {
    return b;
}

template <int imm>
struct THelper_mm_slli_si128 : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    THelper_mm_slli_si128(const __m128i a) {
        auto zero = vdupq_n_u8(0);
        TQType<uint8x16_t>::As(Value) = vextq_u8_function<16 - imm>(zero, a.AsUi8x16);
    }
};

#define _mm_slli_si128(a, imm) THelper_mm_slli_si128<imm>(a)
Y_FORCE_INLINE int _mm_cvtsi128_si32(const __m128i& op) {
    return vgetq_lane_s32(op.AsSi32x4, 0);
}
struct _mm_set_epi16 : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    _mm_set_epi16(const short w7, const short w6,
                  const short w5, const short w4,
                  const short w3, const short w2,
                  const short w1, const short w0) {
        int16x4_t d0 = {w0, w1, w2, w3};
        int16x4_t d1 = {w4, w5, w6, w7};
        TQType<int16x8_t>::As(Value) = vcombine_s16(d0, d1);
    }
};

struct _mm_setr_epi16 : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    _mm_setr_epi16(const short w0, const short w1,
                   const short w2, const short w3,
                   const short w4, const short w5,
                   const short w6, const short w7) {
        int16x4_t d0 = {w0, w1, w2, w3};
        int16x4_t d1 = {w4, w5, w6, w7};
        TQType<int16x8_t>::As(Value) = vcombine_s16(d0, d1);
    }
};

struct _mm_set_epi32 : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    _mm_set_epi32(const int x3, const int x2,
                  const int x1, const int x0) {
        int32x2_t d0 = {x0, x1};
        int32x2_t d1 = {x2, x3};
        TQType<int32x4_t>::As(Value) = vcombine_s32(d0, d1);
    }
};

struct _mm_setr_epi32 : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    _mm_setr_epi32(const int x0, const int x1,
                   const int x2, const int x3) {
        int32x2_t d0 = {x0, x1};
        int32x2_t d1 = {x2, x3};
        TQType<int32x4_t>::As(Value) = vcombine_s32(d0, d1);
    }
};
struct _mm_cvtsi32_si128 : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    _mm_cvtsi32_si128(int op) {
        auto zero = vdupq_n_s32(0);
        TQType<int32x4_t>::As(Value) = vsetq_lane_s32(op, zero, 0);
    }
};

struct _mm_cvtsi64_si128 : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    _mm_cvtsi64_si128(i64 op) {
        auto zero = vdupq_n_s64(0);
        TQType<int64x2_t>::As(Value) = vsetq_lane_s64(op, zero, 0);
    }
};
template <typename TOpOut, typename TOpIn,
          typename TFunc, TFunc* func,
          typename TCombine, TCombine* combine>
struct TCombineWrapper : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    TCombineWrapper(const __m128i op1, const __m128i op2) {
        TQType<TOpOut>::As(Value) =
            combine(func(TQType<TOpIn>::As(op1)),
                    func(TQType<TOpIn>::As(op2)));
    }
};

using _mm_packs_epi16 =
    TCombineWrapper<int8x16_t, int16x8_t,
                    decltype(vqmovn_s16), vqmovn_s16,
                    decltype(vcombine_s8), vcombine_s8>;
using _mm_packs_epi32 =
    TCombineWrapper<int16x8_t, int32x4_t,
                    decltype(vqmovn_s32), vqmovn_s32,
                    decltype(vcombine_s16), vcombine_s16>;
using _mm_packus_epi16 =
    TCombineWrapper<uint8x16_t, int16x8_t,
                    decltype(vqmovun_s16), vqmovun_s16,
                    decltype(vcombine_u8), vcombine_u8>;
template <typename TOpOut, typename TOpIn,
          typename TFunc, TFunc* func, typename... TParams>
struct TScalarOutWrapper : TBaseWrapper<TOpOut> {
    Y_FORCE_INLINE
    TScalarOutWrapper(const __m128i op, TParams... params) {
        TBaseWrapper<TOpOut>::Value =
            func(TQType<TOpIn>::As(op), params...);
    }
};

template <int imm>
int extract_epi8_arm(__m128i arg) {
    return vgetq_lane_u8(arg.AsUi8x16, imm);
}

template <int imm>
int extract_epi16_arm(__m128i arg) {
    return vgetq_lane_u16(arg.AsUi16x8, imm);
}

template <int imm>
int extract_epi32_arm(__m128i arg) {
    return vgetq_lane_s32(arg.AsSi32x4, imm);
}

template <int imm>
long long extract_epi64_arm(__m128i arg) {
    return vgetq_lane_s64(arg.AsSi64x2, imm);
}

#define _mm_extract_epi8(op, imm) extract_epi8_arm<imm>(op)
#define _mm_extract_epi16(op, imm) extract_epi16_arm<imm>(op)
#define _mm_extract_epi32(op, imm) extract_epi32_arm<imm>(op)
#define _mm_extract_epi64(op, imm) extract_epi64_arm<imm>(op)
#define _mm_extract_ps(op, imm) _mm_extract_epi32(op, imm)
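/* _mm_mul_epu32 multiplies the low (even-indexed) 32-bit halves of each
   64-bit lane. vuzp1q_u32 packs the even lanes of its operands, so the low
   halves of r1 and r2 hold {op1[0], op1[2]} and {op2[0], op2[2]}, which
   vmull_u32 then widens into two 64-bit products. */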
static Y_FORCE_INLINE
__m128i _mm_mul_epu32(__m128i op1, __m128i op2) {
    __m128i result;
    uint32x4_t r1 = vuzp1q_u32(op1.AsUi32x4, op2.AsUi32x4);
    uint32x4_t r2 = vuzp1q_u32(op2.AsUi32x4, op1.AsUi32x4);
    result.AsUi64x2 = vmull_u32(vget_low_u32(r1), vget_low_u32(r2));
    return result;
}
template <>
struct TQType<float32x4_t> {
    static inline float32x4_t& As(__m128& value) {
        return value.AsFloat32x4;
    }
    static inline const float32x4_t& As(const __m128& value) {
        return value.AsFloat32x4;
    }
    static inline float32x4_t& As(__m128i& value) {
        return value.AsFloat32x4;
    }
    static inline const float32x4_t& As(const __m128i& value) {
        return value.AsFloat32x4;
    }
};

template <>
struct TQType<float64x2_t> {
    static inline float64x2_t& As(__m128& value) {
        return value.AsFloat64x2;
    }
    static inline const float64x2_t& As(const __m128& value) {
        return value.AsFloat64x2;
    }
    static inline float64x2_t& As(__m128i& value) {
        return value.AsFloat64x2;
    }
    static inline const float64x2_t& As(const __m128i& value) {
        return value.AsFloat64x2;
    }
    static inline float64x2_t& As(__m128d& value) {
        return value;
    }
    static inline const float64x2_t& As(const __m128d& value) {
        return value;
    }
};
using _mm_set1_ps = TWrapperSimpleF<float32x4_t,
                                    decltype(vdupq_n_f32), vdupq_n_f32, const float>;
using _mm_set_ps1 = TWrapperSimpleF<float32x4_t,
                                    decltype(vdupq_n_f32), vdupq_n_f32, const float>;

struct _mm_setzero_ps : TBaseWrapper<__m128> {
    Y_FORCE_INLINE
    _mm_setzero_ps() {
        TQType<float32x4_t>::As(Value) = vdupq_n_f32(0.);
    }
};

Y_FORCE_INLINE __m128d _mm_setzero_pd() {
    return vdupq_n_f64(0.);
}

Y_FORCE_INLINE __m128 _mm_loadu_ps(const float* ptr) {
    __m128 result;
    result.AsFloat32x4 = vld1q_f32(ptr);
    return result;
}

Y_FORCE_INLINE __m128 _mm_load_ps(const float* ptr) {
    __m128 result;
    result.AsFloat32x4 = vld1q_f32(ptr);
    return result;
}

Y_FORCE_INLINE void _mm_storeu_ps(float* ptr, const __m128& op) {
    vst1q_f32(ptr, op.AsFloat32x4);
}

Y_FORCE_INLINE void _mm_store_ps(float* ptr, const __m128& op) {
    vst1q_f32(ptr, op.AsFloat32x4);
}
struct _mm_set_ps : TBaseWrapper<__m128> {
    Y_FORCE_INLINE
    _mm_set_ps(const float x3, const float x2,
               const float x1, const float x0) {
        float32x2_t d0 = {x0, x1};
        float32x2_t d1 = {x2, x3};
        TQType<float32x4_t>::As(Value) = vcombine_f32(d0, d1);
    }
};

Y_FORCE_INLINE __m128d _mm_set_pd(double d1, double d0) {
    const float64x1_t p0 = {d0};
    const float64x1_t p1 = {d1};
    return vcombine_f64(p0, p1);
}

Y_FORCE_INLINE __m128d _mm_loadu_pd(const double* d) {
    __m128d res;
    res = vld1q_f64(d);
    return res;
}

Y_FORCE_INLINE void _mm_storeu_pd(double* res, __m128d a) {
    vst1q_f64(res, a);
}

Y_FORCE_INLINE void _mm_store_pd(double* res, __m128d a) {
    vst1q_f64(res, a);
}
using _mm_add_ps = TWrapperDualF<float32x4_t, decltype(vaddq_f32), vaddq_f32>;
using _mm_sub_ps = TWrapperDualF<float32x4_t, decltype(vsubq_f32), vsubq_f32>;
using _mm_mul_ps = TWrapperDualF<float32x4_t, decltype(vmulq_f32), vmulq_f32>;
using _mm_div_ps = TWrapperDualF<float32x4_t, decltype(vdivq_f32), vdivq_f32>;
using _mm_cmpeq_ps = TWrapperDualF<float32x4_t, decltype(vceqq_f32), vceqq_f32>;
using _mm_cmpgt_ps = TWrapperDualF<float32x4_t, decltype(vcgtq_f32), vcgtq_f32>;
using _mm_max_ps = TWrapperDualF<float32x4_t, decltype(vmaxq_f32), vmaxq_f32>;
using _mm_min_ps = TWrapperDualF<float32x4_t, decltype(vminq_f32), vminq_f32>;

using _mm_add_pd = TWrapperDualF<float64x2_t, decltype(vaddq_f64), vaddq_f64, __m128d>;
using _mm_sub_pd = TWrapperDualF<float64x2_t, decltype(vsubq_f64), vsubq_f64, __m128d>;
using _mm_mul_pd = TWrapperDualF<float64x2_t, decltype(vmulq_f64), vmulq_f64, __m128d>;
using _mm_div_pd = TWrapperDualF<float64x2_t, decltype(vdivq_f64), vdivq_f64, __m128d>;
struct _mm_and_ps : TBaseWrapper<__m128> {
    Y_FORCE_INLINE
    _mm_and_ps(const __m128& op1, const __m128& op2) {
        TQType<uint64x2_t>::As(Value) =
            vandq_u64(TQType<uint64x2_t>::As(op1),
                      TQType<uint64x2_t>::As(op2));
    }
};

Y_FORCE_INLINE __m128d _mm_and_pd(__m128d a, __m128d b) {
    return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),
                                           vreinterpretq_u64_f64(b)));
}
Y_FORCE_INLINE void _MM_TRANSPOSE4_PS(__m128& op0, __m128& op1, __m128& op2, __m128& op3) {
    float64x2_t im0 =
        vreinterpretq_f64_f32(vtrn1q_f32(op0.AsFloat32x4, op1.AsFloat32x4));
    float64x2_t im1 =
        vreinterpretq_f64_f32(vtrn2q_f32(op0.AsFloat32x4, op1.AsFloat32x4));
    float64x2_t im2 =
        vreinterpretq_f64_f32(vtrn1q_f32(op2.AsFloat32x4, op3.AsFloat32x4));
    float64x2_t im3 =
        vreinterpretq_f64_f32(vtrn2q_f32(op2.AsFloat32x4, op3.AsFloat32x4));
    TQType<float64x2_t>::As(op0) = vtrn1q_f64(im0, im2);
    TQType<float64x2_t>::As(op1) = vtrn1q_f64(im1, im3);
    TQType<float64x2_t>::As(op2) = vtrn2q_f64(im0, im2);
    TQType<float64x2_t>::As(op3) = vtrn2q_f64(im1, im3);
}
Y_FORCE_INLINE __m128 _mm_castsi128_ps(__m128i op) {
    return reinterpret_cast<__m128&>(op);
}

Y_FORCE_INLINE __m128i _mm_castps_si128(__m128 op) {
    return reinterpret_cast<__m128i&>(op);
}
template <typename TOpOut, typename TOpIn,
          typename TFunc, TFunc* func, typename... TParams>
struct TCvtS2FWrapperSingle : TBaseWrapper<__m128> {
    Y_FORCE_INLINE
    TCvtS2FWrapperSingle(const __m128i& op, TParams... params) {
        TQType<TOpOut>::As(Value) =
            func(TQType<TOpIn>::As(op), params...);
    }
};

using _mm_cvtepi32_ps =
    TCvtS2FWrapperSingle<float32x4_t, int32x4_t,
                         decltype(vcvtq_f32_s32), vcvtq_f32_s32>;

template <typename TOpOut, typename TOpIn,
          typename TFunc, TFunc* func, typename... TParams>
struct TCvtF2SWrapperSingle : TBaseWrapper<__m128i> {
    Y_FORCE_INLINE
    TCvtF2SWrapperSingle(const __m128& op, TParams... params) {
        TQType<TOpOut>::As(Value) =
            func(TQType<TOpIn>::As(op), params...);
    }
};
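/* The ±0.5 correction below rounds halfway cases away from zero before the
   truncating convert. Note this differs from SSE cvtps2dq under the default
   MXCSR mode, which rounds ties to even; on AArch64 one could presumably
   use vcvtnq_s32_f32 (round to nearest, ties to even) instead. */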
inline __m128i _mm_cvtps_epi32(__m128 a) {
    /// vcvtq_s32_f32 rounds toward zero, but we need to round to the nearest.
    static const float32x4_t half = vdupq_n_f32(0.5f);
    static const float32x4_t negHalf = vdupq_n_f32(-0.5f);
    static const float32x4_t zero = vdupq_n_f32(0.0f);
    const float32x4_t corrections = vbslq_f32(vcgeq_f32(a.AsFloat32x4, zero), half, negHalf);
    __m128i res;
    res.AsSi32x4 = vcvtq_s32_f32(vaddq_f32(a.AsFloat32x4, corrections));
    return res;
}
using _mm_cvttps_epi32 =
    TCvtF2SWrapperSingle<int32x4_t, float32x4_t,
                         decltype(vcvtq_s32_f32), vcvtq_s32_f32>;
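/* Same bit-gathering trick as _mm_movemask_epi8, with one sign bit per
   32-bit lane: mask the sign bits, shift lane i right by (31 - i), and sum
   the lanes to form the 4-bit mask. */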
Y_FORCE_INLINE int
_mm_movemask_ps(const __m128& op) {
    uint32x4_t mask = {0x80000000, 0x80000000, 0x80000000, 0x80000000};
    uint32x4_t bits = vandq_u32(op.AsUi32x4, mask);
    int32x4_t shifts = {-31, -30, -29, -28};
    bits = vshlq_u32(bits, shifts);
    return vaddvq_u32(bits);
}

Y_FORCE_INLINE i64 _mm_cvtsi128_si64(__m128i a) {
    return vgetq_lane_s64(a.AsSi64x2, 0);
}

static inline void _mm_pause() {
    __asm__("YIELD");
}
static inline __m128 _mm_rsqrt_ps(__m128 a) {
    __m128 res;
    res.AsFloat32x4 = vrsqrteq_f32(a.AsFloat32x4);
    return res;
}

inline float _mm_cvtss_f32(__m128 a) {
    return a.AsFloat32x4[0];
}

inline __m128 _mm_cmpunord_ps(__m128 a, __m128 b) {
    __m128 res;
    res.AsUi32x4 = vorrq_u32(
        vmvnq_u32(vceqq_f32(a.AsFloat32x4, a.AsFloat32x4)), //!< 0xffffffff for all NaNs in a.
        vmvnq_u32(vceqq_f32(b.AsFloat32x4, b.AsFloat32x4)) //!< 0xffffffff for all NaNs in b.
    );
    return res;
}

inline __m128 _mm_andnot_ps(__m128 a, __m128 b) {
    __m128 res;
    res.AsUi32x4 = vandq_u32(vmvnq_u32(a.AsUi32x4), b.AsUi32x4);
    return res;
}

inline void _mm_store_ss(float* p, __m128 a) {
    *p = vgetq_lane_f32(a.AsFloat32x4, 0);
}
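/* vgetq_lane_f32 requires a compile-time lane index, so runtime lane
   selection (needed by _mm_shuffle_ps below) goes through an explicit
   switch. */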
inline float vgetg_lane_f32_switch(float32x4_t a, ui8 b) {
    switch (b & 0x3) {
        case 0:
            return vgetq_lane_f32(a, 0);
        case 1:
            return vgetq_lane_f32(a, 1);
        case 2:
            return vgetq_lane_f32(a, 2);
        case 3:
            return vgetq_lane_f32(a, 3);
    }
    return 0;
}
inline __m128 _mm_shuffle_ps(__m128 a, __m128 b, const ShuffleStruct4& shuf) {
    __m128 ret;
    ret.AsFloat32x4 = vmovq_n_f32(vgetg_lane_f32_switch(a.AsFloat32x4, shuf.x[0]));
    ret.AsFloat32x4 = vsetq_lane_f32(vgetg_lane_f32_switch(a.AsFloat32x4, shuf.x[1]), ret.AsFloat32x4, 1);
    ret.AsFloat32x4 = vsetq_lane_f32(vgetg_lane_f32_switch(b.AsFloat32x4, shuf.x[2]), ret.AsFloat32x4, 2);
    ret.AsFloat32x4 = vsetq_lane_f32(vgetg_lane_f32_switch(b.AsFloat32x4, shuf.x[3]), ret.AsFloat32x4, 3);
    return ret;
}

inline __m128 _mm_or_ps(__m128 a, __m128 b) {
    __m128 res;
    res.AsUi32x4 = vorrq_u32(a.AsUi32x4, b.AsUi32x4);
    return res;
}
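/* _mm_sad_epu8 (psadbw): vabdq_u8 takes per-byte absolute differences,
   vpaddlq_u8 pairwise-widens them to 16 bits, and the two 8-byte halves
   are then summed into 16-bit lanes 0 and 4, matching the two 64-bit
   partial sums SSE produces. */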
inline __m128i _mm_sad_epu8(__m128i a, __m128i b) {
    uint16x8_t t = vpaddlq_u8(vabdq_u8(a.AsUi8x16, b.AsUi8x16));
    uint16_t r0 = t[0] + t[1] + t[2] + t[3];
    uint16_t r4 = t[4] + t[5] + t[6] + t[7];
    uint16x8_t r = vsetq_lane_u16(r0, vdupq_n_u16(0), 0);
    __m128i ans;
    ans.AsUi16x8 = vsetq_lane_u16(r4, r, 4);
    return ans;
}
Y_FORCE_INLINE __m128i _mm_subs_epi8(__m128i a, __m128i b) {
    __m128i ans;
    ans.AsSi8x16 = vqsubq_s8(a.AsSi8x16, b.AsSi8x16);
    return ans;
}

Y_FORCE_INLINE __m128i _mm_subs_epi16(__m128i a, __m128i b) {
    __m128i ans;
    ans.AsSi16x8 = vqsubq_s16(a.AsSi16x8, b.AsSi16x8);
    return ans;
}

Y_FORCE_INLINE __m128i _mm_subs_epu8(__m128i a, __m128i b) {
    __m128i ans;
    ans.AsUi8x16 = vqsubq_u8(a.AsUi8x16, b.AsUi8x16);
    return ans;
}

Y_FORCE_INLINE __m128i _mm_subs_epu16(__m128i a, __m128i b) {
    __m128i ans;
    ans.AsUi16x8 = vqsubq_u16(a.AsUi16x8, b.AsUi16x8);
    return ans;
}

Y_FORCE_INLINE __m128d _mm_castsi128_pd(__m128i __A) {
    return reinterpret_cast<__m128d&>(__A);
}
Y_FORCE_INLINE __m128i _mm_set_epi8(ui8 i15, ui8 i14, ui8 i13, ui8 i12, ui8 i11, ui8 i10, ui8 i9, ui8 i8,
                                    ui8 i7, ui8 i6, ui8 i5, ui8 i4, ui8 i3, ui8 i2, ui8 i1, ui8 i0)
{
    // Assemble in unsigned arithmetic to avoid shifting into the sign bit.
    int a0 = int(ui32(i0) | (ui32(i1) << 8) | (ui32(i2) << 16) | (ui32(i3) << 24));
    int a1 = int(ui32(i4) | (ui32(i5) << 8) | (ui32(i6) << 16) | (ui32(i7) << 24));
    int a2 = int(ui32(i8) | (ui32(i9) << 8) | (ui32(i10) << 16) | (ui32(i11) << 24));
    int a3 = int(ui32(i12) | (ui32(i13) << 8) | (ui32(i14) << 16) | (ui32(i15) << 24));
    return _mm_set_epi32(a3, a2, a1, a0);
}
Y_FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b) {
    __m128i ans;
    ans.AsUi8x16 = vmaxq_u8(a.AsUi8x16, b.AsUi8x16);
    return ans;
}

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wuninitialized"
Y_FORCE_INLINE __m128d _mm_undefined_pd(void) {
    __m128d ans = ans;
    return ans;
}
#pragma GCC diagnostic pop
Y_FORCE_INLINE __m128d _mm_loadh_pd(__m128d a, const double* b) {
    a[1] = *b;
    return a;
}

Y_FORCE_INLINE __m128d _mm_loadl_pd(__m128d a, const double* b) {
    a[0] = *b;
    return a;
}

Y_FORCE_INLINE double _mm_cvtsd_f64(__m128d a) {
    return a[0];
}
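/* _mm_shuffle_pd: bit 0 of the mask selects which lane of a becomes result
   lane 0, bit 1 selects which lane of b becomes result lane 1; the four
   cases map onto vzip1q/vzip2q and two explicit shuffles. */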
Y_FORCE_INLINE __m128d _mm_shuffle_pd(__m128d a, __m128d b, int mask) {
    __m128d result;
    const int litmsk = mask & 0x3;
    if (litmsk == 0)
        result = vzip1q_f64(a, b);
    else if (litmsk == 1)
        result = __builtin_shufflevector(a, b, 1, 2);
    else if (litmsk == 2)
        result = __builtin_shufflevector(a, b, 0, 3);
    else
        result = vzip2q_f64(a, b);
    return result;
}