// poly1305-armv8.S

#include "arm_arch.h"
.text
// forward "declarations" are required for Apple
.hidden OPENSSL_armcap_P
.globl poly1305_init
.hidden poly1305_init
.globl poly1305_blocks
.hidden poly1305_blocks
.globl poly1305_emit
.hidden poly1305_emit
.type poly1305_init,%function
.align 5
poly1305_init:
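// Zero the hash state, clamp and store the key, and publish the function
// pointers at x2: the scalar poly1305_blocks/poly1305_emit by default, or
// the *_neon variants when OPENSSL_armcap_P advertises NEON. Returns 1
// once a key has been installed, 0 when the key pointer is NULL.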
cmp x1,xzr
stp xzr,xzr,[x0] // zero hash value
stp xzr,xzr,[x0,#16] // [along with is_base2_26]
csel x0,xzr,x0,eq
b.eq .Lno_key
#ifdef __ILP32__
ldrsw x11,.LOPENSSL_armcap_P
#else
ldr x11,.LOPENSSL_armcap_P
#endif
adr x10,.LOPENSSL_armcap_P
ldp x7,x8,[x1] // load key
mov x9,#0xfffffffc0fffffff
movk x9,#0x0fff,lsl#48
ldr w17,[x10,x11]
#ifdef __ARMEB__
rev x7,x7 // flip bytes
rev x8,x8
#endif
and x7,x7,x9 // &=0ffffffc0fffffff
and x9,x9,#-4
and x8,x8,x9 // &=0ffffffc0ffffffc
stp x7,x8,[x0,#32] // save key value
tst w17,#ARMV7_NEON
adr x12,poly1305_blocks
adr x7,poly1305_blocks_neon
adr x13,poly1305_emit
adr x8,poly1305_emit_neon
csel x12,x12,x7,eq
csel x13,x13,x8,eq
#ifdef __ILP32__
stp w12,w13,[x2]
#else
stp x12,x13,[x2]
#endif
mov x0,#1
.Lno_key:
ret
.size poly1305_init,.-poly1305_init
.type poly1305_blocks,%function
.align 5
poly1305_blocks:
ands x2,x2,#-16
b.eq .Lno_data
ldp x4,x5,[x0] // load hash value
ldp x7,x8,[x0,#32] // load key value
ldr x6,[x0,#16]
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
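// The reduction below relies on 2^130 == 5 (mod 2^130-5): the clamped r1
// is a multiple of 4, so the wrapped-around term h1*r1 can be folded in as
// h1*s1 with s1 = 5*(r1>>2) = r1 + (r1>>2).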
b .Loop
.align 5
.Loop:
ldp x10,x11,[x1],#16 // load input
sub x2,x2,#16
#ifdef __ARMEB__
rev x10,x10
rev x11,x11
#endif
adds x4,x4,x10 // accumulate input
adcs x5,x5,x11
mul x12,x4,x7 // h0*r0
adc x6,x6,x3
umulh x13,x4,x7
mul x10,x5,x9 // h1*5*r1
umulh x11,x5,x9
adds x12,x12,x10
mul x10,x4,x8 // h0*r1
adc x13,x13,x11
umulh x14,x4,x8
adds x13,x13,x10
mul x10,x5,x7 // h1*r0
adc x14,x14,xzr
umulh x11,x5,x7
adds x13,x13,x10
mul x10,x6,x9 // h2*5*r1
adc x14,x14,x11
mul x11,x6,x7 // h2*r0
adds x13,x13,x10
adc x14,x14,x11
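// x12:x13:x14 now holds h*r spanning just over 130 bits; everything above
// bit 130 is folded back multiplied by 5 (x10 = 4*(x14>>2) + (x14>>2)),
// and only the low two bits of x14 survive as the new h2.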
and x10,x14,#-4 // final reduction
and x6,x14,#3
add x10,x10,x14,lsr#2
adds x4,x12,x10
adcs x5,x13,xzr
adc x6,x6,xzr
cbnz x2,.Loop
stp x4,x5,[x0] // store hash value
str x6,[x0,#16]
.Lno_data:
ret
.size poly1305_blocks,.-poly1305_blocks
.type poly1305_emit,%function
.align 5
poly1305_emit:
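// Conditionally subtract p = 2^130-5 by adding 5 and testing for a carry
// into bit 130, then add the little-endian nonce modulo 2^128 and store
// the 16-byte tag.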
ldp x4,x5,[x0] // load hash base 2^64
ldr x6,[x0,#16]
ldp x10,x11,[x2] // load nonce
adds x12,x4,#5 // compare to modulus
adcs x13,x5,xzr
adc x14,x6,xzr
tst x14,#-4 // see if it's carried/borrowed
csel x4,x4,x12,eq
csel x5,x5,x13,eq
#ifdef __ARMEB__
ror x10,x10,#32 // flip nonce words
ror x11,x11,#32
#endif
adds x4,x4,x10 // accumulate nonce
adc x5,x5,x11
#ifdef __ARMEB__
rev x4,x4 // flip output bytes
rev x5,x5
#endif
stp x4,x5,[x1] // write result
ret
.size poly1305_emit,.-poly1305_emit
.type poly1305_mult,%function
.align 5
poly1305_mult:
mul x12,x4,x7 // h0*r0
umulh x13,x4,x7
mul x10,x5,x9 // h1*5*r1
umulh x11,x5,x9
adds x12,x12,x10
mul x10,x4,x8 // h0*r1
adc x13,x13,x11
umulh x14,x4,x8
adds x13,x13,x10
mul x10,x5,x7 // h1*r0
adc x14,x14,xzr
umulh x11,x5,x7
adds x13,x13,x10
mul x10,x6,x9 // h2*5*r1
adc x14,x14,x11
mul x11,x6,x7 // h2*r0
adds x13,x13,x10
adc x14,x14,x11
and x10,x14,#-4 // final reduction
and x6,x14,#3
add x10,x10,x14,lsr#2
adds x4,x12,x10
adcs x5,x13,xzr
adc x6,x6,xzr
ret
.size poly1305_mult,.-poly1305_mult
.type poly1305_splat,%function
.align 5
poly1305_splat:
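// Split the value in x4:x5:x6 into five 26-bit limbs and store them,
// together with the premultiplied 5*r1..5*r4, into one 32-bit lane of the
// vector key table at x0; callers step x0 to pick the lane, i.e. which
// power of r the entry describes.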
and x12,x4,#0x03ffffff // base 2^64 -> base 2^26
ubfx x13,x4,#26,#26
extr x14,x5,x4,#52
and x14,x14,#0x03ffffff
ubfx x15,x5,#14,#26
extr x16,x6,x5,#40
str w12,[x0,#16*0] // r0
add w12,w13,w13,lsl#2 // r1*5
str w13,[x0,#16*1] // r1
add w13,w14,w14,lsl#2 // r2*5
str w12,[x0,#16*2] // s1
str w14,[x0,#16*3] // r2
add w14,w15,w15,lsl#2 // r3*5
str w13,[x0,#16*4] // s2
str w15,[x0,#16*5] // r3
add w15,w16,w16,lsl#2 // r4*5
str w14,[x0,#16*6] // s3
str w16,[x0,#16*7] // r4
str w15,[x0,#16*8] // s4
ret
.size poly1305_splat,.-poly1305_splat
.type poly1305_blocks_neon,%function
.align 5
poly1305_blocks_neon:
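// Dispatch: [x0,#24] is the is_base2_26 flag. Inputs shorter than 128
// bytes are handed to the scalar poly1305_blocks while the hash is still
// kept in base 2^64; once it is in base 2^26, processing stays here.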
ldr x17,[x0,#24]
cmp x2,#128
b.hs .Lblocks_neon
cbz x17,poly1305_blocks
.Lblocks_neon:
.inst 0xd503233f // paciasp
stp x29,x30,[sp,#-80]!
add x29,sp,#0
ands x2,x2,#-16
b.eq .Lno_data_neon
cbz x17,.Lbase2_64_neon
ldp w10,w11,[x0] // load hash value base 2^26
ldp w12,w13,[x0,#8]
ldr w14,[x0,#16]
tst x2,#31
b.eq .Leven_neon
ldp x7,x8,[x0,#32] // load key value
add x4,x10,x11,lsl#26 // base 2^26 -> base 2^64
lsr x5,x12,#12
adds x4,x4,x12,lsl#52
add x5,x5,x13,lsl#14
adc x5,x5,xzr
lsr x6,x14,#24
adds x5,x5,x14,lsl#40
adc x14,x6,xzr // can be partially reduced...
ldp x12,x13,[x1],#16 // load input
sub x2,x2,#16
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
and x10,x14,#-4 // ... so reduce
and x6,x14,#3
add x10,x10,x14,lsr#2
adds x4,x4,x10
adcs x5,x5,xzr
adc x6,x6,xzr
#ifdef __ARMEB__
rev x12,x12
rev x13,x13
#endif
adds x4,x4,x12 // accumulate input
adcs x5,x5,x13
adc x6,x6,x3
bl poly1305_mult
ldr x30,[sp,#8]
cbz x3,.Lstore_base2_64_neon
and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
ubfx x11,x4,#26,#26
extr x12,x5,x4,#52
and x12,x12,#0x03ffffff
ubfx x13,x5,#14,#26
extr x14,x6,x5,#40
cbnz x2,.Leven_neon
stp w10,w11,[x0] // store hash value base 2^26
stp w12,w13,[x0,#8]
str w14,[x0,#16]
b .Lno_data_neon
.align 4
.Lstore_base2_64_neon:
stp x4,x5,[x0] // store hash value base 2^64
stp x6,xzr,[x0,#16] // note that is_base2_26 is zeroed
b .Lno_data_neon
.align 4
.Lbase2_64_neon:
ldp x7,x8,[x0,#32] // load key value
ldp x4,x5,[x0] // load hash value base 2^64
ldr x6,[x0,#16]
tst x2,#31
b.eq .Linit_neon
ldp x12,x13,[x1],#16 // load input
sub x2,x2,#16
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
#ifdef __ARMEB__
rev x12,x12
rev x13,x13
#endif
adds x4,x4,x12 // accumulate input
adcs x5,x5,x13
adc x6,x6,x3
bl poly1305_mult
.Linit_neon:
and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
ubfx x11,x4,#26,#26
extr x12,x5,x4,#52
and x12,x12,#0x03ffffff
ubfx x13,x5,#14,#26
extr x14,x6,x5,#40
stp d8,d9,[sp,#16] // meet ABI requirements
stp d10,d11,[sp,#32]
stp d12,d13,[sp,#48]
stp d14,d15,[sp,#64]
fmov d24,x10
fmov d25,x11
fmov d26,x12
fmov d27,x13
fmov d28,x14
////////////////////////////////// initialize r^n table
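// r^1..r^4 are produced with poly1305_mult and stored via poly1305_splat
// into the 9-row table of 26-bit limbs (r0,r1,5*r1,...,r4,5*r4); x0 is
// stepped back 4 bytes per power, so lane 0 of each row ends up holding
// r^4 and lane 3 holds r^1.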
mov x4,x7 // r^1
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
mov x5,x8
mov x6,xzr
add x0,x0,#48+12
bl poly1305_splat
bl poly1305_mult // r^2
sub x0,x0,#4
bl poly1305_splat
bl poly1305_mult // r^3
sub x0,x0,#4
bl poly1305_splat
bl poly1305_mult // r^4
sub x0,x0,#4
bl poly1305_splat
ldr x30,[sp,#8]
add x16,x1,#32
adr x17,.Lzeros
subs x2,x2,#64
csel x16,x17,x16,lo
mov x4,#1
str x4,[x0,#-24] // set is_base2_26
sub x0,x0,#48 // restore original x0
b .Ldo_neon
.align 4
.Leven_neon:
add x16,x1,#32
adr x17,.Lzeros
subs x2,x2,#64
csel x16,x17,x16,lo
stp d8,d9,[sp,#16] // meet ABI requirements
stp d10,d11,[sp,#32]
stp d12,d13,[sp,#48]
stp d14,d15,[sp,#64]
fmov d24,x10
fmov d25,x11
fmov d26,x12
fmov d27,x13
fmov d28,x14
.Ldo_neon:
ldp x8,x12,[x16],#16 // inp[2:3] (or zero)
ldp x9,x13,[x16],#48
lsl x3,x3,#24
add x15,x0,#48
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
and x5,x9,#0x03ffffff
ubfx x6,x8,#26,#26
ubfx x7,x9,#26,#26
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
extr x8,x12,x8,#52
extr x9,x13,x9,#52
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
fmov d14,x4
and x8,x8,#0x03ffffff
and x9,x9,#0x03ffffff
ubfx x10,x12,#14,#26
ubfx x11,x13,#14,#26
add x12,x3,x12,lsr#40
add x13,x3,x13,lsr#40
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
fmov d15,x6
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
fmov d16,x8
fmov d17,x10
fmov d18,x12
ldp x8,x12,[x1],#16 // inp[0:1]
ldp x9,x13,[x1],#48
ld1 {v0.4s,v1.4s,v2.4s,v3.4s},[x15],#64
ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[x15],#64
ld1 {v8.4s},[x15]
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
and x5,x9,#0x03ffffff
ubfx x6,x8,#26,#26
ubfx x7,x9,#26,#26
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
extr x8,x12,x8,#52
extr x9,x13,x9,#52
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
fmov d9,x4
and x8,x8,#0x03ffffff
and x9,x9,#0x03ffffff
ubfx x10,x12,#14,#26
ubfx x11,x13,#14,#26
add x12,x3,x12,lsr#40
add x13,x3,x13,lsr#40
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
fmov d10,x6
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
movi v31.2d,#-1
fmov d11,x8
fmov d12,x10
fmov d13,x12
ushr v31.2d,v31.2d,#38
b.ls .Lskip_loop
.align 4
.Loop_neon:
////////////////////////////////////////////////////////////////
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
//   \___________________/
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
//   \___________________/ \____________________/
//
// Note that we start with inp[2:3]*r^2. This is because it
// doesn't depend on reduction in previous iteration.
////////////////////////////////////////////////////////////////
// d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
// d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
// d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
// d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
// d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
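//
// Register roles: v0-v8 hold the r^4..r^1 limb table, v24-v28 the
// current hash, v9-v13 the base 2^26 limbs of inp[0:1], v14-v18 those
// of inp[2:3]; v31 is the 26-bit lane mask and x3 carries the pad bit,
// pre-shifted so that it lands on bit 128 of each block.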
subs x2,x2,#64
umull v23.2d,v14.2s,v7.s[2]
csel x16,x17,x16,lo
umull v22.2d,v14.2s,v5.s[2]
umull v21.2d,v14.2s,v3.s[2]
ldp x8,x12,[x16],#16 // inp[2:3] (or zero)
umull v20.2d,v14.2s,v1.s[2]
ldp x9,x13,[x16],#48
umull v19.2d,v14.2s,v0.s[2]
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
umlal v23.2d,v15.2s,v5.s[2]
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
umlal v22.2d,v15.2s,v3.s[2]
and x5,x9,#0x03ffffff
umlal v21.2d,v15.2s,v1.s[2]
ubfx x6,x8,#26,#26
umlal v20.2d,v15.2s,v0.s[2]
ubfx x7,x9,#26,#26
umlal v19.2d,v15.2s,v8.s[2]
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
umlal v23.2d,v16.2s,v3.s[2]
extr x8,x12,x8,#52
umlal v22.2d,v16.2s,v1.s[2]
extr x9,x13,x9,#52
umlal v21.2d,v16.2s,v0.s[2]
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
umlal v20.2d,v16.2s,v8.s[2]
fmov d14,x4
umlal v19.2d,v16.2s,v6.s[2]
and x8,x8,#0x03ffffff
umlal v23.2d,v17.2s,v1.s[2]
and x9,x9,#0x03ffffff
umlal v22.2d,v17.2s,v0.s[2]
ubfx x10,x12,#14,#26
umlal v21.2d,v17.2s,v8.s[2]
ubfx x11,x13,#14,#26
umlal v20.2d,v17.2s,v6.s[2]
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
umlal v19.2d,v17.2s,v4.s[2]
fmov d15,x6
add v11.2s,v11.2s,v26.2s
add x12,x3,x12,lsr#40
umlal v23.2d,v18.2s,v0.s[2]
add x13,x3,x13,lsr#40
umlal v22.2d,v18.2s,v8.s[2]
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
umlal v21.2d,v18.2s,v6.s[2]
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
umlal v20.2d,v18.2s,v4.s[2]
fmov d16,x8
umlal v19.2d,v18.2s,v2.s[2]
fmov d17,x10
////////////////////////////////////////////////////////////////
// (hash+inp[0:1])*r^4 and accumulate
add v9.2s,v9.2s,v24.2s
fmov d18,x12
umlal v22.2d,v11.2s,v1.s[0]
ldp x8,x12,[x1],#16 // inp[0:1]
umlal v19.2d,v11.2s,v6.s[0]
ldp x9,x13,[x1],#48
umlal v23.2d,v11.2s,v3.s[0]
umlal v20.2d,v11.2s,v8.s[0]
umlal v21.2d,v11.2s,v0.s[0]
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
add v10.2s,v10.2s,v25.2s
umlal v22.2d,v9.2s,v5.s[0]
umlal v23.2d,v9.2s,v7.s[0]
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
umlal v21.2d,v9.2s,v3.s[0]
and x5,x9,#0x03ffffff
umlal v19.2d,v9.2s,v0.s[0]
ubfx x6,x8,#26,#26
umlal v20.2d,v9.2s,v1.s[0]
ubfx x7,x9,#26,#26
add v12.2s,v12.2s,v27.2s
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
umlal v22.2d,v10.2s,v3.s[0]
extr x8,x12,x8,#52
umlal v23.2d,v10.2s,v5.s[0]
extr x9,x13,x9,#52
umlal v19.2d,v10.2s,v8.s[0]
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
umlal v21.2d,v10.2s,v1.s[0]
fmov d9,x4
umlal v20.2d,v10.2s,v0.s[0]
and x8,x8,#0x03ffffff
add v13.2s,v13.2s,v28.2s
and x9,x9,#0x03ffffff
umlal v22.2d,v12.2s,v0.s[0]
ubfx x10,x12,#14,#26
umlal v19.2d,v12.2s,v4.s[0]
ubfx x11,x13,#14,#26
umlal v23.2d,v12.2s,v1.s[0]
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
umlal v20.2d,v12.2s,v6.s[0]
fmov d10,x6
umlal v21.2d,v12.2s,v8.s[0]
add x12,x3,x12,lsr#40
umlal v22.2d,v13.2s,v8.s[0]
add x13,x3,x13,lsr#40
umlal v19.2d,v13.2s,v2.s[0]
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
umlal v23.2d,v13.2s,v0.s[0]
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
umlal v20.2d,v13.2s,v4.s[0]
fmov d11,x8
umlal v21.2d,v13.2s,v6.s[0]
fmov d12,x10
fmov d13,x12
////////////////////////////////////////////////////////////////
// lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
// and P. Schwabe
//
// [see discussion in poly1305-armv4 module]
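//
// Each lane keeps only 26 bits: the excess bits are carried into the
// next limb, and the carry out of h4 wraps back into h0 multiplied by 5
// (v29 + 4*v29). The carries are interleaved with narrowing the limbs to
// 32-bit lanes (v24-v28) for the next iteration's multiplications.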
ushr v29.2d,v22.2d,#26
xtn v27.2s,v22.2d
ushr v30.2d,v19.2d,#26
and v19.16b,v19.16b,v31.16b
add v23.2d,v23.2d,v29.2d // h3 -> h4
bic v27.2s,#0xfc,lsl#24 // &=0x03ffffff
add v20.2d,v20.2d,v30.2d // h0 -> h1
ushr v29.2d,v23.2d,#26
xtn v28.2s,v23.2d
ushr v30.2d,v20.2d,#26
xtn v25.2s,v20.2d
bic v28.2s,#0xfc,lsl#24
add v21.2d,v21.2d,v30.2d // h1 -> h2
add v19.2d,v19.2d,v29.2d
shl v29.2d,v29.2d,#2
shrn v30.2s,v21.2d,#26
xtn v26.2s,v21.2d
add v19.2d,v19.2d,v29.2d // h4 -> h0
bic v25.2s,#0xfc,lsl#24
add v27.2s,v27.2s,v30.2s // h2 -> h3
bic v26.2s,#0xfc,lsl#24
shrn v29.2s,v19.2d,#26
xtn v24.2s,v19.2d
ushr v30.2s,v27.2s,#26
bic v27.2s,#0xfc,lsl#24
bic v24.2s,#0xfc,lsl#24
add v25.2s,v25.2s,v29.2s // h0 -> h1
add v28.2s,v28.2s,v30.2s // h3 -> h4
b.hi .Loop_neon
.Lskip_loop:
dup v16.2d,v16.d[0]
add v11.2s,v11.2s,v26.2s
////////////////////////////////////////////////////////////////
// multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
adds x2,x2,#32
b.ne .Long_tail
dup v16.2d,v11.d[0]
add v14.2s,v9.2s,v24.2s
add v17.2s,v12.2s,v27.2s
add v15.2s,v10.2s,v25.2s
add v18.2s,v13.2s,v28.2s
.Long_tail:
dup v14.2d,v14.d[0]
umull2 v19.2d,v16.4s,v6.4s
umull2 v22.2d,v16.4s,v1.4s
umull2 v23.2d,v16.4s,v3.4s
umull2 v21.2d,v16.4s,v0.4s
umull2 v20.2d,v16.4s,v8.4s
dup v15.2d,v15.d[0]
umlal2 v19.2d,v14.4s,v0.4s
umlal2 v21.2d,v14.4s,v3.4s
umlal2 v22.2d,v14.4s,v5.4s
umlal2 v23.2d,v14.4s,v7.4s
umlal2 v20.2d,v14.4s,v1.4s
dup v17.2d,v17.d[0]
umlal2 v19.2d,v15.4s,v8.4s
umlal2 v22.2d,v15.4s,v3.4s
umlal2 v21.2d,v15.4s,v1.4s
umlal2 v23.2d,v15.4s,v5.4s
umlal2 v20.2d,v15.4s,v0.4s
dup v18.2d,v18.d[0]
umlal2 v22.2d,v17.4s,v0.4s
umlal2 v23.2d,v17.4s,v1.4s
umlal2 v19.2d,v17.4s,v4.4s
umlal2 v20.2d,v17.4s,v6.4s
umlal2 v21.2d,v17.4s,v8.4s
umlal2 v22.2d,v18.4s,v8.4s
umlal2 v19.2d,v18.4s,v2.4s
umlal2 v23.2d,v18.4s,v0.4s
umlal2 v20.2d,v18.4s,v4.4s
umlal2 v21.2d,v18.4s,v6.4s
b.eq .Lshort_tail
////////////////////////////////////////////////////////////////
// (hash+inp[0:1])*r^4:r^3 and accumulate
add v9.2s,v9.2s,v24.2s
umlal v22.2d,v11.2s,v1.2s
umlal v19.2d,v11.2s,v6.2s
umlal v23.2d,v11.2s,v3.2s
umlal v20.2d,v11.2s,v8.2s
umlal v21.2d,v11.2s,v0.2s
add v10.2s,v10.2s,v25.2s
umlal v22.2d,v9.2s,v5.2s
umlal v19.2d,v9.2s,v0.2s
umlal v23.2d,v9.2s,v7.2s
umlal v20.2d,v9.2s,v1.2s
umlal v21.2d,v9.2s,v3.2s
add v12.2s,v12.2s,v27.2s
umlal v22.2d,v10.2s,v3.2s
umlal v19.2d,v10.2s,v8.2s
umlal v23.2d,v10.2s,v5.2s
umlal v20.2d,v10.2s,v0.2s
umlal v21.2d,v10.2s,v1.2s
add v13.2s,v13.2s,v28.2s
umlal v22.2d,v12.2s,v0.2s
umlal v19.2d,v12.2s,v4.2s
umlal v23.2d,v12.2s,v1.2s
umlal v20.2d,v12.2s,v6.2s
umlal v21.2d,v12.2s,v8.2s
umlal v22.2d,v13.2s,v8.2s
umlal v19.2d,v13.2s,v2.2s
umlal v23.2d,v13.2s,v0.2s
umlal v20.2d,v13.2s,v4.2s
umlal v21.2d,v13.2s,v6.2s
.Lshort_tail:
////////////////////////////////////////////////////////////////
// horizontal add
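// (the two 64-bit lanes carry the even- and odd-indexed block streams;
// addp folds them into a single sum per limb)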
addp v22.2d,v22.2d,v22.2d
ldp d8,d9,[sp,#16] // meet ABI requirements
addp v19.2d,v19.2d,v19.2d
ldp d10,d11,[sp,#32]
addp v23.2d,v23.2d,v23.2d
ldp d12,d13,[sp,#48]
addp v20.2d,v20.2d,v20.2d
ldp d14,d15,[sp,#64]
addp v21.2d,v21.2d,v21.2d
////////////////////////////////////////////////////////////////
// lazy reduction, but without narrowing
ushr v29.2d,v22.2d,#26
and v22.16b,v22.16b,v31.16b
ushr v30.2d,v19.2d,#26
and v19.16b,v19.16b,v31.16b
add v23.2d,v23.2d,v29.2d // h3 -> h4
add v20.2d,v20.2d,v30.2d // h0 -> h1
ushr v29.2d,v23.2d,#26
and v23.16b,v23.16b,v31.16b
ushr v30.2d,v20.2d,#26
and v20.16b,v20.16b,v31.16b
add v21.2d,v21.2d,v30.2d // h1 -> h2
add v19.2d,v19.2d,v29.2d
shl v29.2d,v29.2d,#2
ushr v30.2d,v21.2d,#26
and v21.16b,v21.16b,v31.16b
add v19.2d,v19.2d,v29.2d // h4 -> h0
add v22.2d,v22.2d,v30.2d // h2 -> h3
ushr v29.2d,v19.2d,#26
and v19.16b,v19.16b,v31.16b
ushr v30.2d,v22.2d,#26
and v22.16b,v22.16b,v31.16b
add v20.2d,v20.2d,v29.2d // h0 -> h1
add v23.2d,v23.2d,v30.2d // h3 -> h4
////////////////////////////////////////////////////////////////
// write the result, can be partially reduced
st4 {v19.s,v20.s,v21.s,v22.s}[0],[x0],#16
st1 {v23.s}[0],[x0]
.Lno_data_neon:
ldr x29,[sp],#80
.inst 0xd50323bf // autiasp
ret
.size poly1305_blocks_neon,.-poly1305_blocks_neon
.type poly1305_emit_neon,%function
.align 5
poly1305_emit_neon:
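// When the hash was left in base 2^26 by the NEON blocks code, convert it
// back to base 2^64 and apply a partial reduction, then finish exactly as
// poly1305_emit does.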
ldr x17,[x0,#24]
cbz x17,poly1305_emit
ldp w10,w11,[x0] // load hash value base 2^26
ldp w12,w13,[x0,#8]
ldr w14,[x0,#16]
add x4,x10,x11,lsl#26 // base 2^26 -> base 2^64
lsr x5,x12,#12
adds x4,x4,x12,lsl#52
add x5,x5,x13,lsl#14
adc x5,x5,xzr
lsr x6,x14,#24
adds x5,x5,x14,lsl#40
adc x6,x6,xzr // can be partially reduced...
ldp x10,x11,[x2] // load nonce
and x12,x6,#-4 // ... so reduce
add x12,x12,x6,lsr#2
and x6,x6,#3
adds x4,x4,x12
adcs x5,x5,xzr
adc x6,x6,xzr
adds x12,x4,#5 // compare to modulus
adcs x13,x5,xzr
adc x14,x6,xzr
tst x14,#-4 // see if it's carried/borrowed
csel x4,x4,x12,eq
csel x5,x5,x13,eq
#ifdef __ARMEB__
ror x10,x10,#32 // flip nonce words
ror x11,x11,#32
#endif
adds x4,x4,x10 // accumulate nonce
adc x5,x5,x11
#ifdef __ARMEB__
rev x4,x4 // flip output bytes
rev x5,x5
#endif
stp x4,x5,[x1] // write result
ret
.size poly1305_emit_neon,.-poly1305_emit_neon
.align 5
.Lzeros:
.long 0,0,0,0,0,0,0,0
.LOPENSSL_armcap_P:
#ifdef __ILP32__
.long OPENSSL_armcap_P-.
#else
.quad OPENSSL_armcap_P-.
#endif
.byte 80,111,108,121,49,51,48,53,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2