// poly1305-armv8.S

#include "arm_arch.h"

.text

// forward "declarations" are required for Apple
.private_extern _OPENSSL_armcap_P
.globl _poly1305_init
.private_extern _poly1305_init
.globl _poly1305_blocks
.private_extern _poly1305_blocks
.globl _poly1305_emit
.private_extern _poly1305_emit

.align 5
_poly1305_init:
cmp x1,xzr
stp xzr,xzr,[x0] // zero hash value
stp xzr,xzr,[x0,#16] // [along with is_base2_26]
csel x0,xzr,x0,eq
b.eq Lno_key
#ifdef __ILP32__
ldrsw x11,LOPENSSL_armcap_P
#else
ldr x11,LOPENSSL_armcap_P
#endif
adr x10,LOPENSSL_armcap_P
ldp x7,x8,[x1] // load key
mov x9,#0xfffffffc0fffffff
movk x9,#0x0fff,lsl#48
ldr w17,[x10,x11]
#ifdef __ARMEB__
rev x7,x7 // flip bytes
rev x8,x8
#endif
and x7,x7,x9 // &=0ffffffc0fffffff
and x9,x9,#-4
and x8,x8,x9 // &=0ffffffc0ffffffc
stp x7,x8,[x0,#32] // save key value
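// The two masks above implement the RFC 7539 key clamp.  A sketch in C
// terms, with r0/r1 as the low/high key halves (names illustrative):
//
//   r0 &= 0x0ffffffc0fffffffULL;  // clear top 4 bits of each 32-bit word
//   r1 &= 0x0ffffffc0ffffffcULL;  // plus the low 2 bits of the high words
//
// Zeroing r1's low 2 bits is what later makes s1 = r1 + (r1>>2) an exact
// encoding of 5*r1/4.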
tst w17,#ARMV7_NEON
adr x12,_poly1305_blocks
adr x7,poly1305_blocks_neon
adr x13,_poly1305_emit
adr x8,poly1305_emit_neon
csel x12,x12,x7,eq
csel x13,x13,x8,eq
#ifdef __ILP32__
stp w12,w13,[x2]
#else
stp x12,x13,[x2]
#endif
mov x0,#1
Lno_key:
ret

.align 5
_poly1305_blocks:
ands x2,x2,#-16
b.eq Lno_data
ldp x4,x5,[x0] // load hash value
ldp x7,x8,[x0,#32] // load key value
ldr x6,[x0,#16]
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
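// s1 = r1 + (r1>>2) encodes 5*r1/4: with p = 2^130 - 5 we have
// 2^130 == 5 and hence 2^128 == 5/4 (mod p), so a partial product of
// weight 2^128 folds back to weight 2^0 times 5/4.  The division by 4
// is exact because the clamp cleared r1's low 2 bits.  In C terms
// (illustrative): h1*r1 carries weight 2^128, and
//
//   h1*r1*2^128 == h1*(r1 + (r1>>2)) == h1*s1  (mod p)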
b Loop

.align 5
Loop:
ldp x10,x11,[x1],#16 // load input
sub x2,x2,#16
#ifdef __ARMEB__
rev x10,x10
rev x11,x11
#endif
adds x4,x4,x10 // accumulate input
adcs x5,x5,x11
mul x12,x4,x7 // h0*r0
adc x6,x6,x3
umulh x13,x4,x7
mul x10,x5,x9 // h1*5*r1
umulh x11,x5,x9
adds x12,x12,x10
mul x10,x4,x8 // h0*r1
adc x13,x13,x11
umulh x14,x4,x8
adds x13,x13,x10
mul x10,x5,x7 // h1*r0
adc x14,x14,xzr
umulh x11,x5,x7
adds x13,x13,x10
mul x10,x6,x9 // h2*5*r1
adc x14,x14,x11
mul x11,x6,x7 // h2*r0
adds x13,x13,x10
adc x14,x14,x11
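// Partial reduction: bits of h2 above bit 1 have weight >= 2^130 == 5
// (mod p).  With c = h2>>2, the lines below compute, in effect,
//
//   h2 &= 3;
//   h  += 5*c;   // as (h2 & ~3) + (h2 >> 2), i.e. 4*c + c
//
// leaving h only partially reduced; the canonical residue is produced
// once, in poly1305_emit.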
and x10,x14,#-4 // final reduction
and x6,x14,#3
add x10,x10,x14,lsr#2
adds x4,x12,x10
adcs x5,x13,xzr
adc x6,x6,xzr
cbnz x2,Loop
stp x4,x5,[x0] // store hash value
str x6,[x0,#16]
Lno_data:
ret

.align 5
_poly1305_emit:
ldp x4,x5,[x0] // load hash base 2^64
ldr x6,[x0,#16]
ldp x10,x11,[x2] // load nonce
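// Full reduction: add 5 and test whether the sum reached 2^130, i.e.
// whether h >= p = 2^130 - 5.  If it did, the incremented value (with
// bit 130 implicitly dropped, since only 128 bits are emitted) is the
// canonical residue; otherwise the original h already is.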
adds x12,x4,#5 // compare to modulus
adcs x13,x5,xzr
adc x14,x6,xzr
tst x14,#-4 // see if it's carried/borrowed
csel x4,x4,x12,eq
csel x5,x5,x13,eq
#ifdef __ARMEB__
ror x10,x10,#32 // flip nonce words
ror x11,x11,#32
#endif
adds x4,x4,x10 // accumulate nonce
adc x5,x5,x11
#ifdef __ARMEB__
rev x4,x4 // flip output bytes
rev x5,x5
#endif
stp x4,x5,[x1] // write result
ret

.align 5
poly1305_mult:
mul x12,x4,x7 // h0*r0
umulh x13,x4,x7
mul x10,x5,x9 // h1*5*r1
umulh x11,x5,x9
adds x12,x12,x10
mul x10,x4,x8 // h0*r1
adc x13,x13,x11
umulh x14,x4,x8
adds x13,x13,x10
mul x10,x5,x7 // h1*r0
adc x14,x14,xzr
umulh x11,x5,x7
adds x13,x13,x10
mul x10,x6,x9 // h2*5*r1
adc x14,x14,x11
mul x11,x6,x7 // h2*r0
adds x13,x13,x10
adc x14,x14,x11
and x10,x14,#-4 // final reduction
and x6,x14,#3
add x10,x10,x14,lsr#2
adds x4,x12,x10
adcs x5,x13,xzr
adc x6,x6,xzr
ret

.align 5
poly1305_splat:
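// poly1305_splat splits one power of r (held base 2^64 in x4..x6) into
// five 26-bit limbs and stores them, together with the precomputed 5*ri
// values, down one 32-bit column of the 9-row table used by the NEON
// code; x0 points into the column, and consecutive rows are 16 bytes
// apart.  Row order down the column:
//
//   r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4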
and x12,x4,#0x03ffffff // base 2^64 -> base 2^26
ubfx x13,x4,#26,#26
extr x14,x5,x4,#52
and x14,x14,#0x03ffffff
ubfx x15,x5,#14,#26
extr x16,x6,x5,#40
str w12,[x0,#16*0] // r0
add w12,w13,w13,lsl#2 // r1*5
str w13,[x0,#16*1] // r1
add w13,w14,w14,lsl#2 // r2*5
str w12,[x0,#16*2] // s1
str w14,[x0,#16*3] // r2
add w14,w15,w15,lsl#2 // r3*5
str w13,[x0,#16*4] // s2
str w15,[x0,#16*5] // r3
add w15,w16,w16,lsl#2 // r4*5
str w14,[x0,#16*6] // s3
str w16,[x0,#16*7] // r4
str w15,[x0,#16*8] // s4
ret

.align 5
poly1305_blocks_neon:
ldr x17,[x0,#24]
cmp x2,#128
b.hs Lblocks_neon
cbz x17,_poly1305_blocks
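// The checks above dispatch between code paths: x17 is the is_base2_26
// flag kept at context offset 24.  Short inputs (<128 bytes) whose hash
// is still in base 2^64 tail-call the scalar _poly1305_blocks; once the
// hash has been converted to base 2^26, even short inputs stay on the
// NEON path so the hash format round-trips correctly.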
Lblocks_neon:
.long 0xd503233f // paciasp
stp x29,x30,[sp,#-80]!
add x29,sp,#0
ands x2,x2,#-16
b.eq Lno_data_neon
cbz x17,Lbase2_64_neon
ldp w10,w11,[x0] // load hash value base 2^26
ldp w12,w13,[x0,#8]
ldr w14,[x0,#16]
tst x2,#31
b.eq Leven_neon
ldp x7,x8,[x0,#32] // load key value
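// Rebuild the three-limb base 2^64 hash from the five 26-bit limbs
// h0..h4 for an odd scalar block.  Schematically, in C (limb names are
// this comment's own; carry propagation elided):
//
//   h64_0 = h0 | h1<<26 | h2<<52;        // low 64 bits
//   h64_1 = h2>>12 | h3<<14 | h4<<40;    // next 64 bits
//   h64_2 = h4>>24;                      // top bits
//
// h4 can hold more than 26 significant bits because the NEON loop only
// reduces lazily, hence the extra reduction a few instructions below.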
add x4,x10,x11,lsl#26 // base 2^26 -> base 2^64
lsr x5,x12,#12
adds x4,x4,x12,lsl#52
add x5,x5,x13,lsl#14
adc x5,x5,xzr
lsr x6,x14,#24
adds x5,x5,x14,lsl#40
adc x14,x6,xzr // can be partially reduced...
ldp x12,x13,[x1],#16 // load input
sub x2,x2,#16
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
and x10,x14,#-4 // ... so reduce
and x6,x14,#3
add x10,x10,x14,lsr#2
adds x4,x4,x10
adcs x5,x5,xzr
adc x6,x6,xzr
#ifdef __ARMEB__
rev x12,x12
rev x13,x13
#endif
adds x4,x4,x12 // accumulate input
adcs x5,x5,x13
adc x6,x6,x3
bl poly1305_mult
ldr x30,[sp,#8]
cbz x3,Lstore_base2_64_neon
and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
ubfx x11,x4,#26,#26
extr x12,x5,x4,#52
and x12,x12,#0x03ffffff
ubfx x13,x5,#14,#26
extr x14,x6,x5,#40
cbnz x2,Leven_neon
stp w10,w11,[x0] // store hash value base 2^26
stp w12,w13,[x0,#8]
str w14,[x0,#16]
b Lno_data_neon

.align 4
Lstore_base2_64_neon:
stp x4,x5,[x0] // store hash value base 2^64
stp x6,xzr,[x0,#16] // note that is_base2_26 is zeroed
b Lno_data_neon

.align 4
Lbase2_64_neon:
ldp x7,x8,[x0,#32] // load key value
ldp x4,x5,[x0] // load hash value base 2^64
ldr x6,[x0,#16]
tst x2,#31
b.eq Linit_neon
ldp x12,x13,[x1],#16 // load input
sub x2,x2,#16
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
#ifdef __ARMEB__
rev x12,x12
rev x13,x13
#endif
adds x4,x4,x12 // accumulate input
adcs x5,x5,x13
adc x6,x6,x3
bl poly1305_mult
Linit_neon:
and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
ubfx x11,x4,#26,#26
extr x12,x5,x4,#52
and x12,x12,#0x03ffffff
ubfx x13,x5,#14,#26
extr x14,x6,x5,#40
stp d8,d9,[sp,#16] // meet ABI requirements
stp d10,d11,[sp,#32]
stp d12,d13,[sp,#48]
stp d14,d15,[sp,#64]
fmov d24,x10
fmov d25,x11
fmov d26,x12
fmov d27,x13
fmov d28,x14
////////////////////////////////// initialize r^n table
mov x4,x7 // r^1
add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
mov x5,x8
mov x6,xzr
add x0,x0,#48+12
bl poly1305_splat
bl poly1305_mult // r^2
sub x0,x0,#4
bl poly1305_splat
bl poly1305_mult // r^3
sub x0,x0,#4
bl poly1305_splat
bl poly1305_mult // r^4
sub x0,x0,#4
bl poly1305_splat
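// The table now holds r^1..r^4 in base 2^26, one power per 32-bit
// column: poly1305_splat first filled the r^1 column at byte offset
// 48+12 (lane 3 of each row), and each sub x0,#4 moved one lane left
// before the next power was stored, so lane 0 ends up holding r^4 and
// lane 3 holds r^1.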
ldr x30,[sp,#8]
add x16,x1,#32
adr x17,Lzeros
subs x2,x2,#64
csel x16,x17,x16,lo
mov x4,#1
str x4,[x0,#-24] // set is_base2_26
sub x0,x0,#48 // restore original x0
b Ldo_neon

.align 4
Leven_neon:
add x16,x1,#32
adr x17,Lzeros
subs x2,x2,#64
csel x16,x17,x16,lo
stp d8,d9,[sp,#16] // meet ABI requirements
stp d10,d11,[sp,#32]
stp d12,d13,[sp,#48]
stp d14,d15,[sp,#64]
fmov d24,x10
fmov d25,x11
fmov d26,x12
fmov d27,x13
fmov d28,x14
Ldo_neon:
ldp x8,x12,[x16],#16 // inp[2:3] (or zero)
ldp x9,x13,[x16],#48
lsl x3,x3,#24
add x15,x0,#48
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
and x5,x9,#0x03ffffff
ubfx x6,x8,#26,#26
ubfx x7,x9,#26,#26
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
extr x8,x12,x8,#52
extr x9,x13,x9,#52
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
fmov d14,x4
and x8,x8,#0x03ffffff
and x9,x9,#0x03ffffff
ubfx x10,x12,#14,#26
ubfx x11,x13,#14,#26
add x12,x3,x12,lsr#40
add x13,x3,x13,lsr#40
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
fmov d15,x6
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
fmov d16,x8
fmov d17,x10
fmov d18,x12
ldp x8,x12,[x1],#16 // inp[0:1]
ldp x9,x13,[x1],#48
ld1 {v0.4s,v1.4s,v2.4s,v3.4s},[x15],#64
ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[x15],#64
ld1 {v8.4s},[x15]
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
and x5,x9,#0x03ffffff
ubfx x6,x8,#26,#26
ubfx x7,x9,#26,#26
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
extr x8,x12,x8,#52
extr x9,x13,x9,#52
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
fmov d9,x4
and x8,x8,#0x03ffffff
and x9,x9,#0x03ffffff
ubfx x10,x12,#14,#26
ubfx x11,x13,#14,#26
add x12,x3,x12,lsr#40
add x13,x3,x13,lsr#40
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
fmov d10,x6
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
movi v31.2d,#-1
fmov d11,x8
fmov d12,x10
fmov d13,x12
ushr v31.2d,v31.2d,#38
b.ls Lskip_loop

.align 4
Loop_neon:
////////////////////////////////////////////////////////////////
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
//      \___________________/
// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
//      \___________________/ \____________________/
//
// Note that we start with inp[2:3]*r^2. This is because it
// doesn't depend on reduction in previous iteration.
////////////////////////////////////////////////////////////////
// d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
// d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
// d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
// d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
// d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
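// v0..v8 are the table rows {r0,r1,5*r1,r2,5*r2,r3,5*r3,r4,5*r4}, with
// lane [2] of each row holding that limb of r^2 and lane [0] that of
// r^4; hence the .s[2] products below apply r^2 to inp[2:3] (no
// dependency on the previous reduction), and the .s[0] products later
// apply r^4 to hash+inp[0:1].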
subs x2,x2,#64
umull v23.2d,v14.2s,v7.s[2]
csel x16,x17,x16,lo
umull v22.2d,v14.2s,v5.s[2]
umull v21.2d,v14.2s,v3.s[2]
ldp x8,x12,[x16],#16 // inp[2:3] (or zero)
umull v20.2d,v14.2s,v1.s[2]
ldp x9,x13,[x16],#48
umull v19.2d,v14.2s,v0.s[2]
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
umlal v23.2d,v15.2s,v5.s[2]
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
umlal v22.2d,v15.2s,v3.s[2]
and x5,x9,#0x03ffffff
umlal v21.2d,v15.2s,v1.s[2]
ubfx x6,x8,#26,#26
umlal v20.2d,v15.2s,v0.s[2]
ubfx x7,x9,#26,#26
umlal v19.2d,v15.2s,v8.s[2]
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
umlal v23.2d,v16.2s,v3.s[2]
extr x8,x12,x8,#52
umlal v22.2d,v16.2s,v1.s[2]
extr x9,x13,x9,#52
umlal v21.2d,v16.2s,v0.s[2]
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
umlal v20.2d,v16.2s,v8.s[2]
fmov d14,x4
umlal v19.2d,v16.2s,v6.s[2]
and x8,x8,#0x03ffffff
umlal v23.2d,v17.2s,v1.s[2]
and x9,x9,#0x03ffffff
umlal v22.2d,v17.2s,v0.s[2]
ubfx x10,x12,#14,#26
umlal v21.2d,v17.2s,v8.s[2]
ubfx x11,x13,#14,#26
umlal v20.2d,v17.2s,v6.s[2]
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
umlal v19.2d,v17.2s,v4.s[2]
fmov d15,x6
add v11.2s,v11.2s,v26.2s
add x12,x3,x12,lsr#40
umlal v23.2d,v18.2s,v0.s[2]
add x13,x3,x13,lsr#40
umlal v22.2d,v18.2s,v8.s[2]
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
umlal v21.2d,v18.2s,v6.s[2]
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
umlal v20.2d,v18.2s,v4.s[2]
fmov d16,x8
umlal v19.2d,v18.2s,v2.s[2]
fmov d17,x10
////////////////////////////////////////////////////////////////
// (hash+inp[0:1])*r^4 and accumulate
add v9.2s,v9.2s,v24.2s
fmov d18,x12
umlal v22.2d,v11.2s,v1.s[0]
ldp x8,x12,[x1],#16 // inp[0:1]
umlal v19.2d,v11.2s,v6.s[0]
ldp x9,x13,[x1],#48
umlal v23.2d,v11.2s,v3.s[0]
umlal v20.2d,v11.2s,v8.s[0]
umlal v21.2d,v11.2s,v0.s[0]
#ifdef __ARMEB__
rev x8,x8
rev x12,x12
rev x9,x9
rev x13,x13
#endif
add v10.2s,v10.2s,v25.2s
umlal v22.2d,v9.2s,v5.s[0]
umlal v23.2d,v9.2s,v7.s[0]
and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
umlal v21.2d,v9.2s,v3.s[0]
and x5,x9,#0x03ffffff
umlal v19.2d,v9.2s,v0.s[0]
ubfx x6,x8,#26,#26
umlal v20.2d,v9.2s,v1.s[0]
ubfx x7,x9,#26,#26
add v12.2s,v12.2s,v27.2s
add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
umlal v22.2d,v10.2s,v3.s[0]
extr x8,x12,x8,#52
umlal v23.2d,v10.2s,v5.s[0]
extr x9,x13,x9,#52
umlal v19.2d,v10.2s,v8.s[0]
add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
umlal v21.2d,v10.2s,v1.s[0]
fmov d9,x4
umlal v20.2d,v10.2s,v0.s[0]
and x8,x8,#0x03ffffff
add v13.2s,v13.2s,v28.2s
and x9,x9,#0x03ffffff
umlal v22.2d,v12.2s,v0.s[0]
ubfx x10,x12,#14,#26
umlal v19.2d,v12.2s,v4.s[0]
ubfx x11,x13,#14,#26
umlal v23.2d,v12.2s,v1.s[0]
add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
umlal v20.2d,v12.2s,v6.s[0]
fmov d10,x6
umlal v21.2d,v12.2s,v8.s[0]
add x12,x3,x12,lsr#40
umlal v22.2d,v13.2s,v8.s[0]
add x13,x3,x13,lsr#40
umlal v19.2d,v13.2s,v2.s[0]
add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
umlal v23.2d,v13.2s,v0.s[0]
add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
umlal v20.2d,v13.2s,v4.s[0]
fmov d11,x8
umlal v21.2d,v13.2s,v6.s[0]
fmov d12,x10
fmov d13,x12
/////////////////////////////////////////////////////////////////
// lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
// and P. Schwabe
//
// [see discussion in poly1305-armv4 module]
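// Carries propagate h3->h4 and h0->h1 first, then h1->h2 and h4->h0
// (the h4 carry is multiplied by 5 via the shl #2 plus the extra add),
// then h2->h3, h0->h1 and h3->h4.  These partial passes keep every limb
// within a bit or two of 26 bits, which is all the next iteration's
// umlal accumulation requires; see the poly1305-armv4 module referenced
// above for the precise bounds.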
ushr v29.2d,v22.2d,#26
xtn v27.2s,v22.2d
ushr v30.2d,v19.2d,#26
and v19.16b,v19.16b,v31.16b
add v23.2d,v23.2d,v29.2d // h3 -> h4
bic v27.2s,#0xfc,lsl#24 // &=0x03ffffff
add v20.2d,v20.2d,v30.2d // h0 -> h1
ushr v29.2d,v23.2d,#26
xtn v28.2s,v23.2d
ushr v30.2d,v20.2d,#26
xtn v25.2s,v20.2d
bic v28.2s,#0xfc,lsl#24
add v21.2d,v21.2d,v30.2d // h1 -> h2
add v19.2d,v19.2d,v29.2d
shl v29.2d,v29.2d,#2
shrn v30.2s,v21.2d,#26
xtn v26.2s,v21.2d
add v19.2d,v19.2d,v29.2d // h4 -> h0
bic v25.2s,#0xfc,lsl#24
add v27.2s,v27.2s,v30.2s // h2 -> h3
bic v26.2s,#0xfc,lsl#24
shrn v29.2s,v19.2d,#26
xtn v24.2s,v19.2d
ushr v30.2s,v27.2s,#26
bic v27.2s,#0xfc,lsl#24
bic v24.2s,#0xfc,lsl#24
add v25.2s,v25.2s,v29.2s // h0 -> h1
add v28.2s,v28.2s,v30.2s // h3 -> h4
b.hi Loop_neon

Lskip_loop:
dup v16.2d,v16.d[0]
add v11.2s,v11.2s,v26.2s
////////////////////////////////////////////////////////////////
// multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
adds x2,x2,#32
b.ne Long_tail
dup v16.2d,v11.d[0]
add v14.2s,v9.2s,v24.2s
add v17.2s,v12.2s,v27.2s
add v15.2s,v10.2s,v25.2s
add v18.2s,v13.2s,v28.2s

Long_tail:
dup v14.2d,v14.d[0]
umull2 v19.2d,v16.4s,v6.4s
umull2 v22.2d,v16.4s,v1.4s
umull2 v23.2d,v16.4s,v3.4s
umull2 v21.2d,v16.4s,v0.4s
umull2 v20.2d,v16.4s,v8.4s
dup v15.2d,v15.d[0]
umlal2 v19.2d,v14.4s,v0.4s
umlal2 v21.2d,v14.4s,v3.4s
umlal2 v22.2d,v14.4s,v5.4s
umlal2 v23.2d,v14.4s,v7.4s
umlal2 v20.2d,v14.4s,v1.4s
dup v17.2d,v17.d[0]
umlal2 v19.2d,v15.4s,v8.4s
umlal2 v22.2d,v15.4s,v3.4s
umlal2 v21.2d,v15.4s,v1.4s
umlal2 v23.2d,v15.4s,v5.4s
umlal2 v20.2d,v15.4s,v0.4s
dup v18.2d,v18.d[0]
umlal2 v22.2d,v17.4s,v0.4s
umlal2 v23.2d,v17.4s,v1.4s
umlal2 v19.2d,v17.4s,v4.4s
umlal2 v20.2d,v17.4s,v6.4s
umlal2 v21.2d,v17.4s,v8.4s
umlal2 v22.2d,v18.4s,v8.4s
umlal2 v19.2d,v18.4s,v2.4s
umlal2 v23.2d,v18.4s,v0.4s
umlal2 v20.2d,v18.4s,v4.4s
umlal2 v21.2d,v18.4s,v6.4s
b.eq Lshort_tail

////////////////////////////////////////////////////////////////
// (hash+inp[0:1])*r^4:r^3 and accumulate
add v9.2s,v9.2s,v24.2s
umlal v22.2d,v11.2s,v1.2s
umlal v19.2d,v11.2s,v6.2s
umlal v23.2d,v11.2s,v3.2s
umlal v20.2d,v11.2s,v8.2s
umlal v21.2d,v11.2s,v0.2s
add v10.2s,v10.2s,v25.2s
umlal v22.2d,v9.2s,v5.2s
umlal v19.2d,v9.2s,v0.2s
umlal v23.2d,v9.2s,v7.2s
umlal v20.2d,v9.2s,v1.2s
umlal v21.2d,v9.2s,v3.2s
add v12.2s,v12.2s,v27.2s
umlal v22.2d,v10.2s,v3.2s
umlal v19.2d,v10.2s,v8.2s
umlal v23.2d,v10.2s,v5.2s
umlal v20.2d,v10.2s,v0.2s
umlal v21.2d,v10.2s,v1.2s
add v13.2s,v13.2s,v28.2s
umlal v22.2d,v12.2s,v0.2s
umlal v19.2d,v12.2s,v4.2s
umlal v23.2d,v12.2s,v1.2s
umlal v20.2d,v12.2s,v6.2s
umlal v21.2d,v12.2s,v8.2s
umlal v22.2d,v13.2s,v8.2s
umlal v19.2d,v13.2s,v2.2s
umlal v23.2d,v13.2s,v0.2s
umlal v20.2d,v13.2s,v4.2s
umlal v21.2d,v13.2s,v6.2s

Lshort_tail:
////////////////////////////////////////////////////////////////
// horizontal add
addp v22.2d,v22.2d,v22.2d
ldp d8,d9,[sp,#16] // meet ABI requirements
addp v19.2d,v19.2d,v19.2d
ldp d10,d11,[sp,#32]
addp v23.2d,v23.2d,v23.2d
ldp d12,d13,[sp,#48]
addp v20.2d,v20.2d,v20.2d
ldp d14,d15,[sp,#64]
addp v21.2d,v21.2d,v21.2d
////////////////////////////////////////////////////////////////
// lazy reduction, but without narrowing
ushr v29.2d,v22.2d,#26
and v22.16b,v22.16b,v31.16b
ushr v30.2d,v19.2d,#26
and v19.16b,v19.16b,v31.16b
add v23.2d,v23.2d,v29.2d // h3 -> h4
add v20.2d,v20.2d,v30.2d // h0 -> h1
ushr v29.2d,v23.2d,#26
and v23.16b,v23.16b,v31.16b
ushr v30.2d,v20.2d,#26
and v20.16b,v20.16b,v31.16b
add v21.2d,v21.2d,v30.2d // h1 -> h2
add v19.2d,v19.2d,v29.2d
shl v29.2d,v29.2d,#2
ushr v30.2d,v21.2d,#26
and v21.16b,v21.16b,v31.16b
add v19.2d,v19.2d,v29.2d // h4 -> h0
add v22.2d,v22.2d,v30.2d // h2 -> h3
ushr v29.2d,v19.2d,#26
and v19.16b,v19.16b,v31.16b
ushr v30.2d,v22.2d,#26
and v22.16b,v22.16b,v31.16b
add v20.2d,v20.2d,v29.2d // h0 -> h1
add v23.2d,v23.2d,v30.2d // h3 -> h4
////////////////////////////////////////////////////////////////
// write the result, can be partially reduced
st4 {v19.s,v20.s,v21.s,v22.s}[0],[x0],#16
st1 {v23.s}[0],[x0]
Lno_data_neon:
ldr x29,[sp],#80
.long 0xd50323bf // autiasp
ret

.align 5
poly1305_emit_neon:
ldr x17,[x0,#24]
cbz x17,_poly1305_emit
ldp w10,w11,[x0] // load hash value base 2^26
ldp w12,w13,[x0,#8]
ldr w14,[x0,#16]
add x4,x10,x11,lsl#26 // base 2^26 -> base 2^64
lsr x5,x12,#12
adds x4,x4,x12,lsl#52
add x5,x5,x13,lsl#14
adc x5,x5,xzr
lsr x6,x14,#24
adds x5,x5,x14,lsl#40
adc x6,x6,xzr // can be partially reduced...
ldp x10,x11,[x2] // load nonce
and x12,x6,#-4 // ... so reduce
add x12,x12,x6,lsr#2
and x6,x6,#3
adds x4,x4,x12
adcs x5,x5,xzr
adc x6,x6,xzr
adds x12,x4,#5 // compare to modulus
adcs x13,x5,xzr
adc x14,x6,xzr
tst x14,#-4 // see if it's carried/borrowed
csel x4,x4,x12,eq
csel x5,x5,x13,eq
#ifdef __ARMEB__
ror x10,x10,#32 // flip nonce words
ror x11,x11,#32
#endif
adds x4,x4,x10 // accumulate nonce
adc x5,x5,x11
#ifdef __ARMEB__
rev x4,x4 // flip output bytes
rev x5,x5
#endif
stp x4,x5,[x1] // write result
ret

.align 5
Lzeros:
.long 0,0,0,0,0,0,0,0
LOPENSSL_armcap_P:
#ifdef __ILP32__
.long _OPENSSL_armcap_P-.
#else
.quad _OPENSSL_armcap_P-.
#endif
.byte 80,111,108,121,49,51,48,53,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 // "Poly1305 for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.align 2
.align 2