@ ghashv8-armx.S

#include "arm_arch.h"
#if __ARM_MAX_ARCH__>=7
.text
.fpu neon
.code 32
#undef __thumb2__
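@ gcm_init_v8(Htable, H): r0 = Htable to fill, r1 = the GHASH key H
@ (AAPCS argument registers; prototypes as declared in OpenSSL's gcm128.c).
@ Precomputes the values consumed by gcm_gmult_v8/gcm_ghash_v8 below:
@   Htable[0] = "twisted" H  -- effectively H<<1 mod P, kept in the
@               rotated layout the pmull-based multiply expects
@   Htable[1] = packed Karatsuba pre-processed halves of H and H^2
@   Htable[2] = twisted H^2
@ Note: the .byte sequences throughout this file are hand-assembled
@ PMULL/PMULL2 (vmull.p64) polynomial multiplies, presumably emitted as
@ raw opcodes so that toolchains without Crypto Extensions mnemonic
@ support can still build the file; the intended instruction is shown
@ in the trailing comment of each such line.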
.globl gcm_init_v8
.type gcm_init_v8,%function
.align 4
gcm_init_v8:
vld1.64 {q9},[r1] @ load input H
vmov.i8 q11,#0xe1
vshl.i64 q11,q11,#57 @ 0xc2.0
vext.8 q3,q9,q9,#8
vshr.u64 q10,q11,#63
vdup.32 q9,d18[1]
vext.8 q8,q10,q11,#8 @ t0=0xc2....01
vshr.u64 q10,q3,#63
vshr.s32 q9,q9,#31 @ broadcast carry bit
vand q10,q10,q8
vshl.i64 q3,q3,#1
vext.8 q10,q10,q10,#8
vand q8,q8,q9
vorr q3,q3,q10 @ H<<<=1
veor q12,q3,q8 @ twisted H
vst1.64 {q12},[r0]! @ store Htable[0]
@ calculate H^2
vext.8 q8,q12,q12,#8 @ Karatsuba pre-processing
.byte 0xa8,0x0e,0xa8,0xf2 @ pmull q0,q12,q12
veor q8,q8,q12
.byte 0xa9,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q12
.byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
veor q1,q1,q9
veor q1,q1,q10
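@ The 256-bit Karatsuba result (low half in q0, high half in q2,
@ middle term in q1) is now reduced modulo the GCM polynomial: each
@ phase below multiplies a 64-bit half by the 0xc2..0 constant in q11
@ and folds it back in, so two phases bring the product down to 128 bits.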
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
veor q0,q1,q10
vext.8 q10,q0,q0,#8 @ 2nd phase
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q10,q10,q2
veor q14,q0,q10
vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing
veor q9,q9,q14
vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed
vst1.64 {q13,q14},[r0]! @ store Htable[1..2]
bx lr
.size gcm_init_v8,.-gcm_init_v8
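@ gcm_gmult_v8(Xi, Htable): r0 = 16-byte Xi (read and written back),
@ r1 = Htable from gcm_init_v8.  Computes Xi = Xi*H in GF(2^128), i.e.
@ one GHASH multiplication with no data block folded in.  The product
@ is formed Karatsuba-style from three vmull.p64 multiplies (lo.lo,
@ hi.hi and the cross term), then reduced in two phases with the
@ 0xc2..0 constant in q11.  On little-endian builds Xi is byte-reversed
@ on load and store (the __ARMEB__ guards), since GHASH treats the
@ 128-bit block as big-endian.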
.globl gcm_gmult_v8
.type gcm_gmult_v8,%function
.align 4
gcm_gmult_v8:
vld1.64 {q9},[r0] @ load Xi
vmov.i8 q11,#0xe1
vld1.64 {q12,q13},[r1] @ load twisted H, ...
vshl.u64 q11,q11,#57
#ifndef __ARMEB__
vrev64.8 q9,q9
#endif
vext.8 q3,q9,q9,#8
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
veor q1,q1,q9
veor q1,q1,q10
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
veor q0,q1,q10
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q10,q10,q2
veor q0,q0,q10
#ifndef __ARMEB__
vrev64.8 q0,q0
#endif
vext.8 q0,q0,q0,#8
vst1.64 {q0},[r0] @ write out Xi
bx lr
.size gcm_gmult_v8,.-gcm_gmult_v8
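@ gcm_ghash_v8(Xi, Htable, inp, len): r0 = 16-byte Xi (read and written
@ back), r1 = Htable from gcm_init_v8, r2 = input, r3 = length in bytes
@ (assumed to be a non-zero multiple of 16).  Folds each 16-byte block
@ into the running hash, Xi = (Xi ^ block)*H.  The main loop consumes
@ two blocks per iteration in the aggregated form
@   Xi = (Xi ^ I[i])*H^2 ^ I[i+1]*H
@ so only one reduction is paid per pair; a single trailing block is
@ handled at .Lodd_tail_v8.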
.globl gcm_ghash_v8
.type gcm_ghash_v8,%function
.align 4
gcm_ghash_v8:
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
vld1.64 {q0},[r0] @ load [rotated] Xi
@ "[rotated]" means that
@ loaded value would have
@ to be rotated in order to
@ make it appear as in
@ algorithm specification
subs r3,r3,#32 @ see if r3 is 32 or larger
mov r12,#16 @ r12 is used as post-
@ increment for input pointer;
@ as loop is modulo-scheduled
@ r12 is zeroed just in time
@ to preclude overstepping
@ inp[len], which means that
@ last block[s] are actually
@ loaded twice, but last
@ copy is not processed
vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2
vmov.i8 q11,#0xe1
vld1.64 {q14},[r1]
moveq r12,#0 @ is it time to zero r12?
vext.8 q0,q0,q0,#8 @ rotate Xi
vld1.64 {q8},[r2]! @ load [rotated] I[0]
vshl.u64 q11,q11,#57 @ compose 0xc2.0 constant
#ifndef __ARMEB__
vrev64.8 q8,q8
vrev64.8 q0,q0
#endif
vext.8 q3,q8,q8,#8 @ rotate I[0]
blo .Lodd_tail_v8 @ r3 was less than 32
vld1.64 {q9},[r2],r12 @ load [rotated] I[1]
#ifndef __ARMEB__
vrev64.8 q9,q9
#endif
vext.8 q7,q9,q9,#8
veor q3,q3,q0 @ I[i]^=Xi
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q9,q9,q7 @ Karatsuba pre-processing
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
b .Loop_mod2x_v8
.align 4
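@ Main loop, two blocks per iteration.  On entry q3 holds the rotated
@ Xi^I[i] and q7 the rotated I[i+1], and q4/q6 already hold the lo.lo
@ and hi.hi partial products of I[i+1]*H started by the previous
@ iteration (or by the preamble above).  Each pass multiplies Xi^I[i]
@ by H^2, xors in the I[i+1]*H partial products, performs one two-phase
@ reduction, and meanwhile loads I[i+2]/I[i+3] and starts the next
@ pair's odd-block multiply -- the modulo-scheduling referred to in the
@ comments above.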
.Loop_mod2x_v8:
vext.8 q10,q3,q3,#8
subs r3,r3,#32 @ is there more data?
.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
movlo r12,#0 @ is it time to zero r12?
.byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9
veor q10,q10,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
veor q0,q0,q4 @ accumulate
.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2]
veor q2,q2,q6
moveq r12,#0 @ is it time to zero r12?
veor q1,q1,q5
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
veor q1,q1,q9
vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3]
#ifndef __ARMEB__
vrev64.8 q8,q8
#endif
veor q1,q1,q10
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
#ifndef __ARMEB__
vrev64.8 q9,q9
#endif
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
vext.8 q7,q9,q9,#8
vext.8 q3,q8,q8,#8
veor q0,q1,q10
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q3,q3,q2 @ accumulate q3 early
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q3,q3,q10
veor q9,q9,q7 @ Karatsuba pre-processing
veor q3,q3,q0
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
bhs .Loop_mod2x_v8 @ there were at least 32 more bytes
veor q2,q2,q10
vext.8 q3,q8,q8,#8 @ re-construct q3
adds r3,r3,#32 @ re-construct r3
veor q0,q0,q2 @ re-construct q0
beq .Ldone_v8 @ is r3 zero?
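@ Exactly one 16-byte block remains: fold it as Xi = (Xi ^ block)*H,
@ using the same three-multiply Karatsuba product and two-phase
@ reduction as gcm_gmult_v8 above.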
.Lodd_tail_v8:
vext.8 q10,q0,q0,#8
veor q3,q3,q0 @ inp^=Xi
veor q9,q8,q10 @ q9 is rotated inp^Xi
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
veor q1,q1,q9
veor q1,q1,q10
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
veor q0,q1,q10
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q10,q10,q2
veor q0,q0,q10
.Ldone_v8:
#ifndef __ARMEB__
vrev64.8 q0,q0
#endif
vext.8 q0,q0,q0,#8
vst1.64 {q0},[r0] @ write out Xi
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
bx lr
.size gcm_ghash_v8,.-gcm_ghash_v8
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif