# sc25519_sub_nored.S

# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
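#
# What this routine computes: a 4x64-bit (256-bit) subtraction
# r = x - y, little-endian limb order, with the borrow propagated but
# the result deliberately NOT reduced modulo the Ed25519 group order
# ("nored" = no reduction).  As a rough C sketch (the sc25519 type
# with a v[4] limb array is an assumption for illustration, not taken
# from this file):
#
#   void sc25519_sub_nored(sc25519 *r, const sc25519 *x, const sc25519 *y)
#   {
#       unsigned long long b = 0;                 /* borrow in */
#       for (int i = 0; i < 4; i++) {
#           unsigned long long t = x->v[i] - y->v[i] - b;
#           /* borrow out of x - y - b is set iff x < y + b */
#           b = (x->v[i] < y->v[i]) | ((x->v[i] == y->v[i]) & b);
#           r->v[i] = t;
#       }
#   }
#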
# qhasm: enter CRYPTO_NAMESPACE(sc25519_sub_nored)
.text
.p2align 5
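# Two global symbols are declared so the same file assembles for ABIs
# that prefix C-visible names with an underscore (e.g. Mach-O) and
# for those that do not (e.g. ELF).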
.globl _CRYPTO_NAMESPACE(sc25519_sub_nored)
.globl CRYPTO_NAMESPACE(sc25519_sub_nored)
_CRYPTO_NAMESPACE(sc25519_sub_nored):
CRYPTO_NAMESPACE(sc25519_sub_nored):
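# qhasm's standard prologue: save rsp's misalignment in r11 and align
# the stack pointer down to a 32-byte boundary.  The "add $0" is where
# local stack space would be reserved; this routine needs none.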
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
# qhasm: r0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>r0=int64#4
# asm 2: movq 0(<xp=%rsi),>r0=%rcx
movq 0(%rsi),%rcx
# qhasm: r1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>r1=int64#5
# asm 2: movq 8(<xp=%rsi),>r1=%r8
movq 8(%rsi),%r8
# qhasm: r2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>r2=int64#6
# asm 2: movq 16(<xp=%rsi),>r2=%r9
movq 16(%rsi),%r9
# qhasm: r3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>r3=int64#2
# asm 2: movq 24(<xp=%rsi),>r3=%rsi
movq 24(%rsi),%rsi
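# All four limbs of x are now in registers: r0..r3 = rcx, r8, r9, rsi.
# The last load clobbers xp (%rsi), which is safe: xp is not read
# again, and yp is still available in %rdx.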
# qhasm: carry? r0 -= *(uint64 *)(yp + 0)
# asm 1: subq 0(<yp=int64#3),<r0=int64#4
# asm 2: subq 0(<yp=%rdx),<r0=%rcx
subq 0(%rdx),%rcx
# qhasm: carry? r1 -= *(uint64 *)(yp + 8) - carry
# asm 1: sbbq 8(<yp=int64#3),<r1=int64#5
# asm 2: sbbq 8(<yp=%rdx),<r1=%r8
sbbq 8(%rdx),%r8
# qhasm: carry? r2 -= *(uint64 *)(yp + 16) - carry
# asm 1: sbbq 16(<yp=int64#3),<r2=int64#6
# asm 2: sbbq 16(<yp=%rdx),<r2=%r9
sbbq 16(%rdx),%r9
# qhasm: r3 -= *(uint64 *)(yp + 24) - carry
# asm 1: sbbq 24(<yp=int64#3),<r3=int64#2
# asm 2: sbbq 24(<yp=%rdx),<r3=%rsi
sbbq 24(%rdx),%rsi
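# The subq/sbbq chain ripples the borrow through all four limbs,
# producing the full 256-bit difference.  No conditional add of the
# group order follows; leaving the result unreduced is the point of
# the _nored variant.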
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#4,0(<rp=int64#1)
# asm 2: movq <r0=%rcx,0(<rp=%rdi)
movq %rcx,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#5,8(<rp=int64#1)
# asm 2: movq <r1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#6,16(<rp=int64#1)
# asm 2: movq <r2=%r9,16(<rp=%rdi)
movq %r9,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#2,24(<rp=int64#1)
# asm 2: movq <r3=%rsi,24(<rp=%rdi)
movq %rsi,24(%rdi)
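# The unreduced difference is now stored in the four quadwords at
# rp+0, rp+8, rp+16, rp+24.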
# qhasm: leave
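# qhasm's standard epilogue: undo the alignment adjustment to rsp and
# return.  The copies of rdi and rsi into rax and rdx are part of
# qhasm's fixed leave sequence rather than a meaningful return value.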
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret