blake2b_ssse3.h

#pragma once

#include <emmintrin.h>
#include <tmmintrin.h>

#include "blake2b.h"

#include <library/cpp/digest/argonish/internal/rotations/rotations_ssse3.h>

namespace NArgonish {
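    // BLAKE2b initialization vector: the first 64 bits of the fractional parts
    // of the square roots of the first eight primes, packed two 64-bit words
    // per 128-bit lane (_mm_set_epi64x takes the high word first).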
    template <>
    void* TBlake2B<EInstructionSet::SSSE3>::GetIV_() const {
        static const __m128i Iv[4] = {
            _mm_set_epi64x(0xbb67ae8584caa73bULL, 0x6a09e667f3bcc908ULL),
            _mm_set_epi64x(0xa54ff53a5f1d36f1ULL, 0x3c6ef372fe94f82bULL),
            _mm_set_epi64x(0x9b05688c2b3e6c1fULL, 0x510e527fade682d1ULL),
            _mm_set_epi64x(0x5be0cd19137e2179ULL, 0x1f83d9abfb41bd6bULL)};
        return (void*)Iv;
    }
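
    // Message schedule: round r consumes the 16 message words of the block in
    // the order given by Sigma[r]. BLAKE2b runs 12 rounds, so rounds 10 and 11
    // reuse the permutations of rounds 0 and 1.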
    static const ui32 Sigma[12][16] = {
        {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
        {14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3},
        {11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4},
        {7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8},
        {9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13},
        {2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9},
        {12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11},
        {13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10},
        {6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5},
        {10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0},
        {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
        {14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3}};
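
    // First half of the BLAKE2b G function, applied to all four columns at
    // once: the 4x4 state of 64-bit words lives in eight xmm registers, one
    // low/high register pair per row. The Rotr* helpers come from
    // rotations_ssse3.h; the byte-aligned rotations (32, 24, 16) are the ones
    // SSSE3 can do with a single shuffle.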
    static inline void G1(
        __m128i& row1l, __m128i& row2l, __m128i& row3l, __m128i& row4l,
        __m128i& row1h, __m128i& row2h, __m128i& row3h, __m128i& row4h,
        __m128i& b0, __m128i& b1) {
        row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
        row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
        row4l = _mm_xor_si128(row4l, row1l);
        row4h = _mm_xor_si128(row4h, row1h);
        row4l = Rotr32(row4l);
        row4h = Rotr32(row4h);
        row3l = _mm_add_epi64(row3l, row4l);
        row3h = _mm_add_epi64(row3h, row4h);
        row2l = _mm_xor_si128(row2l, row3l);
        row2h = _mm_xor_si128(row2h, row3h);
        row2l = Rotr24(row2l);
        row2h = Rotr24(row2h);
    }
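
    // Second half of G: the same add-xor-rotate pattern, with the 16- and
    // 63-bit rotations.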
    static inline void G2(
        __m128i& row1l, __m128i& row2l, __m128i& row3l, __m128i& row4l,
        __m128i& row1h, __m128i& row2h, __m128i& row3h, __m128i& row4h,
        __m128i& b0, __m128i& b1) {
        row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
        row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
        row4l = _mm_xor_si128(row4l, row1l);
        row4h = _mm_xor_si128(row4h, row1h);
        row4l = Rotr16(row4l);
        row4h = Rotr16(row4h);
        row3l = _mm_add_epi64(row3l, row4l);
        row3h = _mm_add_epi64(row3h, row4h);
        row2l = _mm_xor_si128(row2l, row3l);
        row2h = _mm_xor_si128(row2h, row3h);
        row2l = Rotr63(row2l);
        row2h = Rotr63(row2h);
    }
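
    // Rotates rows 2-4 so that the diagonals of the 4x4 state line up in
    // columns for the diagonal step. _mm_alignr_epi8(a, b, 8) shifts the
    // concatenation a:b right by 8 bytes, which rotates the four 64-bit words
    // of a row held in a register pair by one position.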
    static inline void Diagonalize(
        __m128i& row2l, __m128i& row3l, __m128i& row4l,
        __m128i& row2h, __m128i& row3h, __m128i& row4h) {
        __m128i t0 = _mm_alignr_epi8(row2h, row2l, 8);
        __m128i t1 = _mm_alignr_epi8(row2l, row2h, 8);
        row2l = t0;
        row2h = t1;
        t0 = row3l;
        row3l = row3h;
        row3h = t0;
        t0 = _mm_alignr_epi8(row4h, row4l, 8);
        t1 = _mm_alignr_epi8(row4l, row4h, 8);
        row4l = t1;
        row4h = t0;
    }
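
    // Inverse of Diagonalize: moves the diagonals back into columns.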
    static inline void Undiagonalize(
        __m128i& row2l, __m128i& row3l, __m128i& row4l,
        __m128i& row2h, __m128i& row3h, __m128i& row4h) {
        __m128i t0 = _mm_alignr_epi8(row2l, row2h, 8);
        __m128i t1 = _mm_alignr_epi8(row2h, row2l, 8);
        row2l = t0;
        row2h = t1;
        t0 = row3l;
        row3l = row3h;
        row3h = t0;
        t0 = _mm_alignr_epi8(row4l, row4h, 8);
        t1 = _mm_alignr_epi8(row4h, row4l, 8);
        row4l = t1;
        row4h = t0;
    }
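
    // One full BLAKE2b round: a column step (G1 + G2 on the columns), then a
    // diagonal step (G1 + G2 after Diagonalize), then Undiagonalize. Message
    // words are gathered through the Sigma[r] permutation.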
    static inline void Round(int r, const ui64* block_ptr,
                             __m128i& row1l, __m128i& row2l, __m128i& row3l, __m128i& row4l,
                             __m128i& row1h, __m128i& row2h, __m128i& row3h, __m128i& row4h) {
        __m128i b0, b1;
        // column step
        b0 = _mm_set_epi64x(block_ptr[Sigma[r][2]], block_ptr[Sigma[r][0]]);
        b1 = _mm_set_epi64x(block_ptr[Sigma[r][6]], block_ptr[Sigma[r][4]]);
        G1(row1l, row2l, row3l, row4l, row1h, row2h, row3h, row4h, b0, b1);
        b0 = _mm_set_epi64x(block_ptr[Sigma[r][3]], block_ptr[Sigma[r][1]]);
        b1 = _mm_set_epi64x(block_ptr[Sigma[r][7]], block_ptr[Sigma[r][5]]);
        G2(row1l, row2l, row3l, row4l, row1h, row2h, row3h, row4h, b0, b1);
        Diagonalize(row2l, row3l, row4l, row2h, row3h, row4h);
        // diagonal step
        b0 = _mm_set_epi64x(block_ptr[Sigma[r][10]], block_ptr[Sigma[r][8]]);
        b1 = _mm_set_epi64x(block_ptr[Sigma[r][14]], block_ptr[Sigma[r][12]]);
        G1(row1l, row2l, row3l, row4l, row1h, row2h, row3h, row4h, b0, b1);
        b0 = _mm_set_epi64x(block_ptr[Sigma[r][11]], block_ptr[Sigma[r][9]]);
        b1 = _mm_set_epi64x(block_ptr[Sigma[r][15]], block_ptr[Sigma[r][13]]);
        G2(row1l, row2l, row3l, row4l, row1h, row2h, row3h, row4h, b0, b1);
        Undiagonalize(row2l, row3l, row4l, row2h, row3h, row4h);
    }
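
    // Initial state: h = IV ^ P, where P is the 64-byte parameter block.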
    template <>
    void TBlake2B<EInstructionSet::SSSE3>::InitialXor_(ui8* h, const ui8* p) {
        __m128i* m_res = (__m128i*)h;
        const __m128i* m_p = (const __m128i*)p;
        __m128i* iv = (__m128i*)GetIV_();
        _mm_storeu_si128(m_res + 0, _mm_xor_si128(iv[0], _mm_loadu_si128(m_p + 0)));
        _mm_storeu_si128(m_res + 1, _mm_xor_si128(iv[1], _mm_loadu_si128(m_p + 1)));
        _mm_storeu_si128(m_res + 2, _mm_xor_si128(iv[2], _mm_loadu_si128(m_p + 2)));
        _mm_storeu_si128(m_res + 3, _mm_xor_si128(iv[3], _mm_loadu_si128(m_p + 3)));
    }
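
    // Compression function F: mixes one 128-byte message block into the hash
    // state State_.H over 12 rounds, then applies the feed-forward XOR.
    // State_.T is the message byte counter, State_.F the finalization flags.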
    template <>
    void TBlake2B<EInstructionSet::SSSE3>::Compress_(const ui64 block[BLAKE2B_BLOCKQWORDS]) {
        __m128i* iv = (__m128i*)GetIV_();
        // rows 1-2 of the working state: the current hash state
        __m128i row1l = _mm_loadu_si128((__m128i*)&State_.H[0]);
        __m128i row1h = _mm_loadu_si128((__m128i*)&State_.H[2]);
        __m128i row2l = _mm_loadu_si128((__m128i*)&State_.H[4]);
        __m128i row2h = _mm_loadu_si128((__m128i*)&State_.H[6]);
        // rows 3-4: the IV, with the counter and finalization flags mixed in
        __m128i row3l = iv[0];
        __m128i row3h = iv[1];
        __m128i row4l = _mm_xor_si128(iv[2], _mm_loadu_si128((__m128i*)&State_.T[0]));
        __m128i row4h = _mm_xor_si128(iv[3], _mm_loadu_si128((__m128i*)&State_.F[0]));
        for (int r = 0; r < 12; ++r)
            Round(r, block, row1l, row2l, row3l, row4l, row1h, row2h, row3h, row4h);
        // feed-forward: H[i] ^= v[i] ^ v[i + 8]
        _mm_storeu_si128((__m128i*)&State_.H[0],
                         _mm_xor_si128(_mm_loadu_si128((__m128i*)&State_.H[0]), _mm_xor_si128(row3l, row1l)));
        _mm_storeu_si128((__m128i*)&State_.H[2],
                         _mm_xor_si128(_mm_loadu_si128((__m128i*)&State_.H[2]), _mm_xor_si128(row3h, row1h)));
        _mm_storeu_si128((__m128i*)&State_.H[4],
                         _mm_xor_si128(_mm_loadu_si128((__m128i*)&State_.H[4]), _mm_xor_si128(row4l, row2l)));
        _mm_storeu_si128((__m128i*)&State_.H[6],
                         _mm_xor_si128(_mm_loadu_si128((__m128i*)&State_.H[6]), _mm_xor_si128(row4h, row2h)));
    }
}