// blamka_sse2.h — SSE2 implementations of the BLAMKA mixing primitives used by Argon2.
#pragma once

#include <library/cpp/digest/argonish/internal/rotations/rotations_sse2.h>

namespace NArgonish {
  4. static inline void BlamkaG1SSE2(
  5. __m128i& a0, __m128i& a1, __m128i& b0, __m128i& b1,
  6. __m128i& c0, __m128i& c1, __m128i& d0, __m128i& d1) {
  7. __m128i ml = _mm_mul_epu32(a0, b0);
  8. ml = _mm_add_epi64(ml, ml);
  9. a0 = _mm_add_epi64(a0, _mm_add_epi64(b0, ml));
  10. ml = _mm_mul_epu32(a1, b1);
  11. ml = _mm_add_epi64(ml, ml);
  12. a1 = _mm_add_epi64(a1, _mm_add_epi64(b1, ml));
  13. d0 = _mm_xor_si128(d0, a0);
  14. d1 = _mm_xor_si128(d1, a1);
  15. d0 = Rotr32(d0);
  16. d1 = Rotr32(d1);
  17. ml = _mm_mul_epu32(c0, d0);
  18. ml = _mm_add_epi64(ml, ml);
  19. c0 = _mm_add_epi64(c0, _mm_add_epi64(d0, ml));
  20. ml = _mm_mul_epu32(c1, d1);
  21. ml = _mm_add_epi64(ml, ml);
  22. c1 = _mm_add_epi64(c1, _mm_add_epi64(ml, d1));
  23. b0 = _mm_xor_si128(b0, c0);
  24. b1 = _mm_xor_si128(b1, c1);
  25. b0 = Rotr24(b0);
  26. b1 = Rotr24(b1);
  27. }
  28. static inline void BlamkaG2SSE2(
  29. __m128i& a0, __m128i& a1, __m128i& b0, __m128i& b1,
  30. __m128i& c0, __m128i& c1, __m128i& d0, __m128i& d1) {
  31. __m128i ml = _mm_mul_epu32(a0, b0);
  32. ml = _mm_add_epi64(ml, ml);
  33. a0 = _mm_add_epi64(a0, _mm_add_epi64(b0, ml));
  34. ml = _mm_mul_epu32(a1, b1);
  35. ml = _mm_add_epi64(ml, ml);
  36. a1 = _mm_add_epi64(a1, _mm_add_epi64(b1, ml));
  37. d0 = _mm_xor_si128(d0, a0);
  38. d1 = _mm_xor_si128(d1, a1);
  39. d0 = Rotr16(d0);
  40. d1 = Rotr16(d1);
  41. ml = _mm_mul_epu32(c0, d0);
  42. ml = _mm_add_epi64(ml, ml);
  43. c0 = _mm_add_epi64(c0, _mm_add_epi64(d0, ml));
  44. ml = _mm_mul_epu32(c1, d1);
  45. ml = _mm_add_epi64(ml, ml);
  46. c1 = _mm_add_epi64(c1, _mm_add_epi64(ml, d1));
  47. b0 = _mm_xor_si128(b0, c0);
  48. b1 = _mm_xor_si128(b1, c1);
  49. b0 = Rotr63(b0);
  50. b1 = Rotr63(b1);
  51. }
    /**
     * Rearranges state rows so the diagonal step can reuse the column-step
     * code (SSE2 has no single-instruction lane rotate, so it is emulated
     * with unpack shuffles). Each row holds four 64-bit lanes split across
     * two vectors: row = [x0.lo, x0.hi, x1.lo, x1.hi].
     *
     * Effect: row b is rotated left by one 64-bit lane, row c by two
     * (a swap of its halves), row d by three (i.e. right by one).
     * Note: _mm_unpackhi_epi64(x, _mm_unpacklo_epi64(y, y)) == [x.hi, y.lo].
     */
    static inline void DiagonalizeSSE2(
        __m128i& b0, __m128i& b1, __m128i& c0, __m128i& c1, __m128i& d0, __m128i& d1) {
        __m128i tmp0 = d0; // save original d0 before it is used as scratch
        __m128i tmp1 = b0; // save original b0; needed after b0 is overwritten
        // c <<<= 2 lanes: swap halves, using d0 as the scratch register
        // (safe: the real d0 was saved into tmp0 above).
        d0 = c0;
        c0 = c1;
        c1 = d0;
        // d <<<= 3 lanes: new d = [d1.hi, tmp0.lo, tmp0.hi, d1.lo]
        // (d1 still holds the original value at this point).
        d0 = _mm_unpackhi_epi64(d1, _mm_unpacklo_epi64(tmp0, tmp0));
        d1 = _mm_unpackhi_epi64(tmp0, _mm_unpacklo_epi64(d1, d1));
        // b <<<= 1 lane: new b = [b0.hi, b1.lo, b1.hi, tmp1.lo]
        b0 = _mm_unpackhi_epi64(b0, _mm_unpacklo_epi64(b1, b1));
        b1 = _mm_unpackhi_epi64(b1, _mm_unpacklo_epi64(tmp1, tmp1));
    }
    /**
     * Inverse of DiagonalizeSSE2: rotates row b right by one 64-bit lane,
     * row c by two (swap of halves), and row d left by one, restoring the
     * column layout after the diagonal step.
     */
    static inline void UndiagonalizeSSE2(
        __m128i& b0, __m128i& b1, __m128i& c0, __m128i& c1, __m128i& d0, __m128i& d1) {
        // c >>>= 2 lanes: swap halves back.
        __m128i tmp0 = c0;
        c0 = c1;
        c1 = tmp0;
        tmp0 = b0;         // save original b0 before it is overwritten
        __m128i tmp1 = d0; // save original d0 before it is overwritten
        // b >>>= 1 lane: new b = [b1.hi, b0.lo, tmp0.hi, b1.lo]
        b0 = _mm_unpackhi_epi64(b1, _mm_unpacklo_epi64(b0, b0));
        b1 = _mm_unpackhi_epi64(tmp0, _mm_unpacklo_epi64(b1, b1));
        // d <<<= 1 lane: new d = [d0.hi, d1.lo, d1.hi, tmp1.lo]
        d0 = _mm_unpackhi_epi64(d0, _mm_unpacklo_epi64(d1, d1));
        d1 = _mm_unpackhi_epi64(d1, _mm_unpacklo_epi64(tmp1, tmp1));
    }
} // namespace NArgonish