/* gf_6vect_mad_vsx.c - GF(2^8) multiply-accumulate of one source into six
 * destination buffers, accelerated with PowerPC VSX vector intrinsics. */
  1. #include "ec_base_vsx.h"
/**
 * gf_6vect_mad_vsx() - GF(2^8) multiply-and-add one source buffer into six
 * destination (parity) buffers using PowerPC VSX intrinsics.
 *
 * For each destination j in 0..5 this computes dest[j] ^= T_j(src), where
 * T_j is a 32-byte split lookup table (low-nibble half at byte offset 0,
 * high-nibble half at offset 16) selected from gftbls by (j, vec, vec_i).
 *
 * @len:    number of bytes to process in each buffer
 * @vec:    number of source vectors in the encode (stride between the six
 *          per-destination table groups)
 * @vec_i:  index of this source vector within each table group
 * @gftbls: concatenated 32-byte GF(2^8) multiplication tables
 * @src:    source data buffer
 * @dest:   array of six destination buffers, each at least @len bytes
 */
void gf_6vect_mad_vsx(int len, int vec, int vec_i, unsigned char *gftbls,
		      unsigned char *src, unsigned char **dest)
{
	unsigned char *s, *t0, *t1, *t2, *t3, *t4, *t5;
	vector unsigned char vX1, vX2, vX3, vX4;
	vector unsigned char vY1, vY2, vY3, vY4, vY5, vY6, vY7, vY8, vY9, vYA, vYB, vYC;
	vector unsigned char vYD, vYE, vYF, vYG, vYH, vYI, vYJ, vYK, vYL, vYM, vYN, vYO;
	vector unsigned char vhi0, vlo0, vhi1, vlo1, vhi2, vlo2;
	vector unsigned char vhi3, vlo3, vhi4, vlo4, vhi5, vlo5;
	int i, head;

	s = (unsigned char *)src;
	t0 = (unsigned char *)dest[0];
	t1 = (unsigned char *)dest[1];
	t2 = (unsigned char *)dest[2];
	t3 = (unsigned char *)dest[3];
	t4 = (unsigned char *)dest[4];
	t5 = (unsigned char *)dest[5];

	/* The vector loop consumes 64 bytes per iteration.  Process the
	 * leading len % 64 bytes with the scalar base routine so the loop
	 * below can start at offset 'head' and run on whole 64-byte stripes.
	 * &gftbls[j * 32 * vec] is the start of destination j's table group. */
	head = len % 64;
	if (head != 0) {
		gf_vect_mad_base(head, vec, vec_i, &gftbls[0 * 32 * vec], src, t0);
		gf_vect_mad_base(head, vec, vec_i, &gftbls[1 * 32 * vec], src, t1);
		gf_vect_mad_base(head, vec, vec_i, &gftbls[2 * 32 * vec], src, t2);
		gf_vect_mad_base(head, vec, vec_i, &gftbls[3 * 32 * vec], src, t3);
		gf_vect_mad_base(head, vec, vec_i, &gftbls[4 * 32 * vec], src, t4);
		gf_vect_mad_base(head, vec, vec_i, &gftbls[5 * 32 * vec], src, t5);
	}

	/* Load the six 32-byte lookup tables once, outside the loop.
	 * ((j * vec) << 5) + (vec_i << 5) == (j * vec + vec_i) * 32: the
	 * low-nibble table is the first 16 bytes, the high-nibble table the
	 * next 16. */
	vlo0 = EC_vec_xl(0, gftbls + (((0 * vec) << 5) + (vec_i << 5)));
	vhi0 = EC_vec_xl(16, gftbls + (((0 * vec) << 5) + (vec_i << 5)));
	vlo1 = EC_vec_xl(0, gftbls + (((1 * vec) << 5) + (vec_i << 5)));
	vhi1 = EC_vec_xl(16, gftbls + (((1 * vec) << 5) + (vec_i << 5)));
	vlo2 = EC_vec_xl(0, gftbls + (((2 * vec) << 5) + (vec_i << 5)));
	vhi2 = EC_vec_xl(16, gftbls + (((2 * vec) << 5) + (vec_i << 5)));
	vlo3 = EC_vec_xl(0, gftbls + (((3 * vec) << 5) + (vec_i << 5)));
	vhi3 = EC_vec_xl(16, gftbls + (((3 * vec) << 5) + (vec_i << 5)));
	vlo4 = EC_vec_xl(0, gftbls + (((4 * vec) << 5) + (vec_i << 5)));
	vhi4 = EC_vec_xl(16, gftbls + (((4 * vec) << 5) + (vec_i << 5)));
	vlo5 = EC_vec_xl(0, gftbls + (((5 * vec) << 5) + (vec_i << 5)));
	vhi5 = EC_vec_xl(16, gftbls + (((5 * vec) << 5) + (vec_i << 5)));

	/* Main loop: 64 source bytes (four 16-byte vectors vX1..vX4) per
	 * iteration.  For each destination, load four vectors, xor in the
	 * table-multiplied source via EC_vec_permxor, and store them back.
	 * Loads/stores of neighboring destinations are deliberately
	 * interleaved with the arithmetic — NOTE(review): this looks like
	 * hand scheduling to overlap memory and vector ops; preserve the
	 * statement order when touching this loop. */
	for (i = head; i < len - 63; i += 64) {
		vX1 = vec_xl(0, s + i);
		vX2 = vec_xl(16, s + i);
		vX3 = vec_xl(32, s + i);
		vX4 = vec_xl(48, s + i);

		/* dest[0] */
		vY1 = vec_xl(0, t0 + i);
		vY2 = vec_xl(16, t0 + i);
		vYD = vec_xl(32, t0 + i);
		vYE = vec_xl(48, t0 + i);

		vY1 = vY1 ^ EC_vec_permxor(vhi0, vlo0, vX1);
		vY2 = vY2 ^ EC_vec_permxor(vhi0, vlo0, vX2);
		vYD = vYD ^ EC_vec_permxor(vhi0, vlo0, vX3);
		vYE = vYE ^ EC_vec_permxor(vhi0, vlo0, vX4);

		vec_xst(vY1, 0, t0 + i);
		vec_xst(vY2, 16, t0 + i);
		vec_xst(vYD, 32, t0 + i);
		vec_xst(vYE, 48, t0 + i);

		/* dest[1] */
		vY3 = vec_xl(0, t1 + i);
		vY4 = vec_xl(16, t1 + i);
		vYF = vec_xl(32, t1 + i);
		vYG = vec_xl(48, t1 + i);

		vY3 = vY3 ^ EC_vec_permxor(vhi1, vlo1, vX1);
		vY4 = vY4 ^ EC_vec_permxor(vhi1, vlo1, vX2);
		vYF = vYF ^ EC_vec_permxor(vhi1, vlo1, vX3);
		vYG = vYG ^ EC_vec_permxor(vhi1, vlo1, vX4);

		vec_xst(vY3, 0, t1 + i);
		vec_xst(vY4, 16, t1 + i);
		vec_xst(vYF, 32, t1 + i);
		vec_xst(vYG, 48, t1 + i);

		/* dest[2]; dest[3]'s loads are issued before dest[2]'s stores */
		vY5 = vec_xl(0, t2 + i);
		vY6 = vec_xl(16, t2 + i);
		vYH = vec_xl(32, t2 + i);
		vYI = vec_xl(48, t2 + i);

		vY5 = vY5 ^ EC_vec_permxor(vhi2, vlo2, vX1);
		vY6 = vY6 ^ EC_vec_permxor(vhi2, vlo2, vX2);
		vYH = vYH ^ EC_vec_permxor(vhi2, vlo2, vX3);
		vYI = vYI ^ EC_vec_permxor(vhi2, vlo2, vX4);

		vY7 = vec_xl(0, t3 + i);
		vY8 = vec_xl(16, t3 + i);
		vYJ = vec_xl(32, t3 + i);
		vYK = vec_xl(48, t3 + i);

		vec_xst(vY5, 0, t2 + i);
		vec_xst(vY6, 16, t2 + i);
		vec_xst(vYH, 32, t2 + i);
		vec_xst(vYI, 48, t2 + i);

		/* dest[3]; dest[4]'s loads overlap dest[3]'s stores */
		vY7 = vY7 ^ EC_vec_permxor(vhi3, vlo3, vX1);
		vY8 = vY8 ^ EC_vec_permxor(vhi3, vlo3, vX2);
		vYJ = vYJ ^ EC_vec_permxor(vhi3, vlo3, vX3);
		vYK = vYK ^ EC_vec_permxor(vhi3, vlo3, vX4);

		vY9 = vec_xl(0, t4 + i);
		vYA = vec_xl(16, t4 + i);
		vYL = vec_xl(32, t4 + i);
		vYM = vec_xl(48, t4 + i);

		vec_xst(vY7, 0, t3 + i);
		vec_xst(vY8, 16, t3 + i);
		vec_xst(vYJ, 32, t3 + i);
		vec_xst(vYK, 48, t3 + i);

		/* dest[4]; dest[5]'s loads overlap dest[4]'s stores */
		vY9 = vY9 ^ EC_vec_permxor(vhi4, vlo4, vX1);
		vYA = vYA ^ EC_vec_permxor(vhi4, vlo4, vX2);
		vYL = vYL ^ EC_vec_permxor(vhi4, vlo4, vX3);
		vYM = vYM ^ EC_vec_permxor(vhi4, vlo4, vX4);

		vYB = vec_xl(0, t5 + i);
		vYC = vec_xl(16, t5 + i);
		vYN = vec_xl(32, t5 + i);
		vYO = vec_xl(48, t5 + i);

		vec_xst(vY9, 0, t4 + i);
		vec_xst(vYA, 16, t4 + i);
		vec_xst(vYL, 32, t4 + i);
		vec_xst(vYM, 48, t4 + i);

		/* dest[5] */
		vYB = vYB ^ EC_vec_permxor(vhi5, vlo5, vX1);
		vYC = vYC ^ EC_vec_permxor(vhi5, vlo5, vX2);
		vYN = vYN ^ EC_vec_permxor(vhi5, vlo5, vX3);
		vYO = vYO ^ EC_vec_permxor(vhi5, vlo5, vX4);

		vec_xst(vYB, 0, t5 + i);
		vec_xst(vYC, 16, t5 + i);
		vec_xst(vYN, 32, t5 + i);
		vec_xst(vYO, 48, t5 + i);
	}
	return;
}