/*
 * gf_3vect_mad_vsx.c
 *
 * GF(2^8) multiply-and-add of one source buffer into three destination
 * buffers, accelerated with PowerPC VSX vector instructions.
 */
  1. #include "ec_base_vsx.h"
  2. void gf_3vect_mad_vsx(int len, int vec, int vec_i, unsigned char *gftbls,
  3. unsigned char *src, unsigned char **dest)
  4. {
  5. unsigned char *s, *t0, *t1, *t2;
  6. vector unsigned char vX1, vX2, vX3, vX4;
  7. vector unsigned char vY1, vY2, vY3, vY4, vY5, vY6;
  8. vector unsigned char vYD, vYE, vYF, vYG, vYH, vYI;
  9. vector unsigned char vhi0, vlo0, vhi1, vlo1, vhi2, vlo2;
  10. int i, head;
  11. s = (unsigned char *)src;
  12. t0 = (unsigned char *)dest[0];
  13. t1 = (unsigned char *)dest[1];
  14. t2 = (unsigned char *)dest[2];
  15. head = len % 64;
  16. if (head != 0) {
  17. gf_vect_mad_base(head, vec, vec_i, &gftbls[0 * 32 * vec], src, t0);
  18. gf_vect_mad_base(head, vec, vec_i, &gftbls[1 * 32 * vec], src, t1);
  19. gf_vect_mad_base(head, vec, vec_i, &gftbls[2 * 32 * vec], src, t2);
  20. }
  21. vlo0 = EC_vec_xl(0, gftbls + (((0 * vec) << 5) + (vec_i << 5)));
  22. vhi0 = EC_vec_xl(16, gftbls + (((0 * vec) << 5) + (vec_i << 5)));
  23. vlo1 = EC_vec_xl(0, gftbls + (((1 * vec) << 5) + (vec_i << 5)));
  24. vhi1 = EC_vec_xl(16, gftbls + (((1 * vec) << 5) + (vec_i << 5)));
  25. vlo2 = EC_vec_xl(0, gftbls + (((2 * vec) << 5) + (vec_i << 5)));
  26. vhi2 = EC_vec_xl(16, gftbls + (((2 * vec) << 5) + (vec_i << 5)));
  27. for (i = head; i < len - 63; i += 64) {
  28. vX1 = vec_xl(0, s + i);
  29. vX2 = vec_xl(16, s + i);
  30. vX3 = vec_xl(32, s + i);
  31. vX4 = vec_xl(48, s + i);
  32. vY1 = vec_xl(0, t0 + i);
  33. vY2 = vec_xl(16, t0 + i);
  34. vYD = vec_xl(32, t0 + i);
  35. vYE = vec_xl(48, t0 + i);
  36. vY1 = vY1 ^ EC_vec_permxor(vhi0, vlo0, vX1);
  37. vY2 = vY2 ^ EC_vec_permxor(vhi0, vlo0, vX2);
  38. vYD = vYD ^ EC_vec_permxor(vhi0, vlo0, vX3);
  39. vYE = vYE ^ EC_vec_permxor(vhi0, vlo0, vX4);
  40. vY3 = vec_xl(0, t1 + i);
  41. vY4 = vec_xl(16, t1 + i);
  42. vYF = vec_xl(32, t1 + i);
  43. vYG = vec_xl(48, t1 + i);
  44. vec_xst(vY1, 0, t0 + i);
  45. vec_xst(vY2, 16, t0 + i);
  46. vec_xst(vYD, 32, t0 + i);
  47. vec_xst(vYE, 48, t0 + i);
  48. vY3 = vY3 ^ EC_vec_permxor(vhi1, vlo1, vX1);
  49. vY4 = vY4 ^ EC_vec_permxor(vhi1, vlo1, vX2);
  50. vYF = vYF ^ EC_vec_permxor(vhi1, vlo1, vX3);
  51. vYG = vYG ^ EC_vec_permxor(vhi1, vlo1, vX4);
  52. vY5 = vec_xl(0, t2 + i);
  53. vY6 = vec_xl(16, t2 + i);
  54. vYH = vec_xl(32, t2 + i);
  55. vYI = vec_xl(48, t2 + i);
  56. vec_xst(vY3, 0, t1 + i);
  57. vec_xst(vY4, 16, t1 + i);
  58. vec_xst(vYF, 32, t1 + i);
  59. vec_xst(vYG, 48, t1 + i);
  60. vY5 = vY5 ^ EC_vec_permxor(vhi2, vlo2, vX1);
  61. vY6 = vY6 ^ EC_vec_permxor(vhi2, vlo2, vX2);
  62. vYH = vYH ^ EC_vec_permxor(vhi2, vlo2, vX3);
  63. vYI = vYI ^ EC_vec_permxor(vhi2, vlo2, vX4);
  64. vec_xst(vY5, 0, t2 + i);
  65. vec_xst(vY6, 16, t2 + i);
  66. vec_xst(vYH, 32, t2 + i);
  67. vec_xst(vYI, 48, t2 + i);
  68. }
  69. return;
  70. }