// File: multiword_128_64_gcc_amd64_sse2.cc
  1. // Copyright 2010 Google Inc. All rights reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. // Implements multiword CRC for GCC on i386.
  15. //
  16. // Small comment: the trick described in
  17. // http://software.intel.com/en-us/articles/fast-simd-integer-move-for-the-intel-pentiumr-4-processor
  18. // (replace "movdqa dst, src" with "pshufd $0xE4, src, dst")
  19. // did not work: execution time increased from
  20. // 1.8 CPU cycles/byte to 2.1 CPU cycles/byte.
  21. // So it may be good idea on P4 but it's not on newer CPUs.
  22. //
  23. // movaps/xorps vs. movdqa/pxor did not make any difference.
  24. #include "generic_crc.h"
  25. #include "uint128_sse2.h"
  26. #if defined(__GNUC__) && CRCUTIL_USE_ASM && HAVE_AMD64 && HAVE_SSE2
  27. namespace crcutil {
// Forward declaration of the assembler worker (defined below) so that
// CrcMultiword can dispatch to it once the input is word-aligned.
template<> uint128_sse2
GenericCrc<uint128_sse2, uint128_sse2, uint64, 4>::CrcMultiwordGccAmd64Sse2(
    const uint8 *src, const uint8 *end, const uint128_sse2 &start) const;

// Computes the 128-bit CRC of [data, data + bytes) starting from "start".
// Entry point: handles short inputs and alignment in C++, then hands the
// aligned bulk of the buffer to the hand-written SSE2 assembler loop.
template<>
uint128_sse2 GenericCrc<uint128_sse2, uint128_sse2, uint64, 4>::CrcMultiword(
    const void *data, size_t bytes, const uint128_sse2 &start) const {
  const uint8 *src = static_cast<const uint8 *>(data);
  uint128_sse2 crc = start ^ this->Base().Canonize();
  const uint8 *end = src + bytes;
  // Inputs shorter than one 8-byte word are processed byte by byte;
  // the assembler path requires at least a full word.
  if (bytes <= 7) {
    for (; src < end; ++src) {
      CRC_BYTE(this, crc, *src);
    }
    return (crc ^ this->Base().Canonize());
  }
  // Consume leading bytes (updating src/end/crc) until src is aligned
  // on a uint64 boundary, as required by the assembler loop's movq loads.
  ALIGN_ON_WORD_BOUNDARY_IF_NEEDED(bytes, this, src, end, crc, uint64);
  if (src >= end) {
    // Alignment consumed the whole input.
    return (crc ^ this->Base().Canonize());
  }
  // Bulk of the data is handled by the assembler implementation,
  // which also performs the final Canonize() XOR before returning.
  return CrcMultiwordGccAmd64Sse2(src, end, crc);
}
// CRC_WORD_ASM() folds one 8-byte word (%[buf0]) into the 128-bit CRC
// accumulator (%[crc0]) using the 8 per-byte-position lookup tables at
// %[table_word]:
//   1. XOR the low 64 bits of the CRC into the input word, then shift
//      the CRC register right by 8 bytes (psrldq).
//   2. For each of the 8 bytes of the word, XOR in the matching entry
//      of the corresponding 256-entry table; consecutive tables are
//      256*16 bytes apart (16-byte entries).
// Each byte index is doubled ("addq %[tmpN], %[tmpN]") before use
// because the x86 addressing-mode scale tops out at 8 while the table
// entries are 16 bytes wide, so the effective stride is 2*8 = 16.
// NOTE: destroys %[buf0] and clobbers %[tmp0]/%[tmp1].
// (No comments inside the macro body: they would break the backslash
// line continuations.)
#define CRC_WORD_ASM() \
  SSE2_MOVQ " %[crc0], %[tmp0]\n" \
  "xorq %[tmp0], %[buf0]\n" \
  "psrldq $8, %[crc0]\n" \
  "movzbq %b[buf0], %[tmp0]\n" \
  "shrq $8, %[buf0]\n" \
  "addq %[tmp0], %[tmp0]\n" \
  "pxor (%[table_word], %[tmp0], 8), %[crc0]\n" \
  "movzbq %b[buf0], %[tmp1]\n" \
  "shrq $8, %[buf0]\n" \
  "addq %[tmp1], %[tmp1]\n" \
  "pxor 1*256*16(%[table_word], %[tmp1], 8), %[crc0]\n" \
  "movzbq %b[buf0], %[tmp0]\n" \
  "shrq $8, %[buf0]\n" \
  "addq %[tmp0], %[tmp0]\n" \
  "pxor 2*256*16(%[table_word], %[tmp0], 8), %[crc0]\n" \
  "movzbq %b[buf0], %[tmp1]\n" \
  "shrq $8, %[buf0]\n" \
  "addq %[tmp1], %[tmp1]\n" \
  "pxor 3*256*16(%[table_word], %[tmp1], 8), %[crc0]\n" \
  "movzbq %b[buf0], %[tmp0]\n" \
  "shrq $8, %[buf0]\n" \
  "addq %[tmp0], %[tmp0]\n" \
  "pxor 4*256*16(%[table_word], %[tmp0], 8), %[crc0]\n" \
  "movzbq %b[buf0], %[tmp1]\n" \
  "shrq $8, %[buf0]\n" \
  "addq %[tmp1], %[tmp1]\n" \
  "pxor 5*256*16(%[table_word], %[tmp1], 8), %[crc0]\n" \
  "movzbq %b[buf0], %[tmp0]\n" \
  "shrq $8, %[buf0]\n" \
  "addq %[tmp0], %[tmp0]\n" \
  "pxor 6*256*16(%[table_word], %[tmp0], 8), %[crc0]\n" \
  "addq %[buf0], %[buf0]\n" \
  "pxor 7*256*16(%[table_word], %[buf0], 8), %[crc0]\n"
// Hand-scheduled GCC extended-asm implementation of the 128-bit CRC for
// AMD64 with SSE2. Requires src to be 8-byte aligned (guaranteed by the
// caller, CrcMultiword). The main loop consumes 4 interleaved 8-byte
// words (32 bytes) per iteration using the interleaved tables
// (crc_word_interleaved_); leftovers are finished one word and then one
// byte at a time using the plain per-word tables (crc_word_).
template<> uint128_sse2
GenericCrc<uint128_sse2, uint128_sse2, uint64, 4>::CrcMultiwordGccAmd64Sse2(
    const uint8 *src, const uint8 *end, const uint128_sse2 &start) const {
  __m128i crc0 = start;   // CRC accumulator for stream 0; also the final CRC.
  __m128i crc1;           // Accumulators for interleaved streams 1..3.
  __m128i crc2;
  __m128i crc3;
  __m128i crc_carryover;  // Copy of crc3, folded into crc0 on the NEXT
                          // iteration (or after the loop exits).
  uint64 buf0;            // Current 8-byte input word of each stream.
  uint64 buf1;
  uint64 buf2;
  uint64 buf3;
  uint64 tmp0;            // Scratch registers for table indices.
  uint64 tmp1;
  asm(
    // Phase 1: interleaved main loop.
    // Bias end by -(2*4*8 - 1) so "cmpq/jbe" skips the main loop unless
    // at least two full 32-byte rounds remain.
    "sub $2*4*8 - 1, %[end]\n"
    "cmpq %[src], %[end]\n"
    "jbe 2f\n"

    // Zero the auxiliary accumulators and preload the first 4 words.
    "pxor %[crc1], %[crc1]\n"
    "pxor %[crc2], %[crc2]\n"
    "pxor %[crc3], %[crc3]\n"
    "pxor %[crc_carryover], %[crc_carryover]\n"
    "movq (%[src]), %[buf0]\n"
    "movq 1*8(%[src]), %[buf1]\n"
    "movq 2*8(%[src]), %[buf2]\n"
    "movq 3*8(%[src]), %[buf3]\n"

    "1:\n"  // Top of the 32-bytes-per-iteration main loop.
#if HAVE_SSE && CRCUTIL_PREFETCH_WIDTH > 0
    "prefetcht0 " TO_STRING(CRCUTIL_PREFETCH_WIDTH) "(%[src])\n"
#endif
#if GCC_VERSION_AVAILABLE(4, 5)
    // Bug in GCC 4.2.4?
    "add $4*8, %[src]\n"
#else
    // "lea" advances src without touching flags (older-GCC workaround).
    "lea 4*8(%[src]), %[src]\n"
#endif
    // Stream 0: fold carryover from previous iteration, mix low half of
    // crc0 into buf0, push high half into crc1, start a fresh crc0 from
    // the byte-0 interleaved table.
    "pxor %[crc_carryover], %[crc0]\n"
    SSE2_MOVQ " %[crc0], %[tmp0]\n"
    "psrldq $8, %[crc0]\n"
    "xorq %[tmp0], %[buf0]\n"
    "movzbq %b[buf0], %[tmp0]\n"
    "pxor %[crc0], %[crc1]\n"
    "addq %[tmp0], %[tmp0]\n"  // index *= 2: entries are 16B, scale max 8.
    "shrq $8, %[buf0]\n"
    "movdqa (%[table], %[tmp0], 8), %[crc0]\n"
    // Stream 1 (same pattern, cascades high half into crc2).
    SSE2_MOVQ " %[crc1], %[tmp1]\n"
    "psrldq $8, %[crc1]\n"
    "xorq %[tmp1], %[buf1]\n"
    "movzbq %b[buf1], %[tmp1]\n"
    "pxor %[crc1], %[crc2]\n"
    "addq %[tmp1], %[tmp1]\n"
    "shrq $8, %[buf1]\n"
    "movdqa (%[table], %[tmp1], 8), %[crc1]\n"
    // Stream 2 (cascades into crc3).
    SSE2_MOVQ " %[crc2], %[tmp0]\n"
    "psrldq $8, %[crc2]\n"
    "xorq %[tmp0], %[buf2]\n"
    "movzbq %b[buf2], %[tmp0]\n"
    "pxor %[crc2], %[crc3]\n"
    "addq %[tmp0], %[tmp0]\n"
    "shrq $8, %[buf2]\n"
    "movdqa (%[table], %[tmp0], 8), %[crc2]\n"
    // Stream 3: its old value is saved in crc_carryover and folded into
    // crc0 at the start of the next iteration.
    SSE2_MOVQ " %[crc3], %[tmp1]\n"
    "psrldq $8, %[crc3]\n"
    "xorq %[tmp1], %[buf3]\n"
    "movzbq %b[buf3], %[tmp1]\n"
    "movdqa %[crc3], %[crc_carryover]\n"
    "addq %[tmp1], %[tmp1]\n"
    "shrq $8, %[buf3]\n"
    "movdqa (%[table], %[tmp1], 8), %[crc3]\n"
// XOR(byte): process input byte #"byte" of all 4 streams against the
// byte-specific slice of the interleaved tables (256*16 bytes apart).
#define XOR(byte) \
    "movzbq %b[buf0], %[tmp0]\n" \
    "shrq $8, %[buf0]\n" \
    "addq %[tmp0], %[tmp0]\n" \
    "pxor " #byte "*256*16(%[table], %[tmp0], 8), %[crc0]\n" \
    "movzbq %b[buf1], %[tmp1]\n" \
    "shrq $8, %[buf1]\n" \
    "addq %[tmp1], %[tmp1]\n" \
    "pxor " #byte "*256*16(%[table], %[tmp1], 8), %[crc1]\n" \
    "movzbq %b[buf2], %[tmp0]\n" \
    "shrq $8, %[buf2]\n" \
    "addq %[tmp0], %[tmp0]\n" \
    "pxor " #byte "*256*16(%[table], %[tmp0], 8), %[crc2]\n" \
    "movzbq %b[buf3], %[tmp1]\n" \
    "shrq $8, %[buf3]\n" \
    "addq %[tmp1], %[tmp1]\n" \
    "pxor " #byte "*256*16(%[table], %[tmp1], 8), %[crc3]\n"
    XOR(1)
    XOR(2)
    XOR(3)
    XOR(4)
    XOR(5)
    XOR(6)
#undef XOR
    // Byte 7 of each stream; interleave the loads of the next 4 words.
    "addq %[buf0], %[buf0]\n"
    "pxor 7*256*16(%[table], %[buf0], 8), %[crc0]\n"
    "movq (%[src]), %[buf0]\n"
    "addq %[buf1], %[buf1]\n"
    "pxor 7*256*16(%[table], %[buf1], 8), %[crc1]\n"
    "movq 1*8(%[src]), %[buf1]\n"
    "addq %[buf2], %[buf2]\n"
    "pxor 7*256*16(%[table], %[buf2], 8), %[crc2]\n"
    "movq 2*8(%[src]), %[buf2]\n"
    "addq %[buf3], %[buf3]\n"
    "pxor 7*256*16(%[table], %[buf3], 8), %[crc3]\n"
    "movq 3*8(%[src]), %[buf3]\n"
    "cmpq %[src], %[end]\n"
    "ja 1b\n"

    // Epilogue of phase 1: collapse the 4 stream CRCs into crc0 by
    // processing the 4 already-loaded words with the per-word tables.
    "pxor %[crc_carryover], %[crc0]\n"
    CRC_WORD_ASM()
    "pxor %[crc1], %[crc0]\n"
    "movq %[buf1], %[buf0]\n"
    CRC_WORD_ASM()
    "pxor %[crc2], %[crc0]\n"
    "movq %[buf2], %[buf0]\n"
    CRC_WORD_ASM()
    "pxor %[crc3], %[crc0]\n"
    "movq %[buf3], %[buf0]\n"
    CRC_WORD_ASM()
    "add $4*8, %[src]\n"  // Account for the 4 words consumed above.

    "2:\n"
    // Phase 2: single-word loop. Re-bias end (net offset now -7) so the
    // loop runs while at least one full 8-byte word remains.
    "add $2*4*8 - 8, %[end]\n"
    "cmpq %[src], %[end]\n"
    "jbe 4f\n"
    "3:\n"
    "movq (%[src]), %[buf0]\n"
    "addq $8, %[src]\n"
    CRC_WORD_ASM()
    "cmpq %[src], %[end]\n"
    "ja 3b\n"

    "4:\n"
    // Phase 3: trailing 0..7 bytes. Restore end to its true value.
    "add $7, %[end]\n"
    "cmpq %[src], %[end]\n"
    "jbe 6f\n"
    "5:\n"
    // Classic byte-at-a-time step: (crc >> 8) ^ table7[(crc ^ byte) & 0xFF].
    "movzbq (%[src]), %[buf0]\n"
    "add $1, %[src]\n"
    SSE2_MOVQ " %[crc0], %[tmp0]\n"
    "movzx %b[tmp0], %[tmp0]\n"
    "psrldq $1, %[crc0]\n"
    "xor %[buf0], %[tmp0]\n"
    "addq %[tmp0], %[tmp0]\n"
    "pxor 7*256*16(%[table_word], %[tmp0], 8), %[crc0]\n"
    "cmpq %[src], %[end]\n"
    "ja 5b\n"
    "6:\n"
    : // outputs
      [src] "+r" (src),
      [end] "+r" (end),
      [crc0] "+x" (crc0),
      [crc1] "=&x" (crc1),
      [crc2] "=&x" (crc2),
      [crc3] "=&x" (crc3),
      [crc_carryover] "=&x" (crc_carryover),
      [buf0] "=&r" (buf0),
      [buf1] "=&r" (buf1),
      [buf2] "=&r" (buf2),
      [buf3] "=&r" (buf3),
      [tmp0] "=&r" (tmp0),
      [tmp1] "=&r" (tmp1)
    : // inputs
      [table_word] "r" (this->crc_word_),
      [table] "r" (this->crc_word_interleaved_));
  return (this->Base().Canonize() ^ crc0);
}
  247. } // namespace crcutil
  248. #endif // defined(__GNUC__) && CRCUTIL_USE_ASM && HAVE_AMD64 && HAVE_SSE2