// multiword_64_64_gcc_amd64_asm.cc
  1. // Copyright 2010 Google Inc. All rights reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. // Implements multiword CRC for GCC on AMD64.
  15. //
// According to "Software Optimization Guide for AMD Family 10h Processors"
  17. // http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/40546.pdf
  18. // instead of
  19. // movzbq %al, %rsi
  20. // shrq $8, %rax
  21. // [use %rsi]
  22. // movzbq %al, %rsi
  23. // shrq $8, %rax
  24. // [use %rsi]
  25. // it is better to use 32-bit registers
  26. // (high 32 bits will be cleared on assignment), i.e.
  27. // movzbl %al, %esi
  28. // [use %rsi]
  29. // movzbl %ah, %esi
  30. // shrq $16, %rax
  31. // [use %rsi]
  32. // Makes instructions shorter and removes one shift
// (the latter is not such a big deal as its execution time
  34. // is nicely masked by [use %rsi] instruction).
  35. //
  36. // Performance difference:
  37. // About 10% degradation on bytes = 8 .. 16
  38. // (clobbering registers that should be saved)
  39. // Break even at 32 bytes.
  40. // 3% improvement starting from 64 bytes.
  41. #include "generic_crc.h"
  42. #if defined(__GNUC__) && CRCUTIL_USE_ASM && HAVE_AMD64
  43. namespace crcutil {
  44. template<> uint64 GenericCrc<uint64, uint64, uint64, 4>::CrcMultiwordGccAmd64(
  45. const void *data, size_t bytes, const uint64 &start) const;
  46. template<> uint64 GenericCrc<uint64, uint64, uint64, 4>::CrcMultiword(
  47. const void *data,
  48. size_t bytes,
  49. const uint64 &start) const {
  50. if (bytes <= 6 * sizeof(Word) - 1) {
  51. const uint8 *src = static_cast<const uint8 *>(data);
  52. uint64 crc = start ^ this->Base().Canonize();
  53. const uint8 *end = src + bytes;
  54. #define PROCESS_ONE_WORD() do { \
  55. Word buf = reinterpret_cast<const Word *>(src)[0]; \
  56. CRC_WORD(this, crc, buf); \
  57. src += sizeof(Word); \
  58. } while (0)
  59. if (bytes >= 1 * sizeof(Word)) {
  60. PROCESS_ONE_WORD();
  61. if (bytes >= 2 * sizeof(Word)) {
  62. PROCESS_ONE_WORD();
  63. if (bytes >= 3 * sizeof(Word)) {
  64. PROCESS_ONE_WORD();
  65. if (bytes >= 4 * sizeof(Word)) {
  66. PROCESS_ONE_WORD();
  67. if (bytes >= 5 * sizeof(Word)) {
  68. PROCESS_ONE_WORD();
  69. }
  70. }
  71. }
  72. }
  73. }
  74. for (; src < end; ++src) {
  75. CRC_BYTE(this, crc, *src);
  76. }
  77. return (crc ^ this->Base().Canonize());
  78. }
  79. return this->CrcMultiwordGccAmd64(data, bytes, start);
  80. }
// Register assignments for the inline assembler below.
//
// TMP0 holds the zero-extended byte used to index the lookup tables;
// TMP0W is its 32-bit alias (writing a 32-bit register clears the upper
// 32 bits, which is the trick described in the header comment).
#define TMP0 "%%rsi"
#define TMP0W "%%esi"
// BUF0..BUF3 hold the four in-flight input words. The L/H variants name
// the lowest byte and the second-lowest byte subregisters, so two bytes
// can be extracted per shift (via movzbl reg8H) instead of one.
#define BUF0 "%%rax"
#define BUF0L "%%al"
#define BUF0H "%%ah"
#define BUF1 "%%rbx"
#define BUF1L "%%bl"
#define BUF1H "%%bh"
#define BUF2 "%%rcx"
#define BUF2L "%%cl"
#define BUF2H "%%ch"
#define BUF3 "%%rdx"
#define BUF3L "%%dl"
#define BUF3H "%%dh"
// Folds one word into %[crc0]: XORs the current CRC into BUF0, then
// performs 8 table lookups -- one 256-entry, 8-byte-entry table per byte
// of the word (tables are laid out contiguously at %[table_word], hence
// the k*256*8 displacements). Consumes BUF0 and clobbers TMP0.
// No comments may appear inside the macro body: a "//" before a
// line-splicing backslash would swallow the continuation.
#define CRC_WORD_ASM() \
"xorq %[crc0], " BUF0 "\n" \
"movzbq " BUF0L ", " TMP0 "\n" \
"movq (%[table_word], " TMP0 ", 8), %[crc0]\n" \
"movzbl " BUF0H ", " TMP0W "\n" \
"shrq $16, " BUF0 "\n" \
"xorq 1*256*8(%[table_word], " TMP0 ", 8), %[crc0]\n" \
"movzbq " BUF0L ", " TMP0 "\n" \
"xorq 2*256*8(%[table_word], " TMP0 ", 8), %[crc0]\n" \
"movzbl " BUF0H ", " TMP0W "\n" \
"shrq $16, " BUF0 "\n" \
"xorq 3*256*8(%[table_word], " TMP0 ", 8), %[crc0]\n" \
"movzbq " BUF0L ", " TMP0 "\n" \
"xorq 4*256*8(%[table_word], " TMP0 ", 8), %[crc0]\n" \
"movzbl " BUF0H ", " TMP0W "\n" \
"shrq $16, " BUF0 "\n" \
"xorq 5*256*8(%[table_word], " TMP0 ", 8), %[crc0]\n" \
"movzbq " BUF0L ", " TMP0 "\n" \
"xorq 6*256*8(%[table_word], " TMP0 ", 8), %[crc0]\n" \
"movzbl " BUF0H ", " TMP0W "\n" \
"xorq 7*256*8(%[table_word], " TMP0 ", 8), %[crc0]\n"
// Assembler CRC implementation for long inputs.
//
// Main loop consumes 4 words (32 bytes) per iteration using four
// independent CRC accumulators (crc0..crc3) over the interleaved tables,
// which hides the latency of the table loads; the streams are then
// folded together one word at a time, followed by a per-word loop and a
// per-byte loop for the tails.
template<> uint64 GenericCrc<uint64, uint64, uint64, 4>::CrcMultiwordGccAmd64(
    const void *data, size_t bytes, const uint64 &start) const {
  const uint8 *src = static_cast<const uint8 *>(data);
  const uint8 *end = src + bytes;
  uint64 crc0 = start ^ this->Base().Canonize();
  // Consume leading bytes until "src" is word-aligned; this may consume
  // the whole input, hence the early return below.
  ALIGN_ON_WORD_BOUNDARY_IF_NEEDED(bytes, this, src, end, crc0, uint64);
  if (src >= end) {
    return (crc0 ^ this->Base().Canonize());
  }
  uint64 crc1;
  uint64 crc2;
  uint64 crc3;
  asm(
      // Enter the 4-way interleaved loop only if at least 2 iterations
      // (2*4 words) fit; otherwise fall through to the per-word loop.
      // "end" is biased by -2*4*8+1 so a single cmp/ja guards the loop.
      "sub $2*4*8 - 1, %[end]\n"
      "cmpq %[src], %[end]\n"
      "jbe 2f\n"
      // crc1..crc3 start at zero (crc0 carries the incoming state);
      // preload the first 4 input words.
      "xorq %[crc1], %[crc1]\n"
      "movq (%[src]), " BUF0 "\n"
      "movq 1*8(%[src]), " BUF1 "\n"
      "movq 2*8(%[src]), " BUF2 "\n"
      "movq 3*8(%[src]), " BUF3 "\n"
      "movq %[crc1], %[crc2]\n"
      "movq %[crc1], %[crc3]\n"
      "1:\n"
#if HAVE_SSE && CRCUTIL_PREFETCH_WIDTH > 0
      "prefetcht0 " TO_STRING(CRCUTIL_PREFETCH_WIDTH) "(%[src])\n"
#endif  // HAVE_SSE
      "add $4*8, %[src]\n"
      // Set buffer data.
      "xorq %[crc0], " BUF0 "\n"
      "xorq %[crc1], " BUF1 "\n"
      "xorq %[crc2], " BUF2 "\n"
      "xorq %[crc3], " BUF3 "\n"
      // LOAD crc of byte 0 and shift buffers.
      "movzbl " BUF0L ", " TMP0W "\n"
      "movq (%[table], " TMP0 ", 8), %[crc0]\n"
      "movzbl " BUF1L ", " TMP0W "\n"
      "movq (%[table], " TMP0 ", 8), %[crc1]\n"
      "movzbl " BUF2L ", " TMP0W "\n"
      "movq (%[table], " TMP0 ", 8), %[crc2]\n"
      "movzbl " BUF3L ", " TMP0W "\n"
      "movq (%[table], " TMP0 ", 8), %[crc3]\n"
// XOR1(k): fold byte k (current low byte of each buffer) of all four
// streams into crc0..crc3 via the k-th interleaved table.
// XOR2(k): same for the second-lowest byte, then shift each buffer
// right by 16 to expose the next byte pair. (No comments inside the
// macro bodies -- see CRC_WORD_ASM.)
#define XOR1(byte1) \
  "movzbl " BUF0L ", " TMP0W "\n" \
  "xorq " #byte1 "*256*8(%[table], " TMP0 ", 8), %[crc0]\n" \
  "movzbl " BUF1L ", " TMP0W "\n" \
  "xorq " #byte1 "*256*8(%[table], " TMP0 ", 8), %[crc1]\n" \
  "movzbl " BUF2L ", " TMP0W "\n" \
  "xorq " #byte1 "*256*8(%[table], " TMP0 ", 8), %[crc2]\n" \
  "movzbl " BUF3L ", " TMP0W "\n" \
  "xorq " #byte1 "*256*8(%[table], " TMP0 ", 8), %[crc3]\n"
#define XOR2(byte2) \
  "movzbl " BUF0H ", " TMP0W "\n" \
  "shrq $16, " BUF0 "\n" \
  "xorq " #byte2 "*256*8(%[table], " TMP0 ", 8), %[crc0]\n" \
  "movzbl " BUF1H ", " TMP0W "\n" \
  "shrq $16, " BUF1 "\n" \
  "xorq " #byte2 "*256*8(%[table], " TMP0 ", 8), %[crc1]\n" \
  "movzbl " BUF2H ", " TMP0W "\n" \
  "shrq $16, " BUF2 "\n" \
  "xorq " #byte2 "*256*8(%[table], " TMP0 ", 8), %[crc2]\n" \
  "movzbl " BUF3H ", " TMP0W "\n" \
  "shrq $16, " BUF3 "\n" \
  "xorq " #byte2 "*256*8(%[table], " TMP0 ", 8), %[crc3]\n"
      // Bytes 1..6 of each word (byte 0 was handled by the loads above).
      XOR2(1)
      XOR1(2)
      XOR2(3)
      XOR1(4)
      XOR2(5)
      XOR1(6)
      // Update CRC registers and load buffers.
      "movzbl " BUF0H ", " TMP0W "\n"
      "xorq 7*256*8(%[table], " TMP0 ", 8), %[crc0]\n"
      "movq (%[src]), " BUF0 "\n"
      "movzbl " BUF1H ", " TMP0W "\n"
      "xorq 7*256*8(%[table], " TMP0 ", 8), %[crc1]\n"
      "movq 1*8(%[src]), " BUF1 "\n"
      "movzbl " BUF2H ", " TMP0W "\n"
      "xorq 7*256*8(%[table], " TMP0 ", 8), %[crc2]\n"
      "movq 2*8(%[src]), " BUF2 "\n"
      "movzbl " BUF3H ", " TMP0W "\n"
      "xorq 7*256*8(%[table], " TMP0 ", 8), %[crc3]\n"
      "movq 3*8(%[src]), " BUF3 "\n"
      "cmpq %[src], %[end]\n"
      "ja 1b\n"
      // Loop exit: BUF0..BUF3 hold the 4 words loaded past the loop
      // body. Fold the four streams sequentially: crc(k) is XORed into
      // buffer k, which is then run through the plain word CRC,
      // accumulating everything into crc0.
      CRC_WORD_ASM()
      "xorq %[crc1], " BUF1 "\n"
      "movq " BUF1 ", " BUF0 "\n"
      CRC_WORD_ASM()
      "xorq %[crc2], " BUF2 "\n"
      "movq " BUF2 ", " BUF0 "\n"
      CRC_WORD_ASM()
      "xorq %[crc3], " BUF3 "\n"
      "movq " BUF3 ", " BUF0 "\n"
      CRC_WORD_ASM()
      "add $4*8, %[src]\n"
      "2:\n"
      // Undo most of the earlier bias: "end" now points 8 bytes before
      // the true end, guarding the one-word-at-a-time loop.
      "add $2*4*8 - 8, %[end]\n"
      "cmpq %[src], %[end]\n"
      "jbe 4f\n"
      "3:\n"
      "movq (%[src]), " BUF0 "\n"
      "add $8, %[src]\n"
      CRC_WORD_ASM()
      "cmpq %[src], %[end]\n"
      "ja 3b\n"
      "4:\n"
      // Restore the true "end" and handle the final 0..7 tail bytes,
      // one byte per iteration (classic byte-at-a-time table step using
      // the last table of table_word).
      "add $7, %[end]\n"
      "cmpq %[src], %[end]\n"
      "jbe 6f\n"
      "5:\n"
      "movzbq (%[src]), " BUF0 "\n"
      "movzbq %b[crc0], " TMP0 "\n"
      "shrq $8, %[crc0]\n"
      "xorq " BUF0 ", " TMP0 "\n"
      "add $1, %[src]\n"
      "xorq 7*256*8(%[table_word], " TMP0 ", 8), %[crc0]\n"
      "cmpq %[src], %[end]\n"
      "ja 5b\n"
      "6:\n"
      : // outputs
        [src] "+r" (src),
        [end] "+r" (end),
        [crc0] "+r" (crc0),
        [crc1] "=&r" (crc1),
        [crc2] "=&r" (crc2),
        [crc3] "=&r" (crc3)
      : // inputs
        [table] "r" (&this->crc_word_interleaved_[0][0]),
        [table_word] "r" (&this->crc_word_[0][0])
      : // clobbers
        // NOTE(review): the asm reads *src but no "memory" clobber (or
        // "m" input) is declared -- presumably safe because the buffer
        // is not written between here and the asm, but strictly the
        // compiler is free to reorder stores across it. TODO: confirm
        // against upstream crcutil / GCC extended-asm rules.
        "%rax",  // BUF0
        "%rbx",  // BUF1
        "%rcx",  // BUF2
        "%rdx",  // BUF3
        "%rsi"   // TMP0
  );
  return (crc0 ^ this->Base().Canonize());
}
  255. } // namespace crcutil
  256. #endif // defined(__GNUC__) && HAVE_AMD64 && CRCUTIL_USE_ASM