snappy-internal.h 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317
  1. // Copyright 2008 Google Inc. All Rights Reserved.
  2. //
  3. // Redistribution and use in source and binary forms, with or without
  4. // modification, are permitted provided that the following conditions are
  5. // met:
  6. //
  7. // * Redistributions of source code must retain the above copyright
  8. // notice, this list of conditions and the following disclaimer.
  9. // * Redistributions in binary form must reproduce the above
  10. // copyright notice, this list of conditions and the following disclaimer
  11. // in the documentation and/or other materials provided with the
  12. // distribution.
  13. // * Neither the name of Google Inc. nor the names of its
  14. // contributors may be used to endorse or promote products derived from
  15. // this software without specific prior written permission.
  16. //
  17. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  18. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  19. // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  20. // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  21. // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  22. // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  23. // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  24. // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  25. // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  26. // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  27. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  28. //
  29. // Internals shared between the Snappy implementation and its unittest.
  30. #ifndef THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
  31. #define THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
  32. #include "snappy-stubs-internal.h"
  33. namespace snappy {
  34. namespace internal {
  35. // Working memory performs a single allocation to hold all scratch space
  36. // required for compression.
  37. class WorkingMemory {
  38. public:
  39. explicit WorkingMemory(size_t input_size);
  40. ~WorkingMemory();
  41. // Allocates and clears a hash table using memory in "*this",
  42. // stores the number of buckets in "*table_size" and returns a pointer to
  43. // the base of the hash table.
  44. uint16_t* GetHashTable(size_t fragment_size, int* table_size) const;
  45. char* GetScratchInput() const { return input_; }
  46. char* GetScratchOutput() const { return output_; }
  47. private:
  48. char* mem_; // the allocated memory, never nullptr
  49. size_t size_; // the size of the allocated memory, never 0
  50. uint16_t* table_; // the pointer to the hashtable
  51. char* input_; // the pointer to the input scratch buffer
  52. char* output_; // the pointer to the output scratch buffer
  53. // No copying
  54. WorkingMemory(const WorkingMemory&);
  55. void operator=(const WorkingMemory&);
  56. };
// Flat array compression that does not emit the "uncompressed length"
// prefix. Compresses "input" string to the "*op" buffer.
//
// REQUIRES: "input_length <= kBlockSize"
// REQUIRES: "op" points to an array of memory that is at least
// "MaxCompressedLength(input_length)" in size.
// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
// REQUIRES: "table_size" is a power of two
//
// Returns an "end" pointer into "op" buffer.
// "end - op" is the compressed size of "input".
char* CompressFragment(const char* input,
                       size_t input_length,
                       char* op,
                       uint16_t* table,
                       const int table_size);
  73. // Find the largest n such that
  74. //
  75. // s1[0,n-1] == s2[0,n-1]
  76. // and n <= (s2_limit - s2).
  77. //
  78. // Return make_pair(n, n < 8).
  79. // Does not read *s2_limit or beyond.
  80. // Does not read *(s1 + (s2_limit - s2)) or beyond.
  81. // Requires that s2_limit >= s2.
  82. //
  83. // In addition populate *data with the next 5 bytes from the end of the match.
  84. // This is only done if 8 bytes are available (s2_limit - s2 >= 8). The point is
// that on some architectures this can be done faster in this routine than subsequent
  86. // loading from s2 + n.
  87. //
  88. // Separate implementation for 64-bit, little-endian cpus.
  89. #if !defined(SNAPPY_IS_BIG_ENDIAN) && \
  90. (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || defined(ARCH_ARM))
static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
                                                      const char* s2,
                                                      const char* s2_limit,
                                                      uint64_t* data) {
  assert(s2_limit >= s2);
  size_t matched = 0;
  // This block isn't necessary for correctness; we could just start looping
  // immediately. As an optimization though, it is useful. It creates some not
  // uncommon code paths that determine, without extra effort, whether the match
  // length is less than 8. In short, we are hoping to avoid a conditional
  // branch, and perhaps get better code layout from the C++ compiler.
  if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
    uint64_t a1 = UNALIGNED_LOAD64(s1);
    uint64_t a2 = UNALIGNED_LOAD64(s2);
    if (SNAPPY_PREDICT_TRUE(a1 != a2)) {
      // This code is critical for performance. The reason is that it determines
      // how much to advance `ip` (s2). This obviously depends on both the loads
      // from the `candidate` (s1) and `ip`. Furthermore the next `candidate`
      // depends on the advanced `ip` calculated here through a load, hash and
      // new candidate hash lookup (a lot of cycles). This makes s1 (ie.
      // `candidate`) the variable that limits throughput. This is the reason we
      // go through hoops to have this function update `data` for the next iter.
      // The straightforward code would use *data, given by
      //
      //   *data = UNALIGNED_LOAD64(s2 + matched_bytes) (Latency of 5 cycles),
      //
      // as input for the hash table lookup to find next candidate. However
      // this forces the load on the data dependency chain of s1, because
      // matched_bytes directly depends on s1. However matched_bytes is 0..7, so
      // we can also calculate *data by
      //
      //   *data = AlignRight(UNALIGNED_LOAD64(s2), UNALIGNED_LOAD64(s2 + 8),
      //                      matched_bytes);
      //
      // The loads do not depend on s1 anymore and are thus off the bottleneck.
      // The straightforward implementation on x86_64 would be to use
      //
      //   shrd rax, rdx, cl  (cl being matched_bytes * 8)
      //
      // unfortunately shrd with a variable shift has a 4 cycle latency. So this
      // only wins 1 cycle. The BMI2 shrx instruction is a 1 cycle variable
      // shift instruction but can only shift 64 bits. If we focus on just
      // obtaining the least significant 4 bytes, we can obtain this by
      //
      //   *data = ConditionalMove(matched_bytes < 4, UNALIGNED_LOAD64(s2),
      //                           UNALIGNED_LOAD64(s2 + 4) >> ((matched_bytes & 3) * 8);
      //
      // Written like above this is not a big win, the conditional move would be
      // a cmp followed by a cmov (2 cycles) followed by a shift (1 cycle).
      // However matched_bytes < 4 is equal to
      // static_cast<uint32_t>(xorval) != 0. Written that way, the conditional
      // move (2 cycles) can execute in parallel with FindLSBSetNonZero64
      // (tzcnt), which takes 3 cycles.
      uint64_t xorval = a1 ^ a2;
      int shift = Bits::FindLSBSetNonZero64(xorval);
      size_t matched_bytes = shift >> 3;
#ifndef __x86_64__
      *data = UNALIGNED_LOAD64(s2 + matched_bytes);
#else
      // Ideally this would just be
      //
      //   a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
      //
      // However clang correctly infers that the above statement participates on
      // a critical data dependency chain and thus, unfortunately, refuses to
      // use a conditional move (it's tuned to cut data dependencies). In this
      // case there is a longer parallel chain anyway AND this will be fairly
      // unpredictable.
      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
      asm("testl %k2, %k2\n\t"
          "cmovzq %1, %0\n\t"
          : "+r"(a2)
          : "r"(a3), "r"(xorval));
      *data = a2 >> (shift & (3 * 8));
#endif
      return std::pair<size_t, bool>(matched_bytes, true);
    } else {
      matched = 8;
      s2 += 8;
    }
  }
  // Find out how long the match is. We loop over the data 64 bits at a
  // time until we find a 64-bit block that doesn't match; then we find
  // the first non-matching bit and use that to calculate the total
  // length of the match.
  while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
    uint64_t a1 = UNALIGNED_LOAD64(s1 + matched);
    uint64_t a2 = UNALIGNED_LOAD64(s2);
    if (a1 == a2) {
      s2 += 8;
      matched += 8;
    } else {
      uint64_t xorval = a1 ^ a2;
      int shift = Bits::FindLSBSetNonZero64(xorval);
      size_t matched_bytes = shift >> 3;
#ifndef __x86_64__
      *data = UNALIGNED_LOAD64(s2 + matched_bytes);
#else
      // Same conditional-move trick as above: keep the *data load off the
      // s1 data-dependency chain.
      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
      asm("testl %k2, %k2\n\t"
          "cmovzq %1, %0\n\t"
          : "+r"(a2)
          : "r"(a3), "r"(xorval));
      *data = a2 >> (shift & (3 * 8));
#endif
      matched += matched_bytes;
      assert(matched >= 8);
      return std::pair<size_t, bool>(matched, false);
    }
  }
  // Tail: fewer than 16 readable bytes remain past s2, so fall back to a
  // byte-at-a-time comparison. *data is only refreshed when 8 bytes are
  // still available, per the contract in the header comment.
  while (SNAPPY_PREDICT_TRUE(s2 < s2_limit)) {
    if (s1[matched] == *s2) {
      ++s2;
      ++matched;
    } else {
      if (s2 <= s2_limit - 8) {
        *data = UNALIGNED_LOAD64(s2);
      }
      return std::pair<size_t, bool>(matched, matched < 8);
    }
  }
  return std::pair<size_t, bool>(matched, matched < 8);
}
  214. #else
  215. static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
  216. const char* s2,
  217. const char* s2_limit,
  218. uint64_t* data) {
  219. // Implementation based on the x86-64 version, above.
  220. assert(s2_limit >= s2);
  221. int matched = 0;
  222. while (s2 <= s2_limit - 4 &&
  223. UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
  224. s2 += 4;
  225. matched += 4;
  226. }
  227. if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
  228. uint32_t x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
  229. int matching_bits = Bits::FindLSBSetNonZero(x);
  230. matched += matching_bits >> 3;
  231. s2 += matching_bits >> 3;
  232. } else {
  233. while ((s2 < s2_limit) && (s1[matched] == *s2)) {
  234. ++s2;
  235. ++matched;
  236. }
  237. }
  238. if (s2 <= s2_limit - 8) *data = LittleEndian::Load64(s2);
  239. return std::pair<size_t, bool>(matched, matched < 8);
  240. }
  241. #endif
// Lookup tables for decompression code. Give --snappy_dump_decompression_table
// to the unit test to recompute char_table.

// Tag types stored in the low two bits of each opcode byte.
enum {
  LITERAL = 0,
  COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
  COPY_2_BYTE_OFFSET = 2,
  COPY_4_BYTE_OFFSET = 3
};
// Longest possible tag: a COPY_4_BYTE_OFFSET opcode byte plus its 4-byte offset.
static const int kMaximumTagLength = 5;  // COPY_4_BYTE_OFFSET plus the actual offset.
// Data stored per entry in lookup table:
//      Range   Bits-used       Description
//      ------------------------------------
//      1..64   0..7            Literal/copy length encoded in opcode byte
//      0..7    8..10           Copy offset encoded in opcode byte / 256
//      0..4    11..13          Extra bytes after opcode
//
// We use eight bits for the length even though 7 would have sufficed
// because of efficiency reasons:
//      (1) Extracting a byte is faster than a bit-field
//      (2) It properly aligns copy offset so we do not need a <<8
static constexpr uint16_t char_table[256] = {
    // clang-format off
  0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
  0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
  0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
  0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
  0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
  0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
  0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
  0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
  0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
  0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
  0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
  0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
  0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
  0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
  0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
  0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
  0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
  0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
  0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
  0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
  0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
  0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
  0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
  0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
  0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
  0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
  0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
  0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
  0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
  0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
  0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
  0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040,
    // clang-format on
};
  298. } // end namespace internal
  299. } // end namespace snappy
  300. #endif // THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_