// fast_encoder.go
  1. // Copyright 2011 The Snappy-Go Authors. All rights reserved.
  2. // Modified for deflate by Klaus Post (c) 2015.
  3. // Use of this source code is governed by a BSD-style
  4. // license that can be found in the LICENSE file.
  5. package flate
  6. import (
  7. "encoding/binary"
  8. "fmt"
  9. "math/bits"
  10. )
// fastEnc is the interface implemented by the per-level fast encoders
// returned by newFastEnc. Encode appends tokens for src to dst; Reset
// prepares the encoder for a new, unrelated input stream.
type fastEnc interface {
	Encode(dst *tokens, src []byte)
	Reset()
}
  15. func newFastEnc(level int) fastEnc {
  16. switch level {
  17. case 1:
  18. return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
  19. case 2:
  20. return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
  21. case 3:
  22. return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
  23. case 4:
  24. return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
  25. case 5:
  26. return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
  27. case 6:
  28. return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
  29. default:
  30. panic("invalid level specified")
  31. }
  32. }
const (
	tableBits  = 15              // Bits used in the table
	tableSize  = 1 << tableBits  // Size of the table
	tableShift = 32 - tableBits  // Right-shift to get the tableBits most significant bits of a uint32.

	baseMatchOffset = 1       // The smallest match offset
	baseMatchLength = 3       // The smallest match length per the RFC section 3.2.5
	maxMatchOffset  = 1 << 15 // The largest match offset

	bTableBits = 17              // Bits used in the big tables
	bTableSize = 1 << bTableBits // Size of the big table

	allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history.

	// bufferReset is the offset at which fastGen.cur is wrapped to avoid
	// int32 overflow; chosen so cur + allocHistory + one block stays < 1<<31.
	bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1
)
// Multiplicative-hashing primes, one per input width in bytes.
// hashLen and hash7 select among these based on how many low bytes
// of the input word participate in the hash.
const (
	prime3bytes = 506832829
	prime4bytes = 2654435761
	prime5bytes = 889523592379
	prime6bytes = 227718039650203
	prime7bytes = 58295818150454627
	prime8bytes = 0xcf1bbcdcb7a56463
)
  53. func load3232(b []byte, i int32) uint32 {
  54. return binary.LittleEndian.Uint32(b[i:])
  55. }
  56. func load6432(b []byte, i int32) uint64 {
  57. return binary.LittleEndian.Uint64(b[i:])
  58. }
// tableEntry is a single hash-table slot.
// NOTE(review): offset is presumably the match position biased by
// fastGen.cur (addBlock/Reset advance cur as history slides) — confirm
// against the per-level encoders that populate the table.
type tableEntry struct {
	offset int32
}
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastGen struct {
	hist []byte // sliding history window; grown/slid by addBlock
	cur  int32  // offset bias of hist[0]; advanced whenever history is discarded
}
  69. func (e *fastGen) addBlock(src []byte) int32 {
  70. // check if we have space already
  71. if len(e.hist)+len(src) > cap(e.hist) {
  72. if cap(e.hist) == 0 {
  73. e.hist = make([]byte, 0, allocHistory)
  74. } else {
  75. if cap(e.hist) < maxMatchOffset*2 {
  76. panic("unexpected buffer size")
  77. }
  78. // Move down
  79. offset := int32(len(e.hist)) - maxMatchOffset
  80. // copy(e.hist[0:maxMatchOffset], e.hist[offset:])
  81. *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
  82. e.cur += offset
  83. e.hist = e.hist[:maxMatchOffset]
  84. }
  85. }
  86. s := int32(len(e.hist))
  87. e.hist = append(e.hist, src...)
  88. return s
  89. }
// tableEntryPrev is a hash-table slot that remembers the two most
// recent entries (current and previous), giving a chain depth of two
// match candidates per bucket.
type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}
  94. // hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
  95. // Preferably h should be a constant and should always be <64.
  96. func hash7(u uint64, h uint8) uint32 {
  97. return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
  98. }
  99. // hashLen returns a hash of the lowest mls bytes of with length output bits.
  100. // mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
  101. // length should always be < 32.
  102. // Preferably length and mls should be a constant for inlining.
  103. func hashLen(u uint64, length, mls uint8) uint32 {
  104. switch mls {
  105. case 3:
  106. return (uint32(u<<8) * prime3bytes) >> (32 - length)
  107. case 5:
  108. return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
  109. case 6:
  110. return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
  111. case 7:
  112. return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
  113. case 8:
  114. return uint32((u * prime8bytes) >> (64 - length))
  115. default:
  116. return (uint32(u) * prime4bytes) >> (32 - length)
  117. }
  118. }
  119. // matchlen will return the match length between offsets and t in src.
  120. // The maximum length returned is maxMatchLength - 4.
  121. // It is assumed that s > t, that t >=0 and s < len(src).
  122. func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
  123. if debugDecode {
  124. if t >= s {
  125. panic(fmt.Sprint("t >=s:", t, s))
  126. }
  127. if int(s) >= len(src) {
  128. panic(fmt.Sprint("s >= len(src):", s, len(src)))
  129. }
  130. if t < 0 {
  131. panic(fmt.Sprint("t < 0:", t))
  132. }
  133. if s-t > maxMatchOffset {
  134. panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
  135. }
  136. }
  137. s1 := int(s) + maxMatchLength - 4
  138. if s1 > len(src) {
  139. s1 = len(src)
  140. }
  141. // Extend the match to be as long as possible.
  142. return int32(matchLen(src[s:s1], src[t:]))
  143. }
  144. // matchlenLong will return the match length between offsets and t in src.
  145. // It is assumed that s > t, that t >=0 and s < len(src).
  146. func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
  147. if debugDeflate {
  148. if t >= s {
  149. panic(fmt.Sprint("t >=s:", t, s))
  150. }
  151. if int(s) >= len(src) {
  152. panic(fmt.Sprint("s >= len(src):", s, len(src)))
  153. }
  154. if t < 0 {
  155. panic(fmt.Sprint("t < 0:", t))
  156. }
  157. if s-t > maxMatchOffset {
  158. panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
  159. }
  160. }
  161. // Extend the match to be as long as possible.
  162. return int32(matchLen(src[s:], src[t:]))
  163. }
// Reset the encoding table.
// Rather than clearing the match tables, the position counter is
// advanced so every stale entry falls out of matching range.
func (e *fastGen) Reset() {
	if cap(e.hist) < allocHistory {
		e.hist = make([]byte, 0, allocHistory)
	}
	// We offset current position so everything will be out of reach.
	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
	// Note: e.cur must be bumped BEFORE hist is truncated below, since the
	// bump depends on len(e.hist).
	if e.cur <= bufferReset {
		e.cur += maxMatchOffset + int32(len(e.hist))
	}
	e.hist = e.hist[:0]
}
  176. // matchLen returns the maximum length.
  177. // 'a' must be the shortest of the two.
  178. func matchLen(a, b []byte) int {
  179. var checked int
  180. for len(a) >= 8 {
  181. if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
  182. return checked + (bits.TrailingZeros64(diff) >> 3)
  183. }
  184. checked += 8
  185. a = a[8:]
  186. b = b[8:]
  187. }
  188. b = b[:len(a)]
  189. for i := range a {
  190. if a[i] != b[i] {
  191. return i + checked
  192. }
  193. }
  194. return len(a) + checked
  195. }