enc_better.go 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242
  1. // Copyright 2019+ Klaus Post. All rights reserved.
  2. // License information can be found in the LICENSE file.
  3. // Based on work by Yann Collet, released under BSD License.
  4. package zstd
  5. import "fmt"
const (
	betterLongTableBits = 19                       // Bits used in the long match table
	betterLongTableSize = 1 << betterLongTableBits // Size of the table
	betterLongLen       = 8                        // Bytes used for table hash

	// Note: Increasing the short table bits or making the hash shorter
	// can actually lead to compression degradation since it will 'steal' more from the
	// long match table and match offsets are quite big.
	// This greatly depends on the type of input.
	betterShortTableBits = 13                        // Bits used in the short match table
	betterShortTableSize = 1 << betterShortTableBits // Size of the table
	betterShortLen       = 5                         // Bytes used for table hash

	betterLongTableShardCnt  = 1 << (betterLongTableBits - dictShardBits)    // Number of shards in the table
	betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard

	betterShortTableShardCnt  = 1 << (betterShortTableBits - dictShardBits)     // Number of shards in the table
	betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard
)
// prevEntry is a long-table slot holding the two most recent positions that
// hashed to it, forming a match "chain" of length 2.
// Offsets are stored with the e.cur base added (see where slots are written:
// prevEntry{offset: s + e.cur, ...}).
type prevEntry struct {
	offset int32 // most recent position for this hash (includes e.cur base)
	prev   int32 // position the slot held before 'offset' overwrote it
}
// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
// The long match table contains the previous entry with the same hash,
// effectively making it a "chain" of length 2.
// When we find a long match we choose between the two values and select the longest.
// When we find a short match, after checking the long, we check if we can find a long at n+1
// and that it is longer (lazy matching).
type betterFastEncoder struct {
	fastBase
	table     [betterShortTableSize]tableEntry // short-match table, keyed by betterShortLen-byte hash
	longTable [betterLongTableSize]prevEntry   // long-match table with 2-deep chains, keyed by betterLongLen-byte hash
}
// betterFastEncoderDict is a betterFastEncoder with a preloaded dictionary.
// The dict* tables hold the pristine dictionary state; the shard-dirty flags
// track which table shards have been modified since the dictionary was
// loaded, so only those need to be restored between uses.
type betterFastEncoderDict struct {
	betterFastEncoder
	dictTable            []tableEntry                    // pristine short table built from the dictionary
	dictLongTable        []prevEntry                     // pristine long table built from the dictionary
	shortTableShardDirty [betterShortTableShardCnt]bool  // per-shard dirty flags for e.table
	longTableShardDirty  [betterLongTableShardCnt]bool   // per-shard dirty flags for e.longTable
	allDirty             bool                            // set when every shard must be considered dirty
}
  45. // Encode improves compression...
  46. func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
  47. const (
  48. // Input margin is the number of bytes we read (8)
  49. // and the maximum we will read ahead (2)
  50. inputMargin = 8 + 2
  51. minNonLiteralBlockSize = 16
  52. )
  53. // Protect against e.cur wraparound.
  54. for e.cur >= e.bufferReset-int32(len(e.hist)) {
  55. if len(e.hist) == 0 {
  56. e.table = [betterShortTableSize]tableEntry{}
  57. e.longTable = [betterLongTableSize]prevEntry{}
  58. e.cur = e.maxMatchOff
  59. break
  60. }
  61. // Shift down everything in the table that isn't already too far away.
  62. minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
  63. for i := range e.table[:] {
  64. v := e.table[i].offset
  65. if v < minOff {
  66. v = 0
  67. } else {
  68. v = v - e.cur + e.maxMatchOff
  69. }
  70. e.table[i].offset = v
  71. }
  72. for i := range e.longTable[:] {
  73. v := e.longTable[i].offset
  74. v2 := e.longTable[i].prev
  75. if v < minOff {
  76. v = 0
  77. v2 = 0
  78. } else {
  79. v = v - e.cur + e.maxMatchOff
  80. if v2 < minOff {
  81. v2 = 0
  82. } else {
  83. v2 = v2 - e.cur + e.maxMatchOff
  84. }
  85. }
  86. e.longTable[i] = prevEntry{
  87. offset: v,
  88. prev: v2,
  89. }
  90. }
  91. e.cur = e.maxMatchOff
  92. break
  93. }
  94. s := e.addBlock(src)
  95. blk.size = len(src)
  96. if len(src) < minNonLiteralBlockSize {
  97. blk.extraLits = len(src)
  98. blk.literals = blk.literals[:len(src)]
  99. copy(blk.literals, src)
  100. return
  101. }
  102. // Override src
  103. src = e.hist
  104. sLimit := int32(len(src)) - inputMargin
  105. // stepSize is the number of bytes to skip on every main loop iteration.
  106. // It should be >= 1.
  107. const stepSize = 1
  108. const kSearchStrength = 9
  109. // nextEmit is where in src the next emitLiteral should start from.
  110. nextEmit := s
  111. cv := load6432(src, s)
  112. // Relative offsets
  113. offset1 := int32(blk.recentOffsets[0])
  114. offset2 := int32(blk.recentOffsets[1])
  115. addLiterals := func(s *seq, until int32) {
  116. if until == nextEmit {
  117. return
  118. }
  119. blk.literals = append(blk.literals, src[nextEmit:until]...)
  120. s.litLen = uint32(until - nextEmit)
  121. }
  122. if debugEncoder {
  123. println("recent offsets:", blk.recentOffsets)
  124. }
  125. encodeLoop:
  126. for {
  127. var t int32
  128. // We allow the encoder to optionally turn off repeat offsets across blocks
  129. canRepeat := len(blk.sequences) > 2
  130. var matched int32
  131. for {
  132. if debugAsserts && canRepeat && offset1 == 0 {
  133. panic("offset0 was 0")
  134. }
  135. nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
  136. nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
  137. candidateL := e.longTable[nextHashL]
  138. candidateS := e.table[nextHashS]
  139. const repOff = 1
  140. repIndex := s - offset1 + repOff
  141. off := s + e.cur
  142. e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
  143. e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
  144. if canRepeat {
  145. if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
  146. // Consider history as well.
  147. var seq seq
  148. lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
  149. seq.matchLen = uint32(lenght - zstdMinMatch)
  150. // We might be able to match backwards.
  151. // Extend as long as we can.
  152. start := s + repOff
  153. // We end the search early, so we don't risk 0 literals
  154. // and have to do special offset treatment.
  155. startLimit := nextEmit + 1
  156. tMin := s - e.maxMatchOff
  157. if tMin < 0 {
  158. tMin = 0
  159. }
  160. for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
  161. repIndex--
  162. start--
  163. seq.matchLen++
  164. }
  165. addLiterals(&seq, start)
  166. // rep 0
  167. seq.offset = 1
  168. if debugSequences {
  169. println("repeat sequence", seq, "next s:", s)
  170. }
  171. blk.sequences = append(blk.sequences, seq)
  172. // Index match start+1 (long) -> s - 1
  173. index0 := s + repOff
  174. s += lenght + repOff
  175. nextEmit = s
  176. if s >= sLimit {
  177. if debugEncoder {
  178. println("repeat ended", s, lenght)
  179. }
  180. break encodeLoop
  181. }
  182. // Index skipped...
  183. for index0 < s-1 {
  184. cv0 := load6432(src, index0)
  185. cv1 := cv0 >> 8
  186. h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
  187. off := index0 + e.cur
  188. e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
  189. e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
  190. index0 += 2
  191. }
  192. cv = load6432(src, s)
  193. continue
  194. }
  195. const repOff2 = 1
  196. // We deviate from the reference encoder and also check offset 2.
  197. // Still slower and not much better, so disabled.
  198. // repIndex = s - offset2 + repOff2
  199. if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
  200. // Consider history as well.
  201. var seq seq
  202. lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
  203. seq.matchLen = uint32(lenght - zstdMinMatch)
  204. // We might be able to match backwards.
  205. // Extend as long as we can.
  206. start := s + repOff2
  207. // We end the search early, so we don't risk 0 literals
  208. // and have to do special offset treatment.
  209. startLimit := nextEmit + 1
  210. tMin := s - e.maxMatchOff
  211. if tMin < 0 {
  212. tMin = 0
  213. }
  214. for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
  215. repIndex--
  216. start--
  217. seq.matchLen++
  218. }
  219. addLiterals(&seq, start)
  220. // rep 2
  221. seq.offset = 2
  222. if debugSequences {
  223. println("repeat sequence 2", seq, "next s:", s)
  224. }
  225. blk.sequences = append(blk.sequences, seq)
  226. index0 := s + repOff2
  227. s += lenght + repOff2
  228. nextEmit = s
  229. if s >= sLimit {
  230. if debugEncoder {
  231. println("repeat ended", s, lenght)
  232. }
  233. break encodeLoop
  234. }
  235. // Index skipped...
  236. for index0 < s-1 {
  237. cv0 := load6432(src, index0)
  238. cv1 := cv0 >> 8
  239. h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
  240. off := index0 + e.cur
  241. e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
  242. e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
  243. index0 += 2
  244. }
  245. cv = load6432(src, s)
  246. // Swap offsets
  247. offset1, offset2 = offset2, offset1
  248. continue
  249. }
  250. }
  251. // Find the offsets of our two matches.
  252. coffsetL := candidateL.offset - e.cur
  253. coffsetLP := candidateL.prev - e.cur
  254. // Check if we have a long match.
  255. if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
  256. // Found a long match, at least 8 bytes.
  257. matched = e.matchlen(s+8, coffsetL+8, src) + 8
  258. t = coffsetL
  259. if debugAsserts && s <= t {
  260. panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
  261. }
  262. if debugAsserts && s-t > e.maxMatchOff {
  263. panic("s - t >e.maxMatchOff")
  264. }
  265. if debugMatches {
  266. println("long match")
  267. }
  268. if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
  269. // Found a long match, at least 8 bytes.
  270. prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
  271. if prevMatch > matched {
  272. matched = prevMatch
  273. t = coffsetLP
  274. }
  275. if debugAsserts && s <= t {
  276. panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
  277. }
  278. if debugAsserts && s-t > e.maxMatchOff {
  279. panic("s - t >e.maxMatchOff")
  280. }
  281. if debugMatches {
  282. println("long match")
  283. }
  284. }
  285. break
  286. }
  287. // Check if we have a long match on prev.
  288. if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
  289. // Found a long match, at least 8 bytes.
  290. matched = e.matchlen(s+8, coffsetLP+8, src) + 8
  291. t = coffsetLP
  292. if debugAsserts && s <= t {
  293. panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
  294. }
  295. if debugAsserts && s-t > e.maxMatchOff {
  296. panic("s - t >e.maxMatchOff")
  297. }
  298. if debugMatches {
  299. println("long match")
  300. }
  301. break
  302. }
  303. coffsetS := candidateS.offset - e.cur
  304. // Check if we have a short match.
  305. if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
  306. // found a regular match
  307. matched = e.matchlen(s+4, coffsetS+4, src) + 4
  308. // See if we can find a long match at s+1
  309. const checkAt = 1
  310. cv := load6432(src, s+checkAt)
  311. nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
  312. candidateL = e.longTable[nextHashL]
  313. coffsetL = candidateL.offset - e.cur
  314. // We can store it, since we have at least a 4 byte match.
  315. e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
  316. if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
  317. // Found a long match, at least 8 bytes.
  318. matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
  319. if matchedNext > matched {
  320. t = coffsetL
  321. s += checkAt
  322. matched = matchedNext
  323. if debugMatches {
  324. println("long match (after short)")
  325. }
  326. break
  327. }
  328. }
  329. // Check prev long...
  330. coffsetL = candidateL.prev - e.cur
  331. if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
  332. // Found a long match, at least 8 bytes.
  333. matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
  334. if matchedNext > matched {
  335. t = coffsetL
  336. s += checkAt
  337. matched = matchedNext
  338. if debugMatches {
  339. println("prev long match (after short)")
  340. }
  341. break
  342. }
  343. }
  344. t = coffsetS
  345. if debugAsserts && s <= t {
  346. panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
  347. }
  348. if debugAsserts && s-t > e.maxMatchOff {
  349. panic("s - t >e.maxMatchOff")
  350. }
  351. if debugAsserts && t < 0 {
  352. panic("t<0")
  353. }
  354. if debugMatches {
  355. println("short match")
  356. }
  357. break
  358. }
  359. // No match found, move forward in input.
  360. s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
  361. if s >= sLimit {
  362. break encodeLoop
  363. }
  364. cv = load6432(src, s)
  365. }
  366. // Try to find a better match by searching for a long match at the end of the current best match
  367. if s+matched < sLimit {
  368. // Allow some bytes at the beginning to mismatch.
  369. // Sweet spot is around 3 bytes, but depends on input.
  370. // The skipped bytes are tested in Extend backwards,
  371. // and still picked up as part of the match if they do.
  372. const skipBeginning = 3
  373. nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
  374. s2 := s + skipBeginning
  375. cv := load3232(src, s2)
  376. candidateL := e.longTable[nextHashL]
  377. coffsetL := candidateL.offset - e.cur - matched + skipBeginning
  378. if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
  379. // Found a long match, at least 4 bytes.
  380. matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
  381. if matchedNext > matched {
  382. t = coffsetL
  383. s = s2
  384. matched = matchedNext
  385. if debugMatches {
  386. println("long match at end-of-match")
  387. }
  388. }
  389. }
  390. // Check prev long...
  391. if true {
  392. coffsetL = candidateL.prev - e.cur - matched + skipBeginning
  393. if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
  394. // Found a long match, at least 4 bytes.
  395. matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
  396. if matchedNext > matched {
  397. t = coffsetL
  398. s = s2
  399. matched = matchedNext
  400. if debugMatches {
  401. println("prev long match at end-of-match")
  402. }
  403. }
  404. }
  405. }
  406. }
  407. // A match has been found. Update recent offsets.
  408. offset2 = offset1
  409. offset1 = s - t
  410. if debugAsserts && s <= t {
  411. panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
  412. }
  413. if debugAsserts && canRepeat && int(offset1) > len(src) {
  414. panic("invalid offset")
  415. }
  416. // Extend the n-byte match as long as possible.
  417. l := matched
  418. // Extend backwards
  419. tMin := s - e.maxMatchOff
  420. if tMin < 0 {
  421. tMin = 0
  422. }
  423. for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
  424. s--
  425. t--
  426. l++
  427. }
  428. // Write our sequence
  429. var seq seq
  430. seq.litLen = uint32(s - nextEmit)
  431. seq.matchLen = uint32(l - zstdMinMatch)
  432. if seq.litLen > 0 {
  433. blk.literals = append(blk.literals, src[nextEmit:s]...)
  434. }
  435. seq.offset = uint32(s-t) + 3
  436. s += l
  437. if debugSequences {
  438. println("sequence", seq, "next s:", s)
  439. }
  440. blk.sequences = append(blk.sequences, seq)
  441. nextEmit = s
  442. if s >= sLimit {
  443. break encodeLoop
  444. }
  445. // Index match start+1 (long) -> s - 1
  446. index0 := s - l + 1
  447. for index0 < s-1 {
  448. cv0 := load6432(src, index0)
  449. cv1 := cv0 >> 8
  450. h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
  451. off := index0 + e.cur
  452. e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
  453. e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
  454. index0 += 2
  455. }
  456. cv = load6432(src, s)
  457. if !canRepeat {
  458. continue
  459. }
  460. // Check offset 2
  461. for {
  462. o2 := s - offset2
  463. if load3232(src, o2) != uint32(cv) {
  464. // Do regular search
  465. break
  466. }
  467. // Store this, since we have it.
  468. nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
  469. nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
  470. // We have at least 4 byte match.
  471. // No need to check backwards. We come straight from a match
  472. l := 4 + e.matchlen(s+4, o2+4, src)
  473. e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
  474. e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
  475. seq.matchLen = uint32(l) - zstdMinMatch
  476. seq.litLen = 0
  477. // Since litlen is always 0, this is offset 1.
  478. seq.offset = 1
  479. s += l
  480. nextEmit = s
  481. if debugSequences {
  482. println("sequence", seq, "next s:", s)
  483. }
  484. blk.sequences = append(blk.sequences, seq)
  485. // Swap offset 1 and 2.
  486. offset1, offset2 = offset2, offset1
  487. if s >= sLimit {
  488. // Finished
  489. break encodeLoop
  490. }
  491. cv = load6432(src, s)
  492. }
  493. }
  494. if int(nextEmit) < len(src) {
  495. blk.literals = append(blk.literals, src[nextEmit:]...)
  496. blk.extraLits = len(src) - int(nextEmit)
  497. }
  498. blk.recentOffsets[0] = uint32(offset1)
  499. blk.recentOffsets[1] = uint32(offset2)
  500. if debugEncoder {
  501. println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
  502. }
  503. }
// EncodeNoHist will encode a block with no history and no following blocks.
// Most notable difference is that src will not be copied for history and
// we do not need to check for max match length.
// It simply ensures the history buffer is large enough and delegates to Encode.
func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
	e.ensureHist(len(src))
	e.Encode(blk, src)
}
  511. // Encode improves compression...
  512. func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
  513. const (
  514. // Input margin is the number of bytes we read (8)
  515. // and the maximum we will read ahead (2)
  516. inputMargin = 8 + 2
  517. minNonLiteralBlockSize = 16
  518. )
  519. // Protect against e.cur wraparound.
  520. for e.cur >= e.bufferReset-int32(len(e.hist)) {
  521. if len(e.hist) == 0 {
  522. for i := range e.table[:] {
  523. e.table[i] = tableEntry{}
  524. }
  525. for i := range e.longTable[:] {
  526. e.longTable[i] = prevEntry{}
  527. }
  528. e.cur = e.maxMatchOff
  529. e.allDirty = true
  530. break
  531. }
  532. // Shift down everything in the table that isn't already too far away.
  533. minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
  534. for i := range e.table[:] {
  535. v := e.table[i].offset
  536. if v < minOff {
  537. v = 0
  538. } else {
  539. v = v - e.cur + e.maxMatchOff
  540. }
  541. e.table[i].offset = v
  542. }
  543. for i := range e.longTable[:] {
  544. v := e.longTable[i].offset
  545. v2 := e.longTable[i].prev
  546. if v < minOff {
  547. v = 0
  548. v2 = 0
  549. } else {
  550. v = v - e.cur + e.maxMatchOff
  551. if v2 < minOff {
  552. v2 = 0
  553. } else {
  554. v2 = v2 - e.cur + e.maxMatchOff
  555. }
  556. }
  557. e.longTable[i] = prevEntry{
  558. offset: v,
  559. prev: v2,
  560. }
  561. }
  562. e.allDirty = true
  563. e.cur = e.maxMatchOff
  564. break
  565. }
  566. s := e.addBlock(src)
  567. blk.size = len(src)
  568. if len(src) < minNonLiteralBlockSize {
  569. blk.extraLits = len(src)
  570. blk.literals = blk.literals[:len(src)]
  571. copy(blk.literals, src)
  572. return
  573. }
  574. // Override src
  575. src = e.hist
  576. sLimit := int32(len(src)) - inputMargin
  577. // stepSize is the number of bytes to skip on every main loop iteration.
  578. // It should be >= 1.
  579. const stepSize = 1
  580. const kSearchStrength = 9
  581. // nextEmit is where in src the next emitLiteral should start from.
  582. nextEmit := s
  583. cv := load6432(src, s)
  584. // Relative offsets
  585. offset1 := int32(blk.recentOffsets[0])
  586. offset2 := int32(blk.recentOffsets[1])
  587. addLiterals := func(s *seq, until int32) {
  588. if until == nextEmit {
  589. return
  590. }
  591. blk.literals = append(blk.literals, src[nextEmit:until]...)
  592. s.litLen = uint32(until - nextEmit)
  593. }
  594. if debugEncoder {
  595. println("recent offsets:", blk.recentOffsets)
  596. }
  597. encodeLoop:
  598. for {
  599. var t int32
  600. // We allow the encoder to optionally turn off repeat offsets across blocks
  601. canRepeat := len(blk.sequences) > 2
  602. var matched int32
  603. for {
  604. if debugAsserts && canRepeat && offset1 == 0 {
  605. panic("offset0 was 0")
  606. }
  607. nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
  608. nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
  609. candidateL := e.longTable[nextHashL]
  610. candidateS := e.table[nextHashS]
  611. const repOff = 1
  612. repIndex := s - offset1 + repOff
  613. off := s + e.cur
  614. e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
  615. e.markLongShardDirty(nextHashL)
  616. e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
  617. e.markShortShardDirty(nextHashS)
  618. if canRepeat {
  619. if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
  620. // Consider history as well.
  621. var seq seq
  622. lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
  623. seq.matchLen = uint32(lenght - zstdMinMatch)
  624. // We might be able to match backwards.
  625. // Extend as long as we can.
  626. start := s + repOff
  627. // We end the search early, so we don't risk 0 literals
  628. // and have to do special offset treatment.
  629. startLimit := nextEmit + 1
  630. tMin := s - e.maxMatchOff
  631. if tMin < 0 {
  632. tMin = 0
  633. }
  634. for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
  635. repIndex--
  636. start--
  637. seq.matchLen++
  638. }
  639. addLiterals(&seq, start)
  640. // rep 0
  641. seq.offset = 1
  642. if debugSequences {
  643. println("repeat sequence", seq, "next s:", s)
  644. }
  645. blk.sequences = append(blk.sequences, seq)
  646. // Index match start+1 (long) -> s - 1
  647. index0 := s + repOff
  648. s += lenght + repOff
  649. nextEmit = s
  650. if s >= sLimit {
  651. if debugEncoder {
  652. println("repeat ended", s, lenght)
  653. }
  654. break encodeLoop
  655. }
  656. // Index skipped...
  657. for index0 < s-1 {
  658. cv0 := load6432(src, index0)
  659. cv1 := cv0 >> 8
  660. h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
  661. off := index0 + e.cur
  662. e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
  663. e.markLongShardDirty(h0)
  664. h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
  665. e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
  666. e.markShortShardDirty(h1)
  667. index0 += 2
  668. }
  669. cv = load6432(src, s)
  670. continue
  671. }
  672. const repOff2 = 1
  673. // We deviate from the reference encoder and also check offset 2.
  674. // Still slower and not much better, so disabled.
  675. // repIndex = s - offset2 + repOff2
  676. if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
  677. // Consider history as well.
  678. var seq seq
  679. lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
  680. seq.matchLen = uint32(lenght - zstdMinMatch)
  681. // We might be able to match backwards.
  682. // Extend as long as we can.
  683. start := s + repOff2
  684. // We end the search early, so we don't risk 0 literals
  685. // and have to do special offset treatment.
  686. startLimit := nextEmit + 1
  687. tMin := s - e.maxMatchOff
  688. if tMin < 0 {
  689. tMin = 0
  690. }
  691. for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
  692. repIndex--
  693. start--
  694. seq.matchLen++
  695. }
  696. addLiterals(&seq, start)
  697. // rep 2
  698. seq.offset = 2
  699. if debugSequences {
  700. println("repeat sequence 2", seq, "next s:", s)
  701. }
  702. blk.sequences = append(blk.sequences, seq)
  703. index0 := s + repOff2
  704. s += lenght + repOff2
  705. nextEmit = s
  706. if s >= sLimit {
  707. if debugEncoder {
  708. println("repeat ended", s, lenght)
  709. }
  710. break encodeLoop
  711. }
  712. // Index skipped...
  713. for index0 < s-1 {
  714. cv0 := load6432(src, index0)
  715. cv1 := cv0 >> 8
  716. h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
  717. off := index0 + e.cur
  718. e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
  719. e.markLongShardDirty(h0)
  720. h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
  721. e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
  722. e.markShortShardDirty(h1)
  723. index0 += 2
  724. }
  725. cv = load6432(src, s)
  726. // Swap offsets
  727. offset1, offset2 = offset2, offset1
  728. continue
  729. }
  730. }
  731. // Find the offsets of our two matches.
  732. coffsetL := candidateL.offset - e.cur
  733. coffsetLP := candidateL.prev - e.cur
  734. // Check if we have a long match.
  735. if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
  736. // Found a long match, at least 8 bytes.
  737. matched = e.matchlen(s+8, coffsetL+8, src) + 8
  738. t = coffsetL
  739. if debugAsserts && s <= t {
  740. panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
  741. }
  742. if debugAsserts && s-t > e.maxMatchOff {
  743. panic("s - t >e.maxMatchOff")
  744. }
  745. if debugMatches {
  746. println("long match")
  747. }
  748. if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
  749. // Found a long match, at least 8 bytes.
  750. prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
  751. if prevMatch > matched {
  752. matched = prevMatch
  753. t = coffsetLP
  754. }
  755. if debugAsserts && s <= t {
  756. panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
  757. }
  758. if debugAsserts && s-t > e.maxMatchOff {
  759. panic("s - t >e.maxMatchOff")
  760. }
  761. if debugMatches {
  762. println("long match")
  763. }
  764. }
  765. break
  766. }
  767. // Check if we have a long match on prev.
  768. if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
  769. // Found a long match, at least 8 bytes.
  770. matched = e.matchlen(s+8, coffsetLP+8, src) + 8
  771. t = coffsetLP
  772. if debugAsserts && s <= t {
  773. panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
  774. }
  775. if debugAsserts && s-t > e.maxMatchOff {
  776. panic("s - t >e.maxMatchOff")
  777. }
  778. if debugMatches {
  779. println("long match")
  780. }
  781. break
  782. }
  783. coffsetS := candidateS.offset - e.cur
  784. // Check if we have a short match.
  785. if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
  786. // found a regular match
  787. matched = e.matchlen(s+4, coffsetS+4, src) + 4
  788. // See if we can find a long match at s+1
  789. const checkAt = 1
  790. cv := load6432(src, s+checkAt)
  791. nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
  792. candidateL = e.longTable[nextHashL]
  793. coffsetL = candidateL.offset - e.cur
  794. // We can store it, since we have at least a 4 byte match.
  795. e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
  796. e.markLongShardDirty(nextHashL)
  797. if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
  798. // Found a long match, at least 8 bytes.
  799. matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
  800. if matchedNext > matched {
  801. t = coffsetL
  802. s += checkAt
  803. matched = matchedNext
  804. if debugMatches {
  805. println("long match (after short)")
  806. }
  807. break
  808. }
  809. }
  810. // Check prev long...
  811. coffsetL = candidateL.prev - e.cur
  812. if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
  813. // Found a long match, at least 8 bytes.
  814. matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
  815. if matchedNext > matched {
  816. t = coffsetL
  817. s += checkAt
  818. matched = matchedNext
  819. if debugMatches {
  820. println("prev long match (after short)")
  821. }
  822. break
  823. }
  824. }
  825. t = coffsetS
  826. if debugAsserts && s <= t {
  827. panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
  828. }
  829. if debugAsserts && s-t > e.maxMatchOff {
  830. panic("s - t >e.maxMatchOff")
  831. }
  832. if debugAsserts && t < 0 {
  833. panic("t<0")
  834. }
  835. if debugMatches {
  836. println("short match")
  837. }
  838. break
  839. }
  840. // No match found, move forward in input.
  841. s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
  842. if s >= sLimit {
  843. break encodeLoop
  844. }
  845. cv = load6432(src, s)
  846. }
  847. // Try to find a better match by searching for a long match at the end of the current best match
  848. if s+matched < sLimit {
  849. nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
  850. cv := load3232(src, s)
  851. candidateL := e.longTable[nextHashL]
  852. coffsetL := candidateL.offset - e.cur - matched
  853. if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
  854. // Found a long match, at least 4 bytes.
  855. matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
  856. if matchedNext > matched {
  857. t = coffsetL
  858. matched = matchedNext
  859. if debugMatches {
  860. println("long match at end-of-match")
  861. }
  862. }
  863. }
  864. // Check prev long...
  865. if true {
  866. coffsetL = candidateL.prev - e.cur - matched
  867. if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
  868. // Found a long match, at least 4 bytes.
  869. matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
  870. if matchedNext > matched {
  871. t = coffsetL
  872. matched = matchedNext
  873. if debugMatches {
  874. println("prev long match at end-of-match")
  875. }
  876. }
  877. }
  878. }
  879. }
  880. // A match has been found. Update recent offsets.
  881. offset2 = offset1
  882. offset1 = s - t
  883. if debugAsserts && s <= t {
  884. panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
  885. }
  886. if debugAsserts && canRepeat && int(offset1) > len(src) {
  887. panic("invalid offset")
  888. }
  889. // Extend the n-byte match as long as possible.
  890. l := matched
  891. // Extend backwards
  892. tMin := s - e.maxMatchOff
  893. if tMin < 0 {
  894. tMin = 0
  895. }
  896. for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
  897. s--
  898. t--
  899. l++
  900. }
  901. // Write our sequence
  902. var seq seq
  903. seq.litLen = uint32(s - nextEmit)
  904. seq.matchLen = uint32(l - zstdMinMatch)
  905. if seq.litLen > 0 {
  906. blk.literals = append(blk.literals, src[nextEmit:s]...)
  907. }
  908. seq.offset = uint32(s-t) + 3
  909. s += l
  910. if debugSequences {
  911. println("sequence", seq, "next s:", s)
  912. }
  913. blk.sequences = append(blk.sequences, seq)
  914. nextEmit = s
  915. if s >= sLimit {
  916. break encodeLoop
  917. }
  918. // Index match start+1 (long) -> s - 1
  919. index0 := s - l + 1
  920. for index0 < s-1 {
  921. cv0 := load6432(src, index0)
  922. cv1 := cv0 >> 8
  923. h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
  924. off := index0 + e.cur
  925. e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
  926. e.markLongShardDirty(h0)
  927. h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
  928. e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
  929. e.markShortShardDirty(h1)
  930. index0 += 2
  931. }
  932. cv = load6432(src, s)
  933. if !canRepeat {
  934. continue
  935. }
  936. // Check offset 2
  937. for {
  938. o2 := s - offset2
  939. if load3232(src, o2) != uint32(cv) {
  940. // Do regular search
  941. break
  942. }
  943. // Store this, since we have it.
  944. nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
  945. nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
  946. // We have at least 4 byte match.
  947. // No need to check backwards. We come straight from a match
  948. l := 4 + e.matchlen(s+4, o2+4, src)
  949. e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
  950. e.markLongShardDirty(nextHashL)
  951. e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
  952. e.markShortShardDirty(nextHashS)
  953. seq.matchLen = uint32(l) - zstdMinMatch
  954. seq.litLen = 0
  955. // Since litlen is always 0, this is offset 1.
  956. seq.offset = 1
  957. s += l
  958. nextEmit = s
  959. if debugSequences {
  960. println("sequence", seq, "next s:", s)
  961. }
  962. blk.sequences = append(blk.sequences, seq)
  963. // Swap offset 1 and 2.
  964. offset1, offset2 = offset2, offset1
  965. if s >= sLimit {
  966. // Finished
  967. break encodeLoop
  968. }
  969. cv = load6432(src, s)
  970. }
  971. }
  972. if int(nextEmit) < len(src) {
  973. blk.literals = append(blk.literals, src[nextEmit:]...)
  974. blk.extraLits = len(src) - int(nextEmit)
  975. }
  976. blk.recentOffsets[0] = uint32(offset1)
  977. blk.recentOffsets[1] = uint32(offset2)
  978. if debugEncoder {
  979. println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
  980. }
  981. }
  982. // ResetDict will reset and set a dictionary if not nil
  983. func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
  984. e.resetBase(d, singleBlock)
  985. if d != nil {
  986. panic("betterFastEncoder: Reset with dict")
  987. }
  988. }
  989. // ResetDict will reset and set a dictionary if not nil
  990. func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
  991. e.resetBase(d, singleBlock)
  992. if d == nil {
  993. return
  994. }
  995. // Init or copy dict table
  996. if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
  997. if len(e.dictTable) != len(e.table) {
  998. e.dictTable = make([]tableEntry, len(e.table))
  999. }
  1000. end := int32(len(d.content)) - 8 + e.maxMatchOff
  1001. for i := e.maxMatchOff; i < end; i += 4 {
  1002. const hashLog = betterShortTableBits
  1003. cv := load6432(d.content, i-e.maxMatchOff)
  1004. nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4
  1005. nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5
  1006. nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
  1007. nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
  1008. e.dictTable[nextHash] = tableEntry{
  1009. val: uint32(cv),
  1010. offset: i,
  1011. }
  1012. e.dictTable[nextHash1] = tableEntry{
  1013. val: uint32(cv >> 8),
  1014. offset: i + 1,
  1015. }
  1016. e.dictTable[nextHash2] = tableEntry{
  1017. val: uint32(cv >> 16),
  1018. offset: i + 2,
  1019. }
  1020. e.dictTable[nextHash3] = tableEntry{
  1021. val: uint32(cv >> 24),
  1022. offset: i + 3,
  1023. }
  1024. }
  1025. e.lastDictID = d.id
  1026. e.allDirty = true
  1027. }
  1028. // Init or copy dict table
  1029. if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
  1030. if len(e.dictLongTable) != len(e.longTable) {
  1031. e.dictLongTable = make([]prevEntry, len(e.longTable))
  1032. }
  1033. if len(d.content) >= 8 {
  1034. cv := load6432(d.content, 0)
  1035. h := hashLen(cv, betterLongTableBits, betterLongLen)
  1036. e.dictLongTable[h] = prevEntry{
  1037. offset: e.maxMatchOff,
  1038. prev: e.dictLongTable[h].offset,
  1039. }
  1040. end := int32(len(d.content)) - 8 + e.maxMatchOff
  1041. off := 8 // First to read
  1042. for i := e.maxMatchOff + 1; i < end; i++ {
  1043. cv = cv>>8 | (uint64(d.content[off]) << 56)
  1044. h := hashLen(cv, betterLongTableBits, betterLongLen)
  1045. e.dictLongTable[h] = prevEntry{
  1046. offset: i,
  1047. prev: e.dictLongTable[h].offset,
  1048. }
  1049. off++
  1050. }
  1051. }
  1052. e.lastDictID = d.id
  1053. e.allDirty = true
  1054. }
  1055. // Reset table to initial state
  1056. {
  1057. dirtyShardCnt := 0
  1058. if !e.allDirty {
  1059. for i := range e.shortTableShardDirty {
  1060. if e.shortTableShardDirty[i] {
  1061. dirtyShardCnt++
  1062. }
  1063. }
  1064. }
  1065. const shardCnt = betterShortTableShardCnt
  1066. const shardSize = betterShortTableShardSize
  1067. if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
  1068. copy(e.table[:], e.dictTable)
  1069. for i := range e.shortTableShardDirty {
  1070. e.shortTableShardDirty[i] = false
  1071. }
  1072. } else {
  1073. for i := range e.shortTableShardDirty {
  1074. if !e.shortTableShardDirty[i] {
  1075. continue
  1076. }
  1077. copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
  1078. e.shortTableShardDirty[i] = false
  1079. }
  1080. }
  1081. }
  1082. {
  1083. dirtyShardCnt := 0
  1084. if !e.allDirty {
  1085. for i := range e.shortTableShardDirty {
  1086. if e.shortTableShardDirty[i] {
  1087. dirtyShardCnt++
  1088. }
  1089. }
  1090. }
  1091. const shardCnt = betterLongTableShardCnt
  1092. const shardSize = betterLongTableShardSize
  1093. if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
  1094. copy(e.longTable[:], e.dictLongTable)
  1095. for i := range e.longTableShardDirty {
  1096. e.longTableShardDirty[i] = false
  1097. }
  1098. } else {
  1099. for i := range e.longTableShardDirty {
  1100. if !e.longTableShardDirty[i] {
  1101. continue
  1102. }
  1103. copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
  1104. e.longTableShardDirty[i] = false
  1105. }
  1106. }
  1107. }
  1108. e.cur = e.maxMatchOff
  1109. e.allDirty = false
  1110. }
  1111. func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
  1112. e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
  1113. }
  1114. func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
  1115. e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
  1116. }