level1.go 4.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179
  1. package flate
  2. import "fmt"
// fastEncL1 is the generic level-1 encoder. It maintains the hash table
// used to find matches, and (via the embedded fastGen) the byte history
// of previous blocks.
type fastEncL1 struct {
	fastGen
	// table maps hash(32-bit input value) -> the most recent position that
	// value was seen, stored biased by e.cur (see Encode for the biasing
	// and the periodic rebase that keeps offsets small).
	table [tableSize]tableEntry
}
// Encode compresses src into dst tokens using the level-1 strategy: a
// Snappy-like greedy search over a single hash table of 32-bit values,
// emitting literal runs between matches.
func (e *fastEncL1) Encode(dst *tokens, src []byte) {
	const (
		// Stop searching for matches this many bytes before the end of
		// src so the unconditional multi-byte loads below stay in bounds.
		inputMargin = 12 - 1
		// Inputs shorter than this are handed back to the caller as-is.
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)
	if debugDeflate && e.cur < 0 {
		// A negative running offset is a programmer error; only checked
		// in debug builds.
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound: when the running offset reaches
	// bufferReset, rebase (or drop) every stored table entry so offsets
	// stay small and remain comparable against maxMatchOffset.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history worth preserving: clear the whole table.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				// Entry is beyond matching range after the rebase; drop it.
				v = 0
			} else {
				// Rebase so the entry stays valid once e.cur is reset below.
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	// s indexes the start of this block within e.hist (addBlock appends
	// src to the history — see fastGen).
	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src: from here on all indices (s, t, nextEmit, ...) are
	// into the full history buffer, not the caller's src slice.
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load3232(src, s)

	for {
		// skipLog controls how quickly the step size grows through
		// incompressible data; doEvery is the base step per probe.
		const skipLog = 5
		const doEvery = 2

		nextS := s
		var candidate tableEntry
		// Search loop: probe the table until a 4-byte match in range is
		// found, doing two probes per iteration.
		for {
			nextHash := hash(cv)
			candidate = e.table[nextHash]
			// Step grows with the distance since the last emit, skipping
			// faster over input that isn't matching.
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}

			now := load6432(src, nextS)
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			nextHash = hash(uint32(now))

			// candidate.offset is biased by e.cur; offset is the match
			// distance, which must be below maxMatchOffset to be usable.
			offset := s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
				// Match found; also index nextS before leaving the loop.
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}

			// Do one right away...
			cv = uint32(now)
			s = nextS
			nextS++
			candidate = e.table[nextHash]
			now >>= 8
			e.table[nextHash] = tableEntry{offset: s + e.cur}

			offset = s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}
			cv = uint32(now)
			s = nextS
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			t := candidate.offset - e.cur
			l := e.matchlenLong(s+4, t+4, src) + 4

			// Extend backwards over any bytes we skipped past that also match.
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				emitLiteral(dst, src[nextEmit:s])
			}

			// Save the match found
			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}
			if s >= sLimit {
				// Index first pair after match end. (s has already moved
				// past the match, so s+l+4 is a conservative bound for the
				// 4-byte load at s.)
				if int(s+l+4) < len(src) {
					cv := load3232(src, s)
					e.table[hash(cv)] = tableEntry{offset: s + e.cur}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-2)
			o := e.cur + s - 2
			prevHash := hash(uint32(x))
			e.table[prevHash] = tableEntry{offset: o}
			x >>= 16
			currHash := hash(uint32(x))
			candidate = e.table[currHash]
			e.table[currHash] = tableEntry{offset: o + 2}

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
				// No usable match at s; resume the search loop from s+1.
				cv = uint32(x >> 8)
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}