reader.go 9.4 KB

package lz4

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"runtime"
	"sync"
	"sync/atomic"
)

// errEndOfBlock is returned by readBlock when it has reached the last block of the frame.
// It is not an error.
var errEndOfBlock = errors.New("end of block")
// Reader implements the LZ4 frame decoder.
// The Header is set after the first call to Read().
// The Header may change between Read() calls in case of concatenated frames.
type Reader struct {
	Header
	Pos int64 // position within the source

	src      io.Reader
	checksum hash.Hash32    // frame hash
	wg       sync.WaitGroup // decompressing go routine wait group
	data     []byte         // buffered decompressed data
	window   []byte         // 64Kb decompressed data window
}
// NewReader returns a new LZ4 frame decoder.
// No access to the underlying io.Reader is performed.
func NewReader(src io.Reader) *Reader {
	return &Reader{
		src:      src,
		checksum: hashPool.Get(),
	}
}
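
// A minimal usage sketch (illustrative only, not part of the original file):
// wrap any compressed io.Reader with NewReader and drain it with Read.
// The names `src` and `dst` below are placeholders, not identifiers from this package.
//
//	zr := lz4.NewReader(src)
//	chunk := make([]byte, 64<<10)
//	for {
//		n, err := zr.Read(chunk)
//		if n > 0 {
//			dst.Write(chunk[:n])
//		}
//		if err == io.EOF {
//			break // end of the (last) frame
//		}
//		if err != nil {
//			return err
//		}
//	}
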
// readHeader checks the frame magic number and parses the frame descriptor.
// Skippable frames are supported even as a first frame, although the LZ4
// specification recommends that skippable frames not be used as first frames.
func (z *Reader) readHeader(first bool) error {
	defer z.checksum.Reset()

	for {
		var magic uint32
		if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil {
			if !first && err == io.ErrUnexpectedEOF {
				return io.EOF
			}
			return err
		}
		z.Pos += 4
		if magic>>8 == frameSkipMagic>>8 {
			var skipSize uint32
			if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil {
				return err
			}
			z.Pos += 4
			m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
			z.Pos += m
			if err != nil {
				return err
			}
			continue
		}
		if magic != frameMagic {
			return fmt.Errorf("lz4.Read: invalid frame magic number: got %x expected %x", magic, frameMagic)
		}
		break
	}

	// header
	var buf [8]byte
	if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
		return err
	}
	z.Pos += 2

	b := buf[0]
	if b>>6 != Version {
		return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version)
	}
	z.BlockDependency = b>>5&1 == 0
	z.BlockChecksum = b>>4&1 > 0
	frameSize := b>>3&1 > 0
	z.NoChecksum = b>>2&1 == 0
	// z.Dict = b&1 > 0

	bmsID := buf[1] >> 4 & 0x7
	bSize, ok := bsMapID[bmsID]
	if !ok {
		return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID)
	}
	z.BlockMaxSize = bSize

	z.checksum.Write(buf[0:2])

	if frameSize {
		if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil {
			return err
		}
		z.Pos += 8
		binary.LittleEndian.PutUint64(buf[:], z.Size)
		z.checksum.Write(buf[0:8])
	}

	// if z.Dict {
	// 	if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil {
	// 		return err
	// 	}
	// 	z.Pos += 4
	// 	binary.LittleEndian.PutUint32(buf[:], z.DictID)
	// 	z.checksum.Write(buf[0:4])
	// }

	// header checksum
	if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
		return err
	}
	z.Pos++
	if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
		return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h)
	}

	z.Header.done = true

	return nil
}
// Read decompresses data from the underlying source into the supplied buffer.
//
// Since there can be multiple streams concatenated, Header values may
// change between calls to Read(). If that is the case, no data is actually read from
// the underlying io.Reader, to allow for potential input buffer resizing.
//
// Data is buffered if the input buffer is too small, and exhausted upon successive calls.
//
// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is
// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value.
func (z *Reader) Read(buf []byte) (n int, err error) {
	if !z.Header.done {
		if err = z.readHeader(true); err != nil {
			return
		}
	}

	if len(buf) == 0 {
		return
	}

	// exhaust remaining data from previous Read()
	if len(z.data) > 0 {
		n = copy(buf, z.data)
		z.data = z.data[n:]
		if len(z.data) == 0 {
			z.data = nil
		}
		return
	}
	// Break up the input buffer into BlockMaxSize blocks with at least one block.
	// Then decompress into each of them concurrently if possible (no dependency).
	// In case of dependency, the first block will be missing the window (except on the
	// very first call), the rest will have it already since it comes from the previous block.
	wbuf := buf
	zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize
	zblocks := make([]block, zn)
	for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ {
		zb := &zblocks[zi]
		// last block may be too small
		if len(wbuf) < z.BlockMaxSize+len(z.window) {
			wbuf = make([]byte, z.BlockMaxSize+len(z.window))
		}
		copy(wbuf, z.window)
		if zb.err = z.readBlock(wbuf, zb); zb.err != nil {
			break
		}
		wbuf = wbuf[z.BlockMaxSize:]
		if !z.BlockDependency {
			z.wg.Add(1)
			go z.decompressBlock(zb, &abort)
			continue
		}
		// cannot decompress concurrently when dealing with block dependency
		z.decompressBlock(zb, nil)
		// the last block may not contain enough data
		if len(zb.data) >= winSize {
			if len(z.window) == 0 {
				z.window = make([]byte, winSize)
			}
			copy(z.window, zb.data[len(zb.data)-winSize:])
		}
	}
	z.wg.Wait()

	// since a block size may be less than BlockMaxSize, trim the decompressed buffers
	for _, zb := range zblocks {
		if zb.err != nil {
			if zb.err == errEndOfBlock {
				return n, z.close()
			}
			return n, zb.err
		}
		bLen := len(zb.data)
		if !z.NoChecksum {
			z.checksum.Write(zb.data)
		}
		m := copy(buf[n:], zb.data)
		// buffer the remaining data (this is necessarily the last block)
		if m < bLen {
			z.data = zb.data[m:]
		}
		n += m
	}

	return
}
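
// Usage sketch for the concurrent path described above (illustrative only):
// once the frame header has been parsed, BlockMaxSize is known and the caller
// can size its buffer as a multiple of it. A zero-length read is enough to
// trigger header parsing without consuming any block data. Assumes `zr` is a
// *Reader obtained from NewReader.
//
//	if _, err := zr.Read(nil); err != nil {
//		return err // header could not be parsed
//	}
//	buf := make([]byte, runtime.GOMAXPROCS(0)*zr.BlockMaxSize)
//	n, err := zr.Read(buf) // blocks are decompressed concurrently when independent
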
// readBlock reads an entire frame block from the frame.
// The input buffer is the one that will receive the decompressed data.
// If the end of the frame is detected, it returns the errEndOfBlock error.
func (z *Reader) readBlock(buf []byte, b *block) error {
	var bLen int32
	if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil {
		return err
	}
	z.Pos += 4

	switch {
	case bLen == 0:
		return errEndOfBlock
	case bLen > 0:
		b.compressed = true
		b.data = buf
		b.zdata = make([]byte, bLen)
	default:
		b.data = buf[:-bLen]
		b.zdata = buf[:-bLen]
	}
	if _, err := io.ReadFull(z.src, b.zdata); err != nil {
		return err
	}

	if z.BlockChecksum {
		if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil {
			return err
		}
		xxh := hashPool.Get()
		defer hashPool.Put(xxh)
		xxh.Write(b.zdata)
		if h := xxh.Sum32(); h != b.checksum {
			return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum)
		}
	}

	return nil
}
// decompressBlock decompresses a frame block.
// In case of an error, the block err is set with it and abort is set to 1.
func (z *Reader) decompressBlock(b *block, abort *uint32) {
	if abort != nil {
		defer z.wg.Done()
	}
	if b.compressed {
		n := len(z.window)
		m, err := UncompressBlock(b.zdata, b.data, n)
		if err != nil {
			if abort != nil {
				atomic.StoreUint32(abort, 1)
			}
			b.err = err
			return
		}
		b.data = b.data[n : n+m]
	}
	z.Pos += int64(len(b.data))
}
// close validates the frame checksum (if any) and checks the next frame (if any).
func (z *Reader) close() error {
	if !z.NoChecksum {
		var checksum uint32
		if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil {
			return err
		}
		if checksum != z.checksum.Sum32() {
			return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum)
		}
	}

	// get ready for the next concatenated frame, but do not change the position
	pos := z.Pos
	z.Reset(z.src)
	z.Pos = pos

	// since multiple frames can be concatenated, check for another one
	return z.readHeader(false)
}
// Reset discards the Reader's state and makes it equivalent to the
// result of NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) {
	z.Header = Header{}
	z.Pos = 0
	z.src = r
	z.checksum.Reset()
	z.data = nil
	z.window = nil
}
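
// Reuse sketch (illustrative only): Reset lets a single Reader decode several
// independent inputs without allocating a new decoder each time. `inputs` is a
// hypothetical []io.Reader, not an identifier from this package.
//
//	zr := lz4.NewReader(nil)
//	for _, in := range inputs {
//		zr.Reset(in)
//		if _, err := io.Copy(ioutil.Discard, zr); err != nil {
//			return err
//		}
//	}
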
// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer.
// Returns the number of bytes written.
func (z *Reader) WriteTo(w io.Writer) (n int64, err error) {
	cpus := runtime.GOMAXPROCS(0)
	var buf []byte

	// Since the initial buffer is nil, the first Read only parses the compressed frame options.
	// The buffer can then be sized appropriately to support maximum concurrency decompression.
	// If multiple frames are concatenated, Read() will return with no data decompressed but with
	// potentially changed options. The buffer will be resized accordingly, always trying to
	// maximize concurrency.
	for {
		nsize := 0
		// the block max size can change if multiple streams are concatenated.
		// Check it after every Read().
		if z.BlockDependency {
			// in case of dependency, we cannot decompress concurrently,
			// so allocate the minimum buffer + window size
			nsize = len(z.window) + z.BlockMaxSize
		} else {
			// if no dependency, allocate a buffer large enough for concurrent decompression
			nsize = cpus * z.BlockMaxSize
		}
		if nsize != len(buf) {
			buf = make([]byte, nsize)
		}

		m, er := z.Read(buf)
		if er != nil && er != io.EOF {
			return n, er
		}
		m, err = w.Write(buf[:m])
		n += int64(m)
		if err != nil || er == io.EOF {
			return
		}
	}
}
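
// Note (illustrative only): because *Reader implements io.WriterTo, io.Copy
// detects it and delegates to WriteTo, so stream decompression can also be
// written as a plain copy. `src` and `dst` are placeholder names.
//
//	zr := lz4.NewReader(src)
//	n, err := io.Copy(dst, zr) // internally calls zr.WriteTo(dst)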