Browse Source

Moved frame-related types and errors to their own internal package to clean up the lz4 package scope

Pierre.Curto 5 years ago
parent
commit
e80bcfe16a

+ 5 - 4
bench_test.go

@@ -7,6 +7,7 @@ import (
 	"testing"
 	"testing"
 
 
 	"github.com/pierrec/lz4"
 	"github.com/pierrec/lz4"
+	"github.com/pierrec/lz4/internal/lz4block"
 )
 )
 
 
 func BenchmarkCompress(b *testing.B) {
 func BenchmarkCompress(b *testing.B) {
@@ -16,7 +17,7 @@ func BenchmarkCompress(b *testing.B) {
 	b.ResetTimer()
 	b.ResetTimer()
 
 
 	for i := 0; i < b.N; i++ {
 	for i := 0; i < b.N; i++ {
-		_, _ = lz4.CompressBlock(pg1661, buf, nil)
+		_, _ = lz4block.CompressBlock(pg1661, buf, nil)
 	}
 	}
 }
 }
 
 
@@ -28,7 +29,7 @@ func BenchmarkCompressRandom(b *testing.B) {
 	b.ResetTimer()
 	b.ResetTimer()
 
 
 	for i := 0; i < b.N; i++ {
 	for i := 0; i < b.N; i++ {
-		_, _ = lz4.CompressBlock(random, buf, nil)
+		_, _ = lz4block.CompressBlock(random, buf, nil)
 	}
 	}
 }
 }
 
 
@@ -39,7 +40,7 @@ func BenchmarkCompressHC(b *testing.B) {
 	b.ResetTimer()
 	b.ResetTimer()
 
 
 	for i := 0; i < b.N; i++ {
 	for i := 0; i < b.N; i++ {
-		_, _ = lz4.CompressBlockHC(pg1661, buf, 16, nil)
+		_, _ = lz4block.CompressBlockHC(pg1661, buf, 16, nil)
 	}
 	}
 }
 }
 
 
@@ -50,7 +51,7 @@ func BenchmarkUncompress(b *testing.B) {
 	b.ResetTimer()
 	b.ResetTimer()
 
 
 	for i := 0; i < b.N; i++ {
 	for i := 0; i < b.N; i++ {
-		_, _ = lz4.UncompressBlock(pg1661LZ4, buf)
+		_, _ = lz4block.UncompressBlock(pg1661LZ4, buf)
 	}
 	}
 }
 }
 
 

+ 3 - 2
example_test.go

@@ -7,6 +7,7 @@ import (
 	"strings"
 	"strings"
 
 
 	"github.com/pierrec/lz4"
 	"github.com/pierrec/lz4"
+	"github.com/pierrec/lz4/internal/lz4block"
 )
 )
 
 
 func Example() {
 func Example() {
@@ -37,7 +38,7 @@ func ExampleCompressBlock() {
 	data := []byte(strings.Repeat(s, 100))
 	data := []byte(strings.Repeat(s, 100))
 	buf := make([]byte, len(data))
 	buf := make([]byte, len(data))
 
 
-	n, err := lz4.CompressBlock(data, buf, nil)
+	n, err := lz4block.CompressBlock(data, buf, nil)
 	if err != nil {
 	if err != nil {
 		fmt.Println(err)
 		fmt.Println(err)
 	}
 	}
@@ -48,7 +49,7 @@ func ExampleCompressBlock() {
 
 
 	// Allocated a very large buffer for decompression.
 	// Allocated a very large buffer for decompression.
 	out := make([]byte, 10*len(data))
 	out := make([]byte, 10*len(data))
-	n, err = lz4.UncompressBlock(buf, out)
+	n, err = lz4block.UncompressBlock(buf, out)
 	if err != nil {
 	if err != nil {
 		fmt.Println(err)
 		fmt.Println(err)
 	}
 	}

+ 29 - 42
block.go → internal/lz4block/block.go

@@ -1,17 +1,36 @@
-package lz4
+package lz4block
 
 
 import (
 import (
 	"encoding/binary"
 	"encoding/binary"
 	"math/bits"
 	"math/bits"
 	"sync"
 	"sync"
+
+	"github.com/pierrec/lz4/internal/lz4errors"
+)
+
+const (
+	// The following constants are used to setup the compression algorithm.
+	minMatch   = 4  // the minimum size of the match sequence size (4 bytes)
+	winSizeLog = 16 // LZ4 64Kb window size limit
+	winSize    = 1 << winSizeLog
+	winMask    = winSize - 1 // 64Kb window of previous data for dependent blocks
+
+	// hashLog determines the size of the hash table used to quickly find a previous match position.
+	// Its value influences the compression speed and memory usage, the lower the faster,
+	// but at the expense of the compression ratio.
+	// 16 seems to be the best compromise for fast compression.
+	hashLog = 16
+	htSize  = 1 << hashLog
+
+	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
 )
 )
 
 
 // Pool of hash tables for CompressBlock.
 // Pool of hash tables for CompressBlock.
-var htPool = sync.Pool{New: func() interface{} { return make([]int, htSize) }}
+var HashTablePool = sync.Pool{New: func() interface{} { return make([]int, htSize) }}
 
 
 func recoverBlock(e *error) {
 func recoverBlock(e *error) {
 	if r := recover(); r != nil && *e == nil {
 	if r := recover(); r != nil && *e == nil {
-		*e = ErrInvalidSourceShortBuffer
+		*e = lz4errors.ErrInvalidSourceShortBuffer
 	}
 	}
 }
 }
 
 
@@ -21,17 +40,10 @@ func blockHash(x uint64) uint32 {
 	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
 	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
 }
 }
 
 
-// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
 func CompressBlockBound(n int) int {
 func CompressBlockBound(n int) int {
 	return n + n/255 + 16
 	return n + n/255 + 16
 }
 }
 
 
-// UncompressBlock uncompresses the source buffer into the destination one,
-// and returns the uncompressed size.
-//
-// The destination buffer must be sized appropriately.
-//
-// An error is returned if the source data is invalid or the destination buffer is too small.
 func UncompressBlock(src, dst []byte) (int, error) {
 func UncompressBlock(src, dst []byte) (int, error) {
 	if len(src) == 0 {
 	if len(src) == 0 {
 		return 0, nil
 		return 0, nil
@@ -39,22 +51,9 @@ func UncompressBlock(src, dst []byte) (int, error) {
 	if di := decodeBlock(dst, src); di >= 0 {
 	if di := decodeBlock(dst, src); di >= 0 {
 		return di, nil
 		return di, nil
 	}
 	}
-	return 0, ErrInvalidSourceShortBuffer
+	return 0, lz4errors.ErrInvalidSourceShortBuffer
 }
 }
 
 
-// CompressBlock compresses the source buffer into the destination one.
-// This is the fast version of LZ4 compression and also the default one.
-//
-// The argument hashTable is scratch space for a hash table used by the
-// compressor. If provided, it should have length at least 1<<16. If it is
-// shorter (or nil), CompressBlock allocates its own hash table.
-//
-// The size of the compressed data is returned.
-//
-// If the destination buffer size is lower than CompressBlockBound and
-// the compressed size is 0 and no error, then the data is incompressible.
-//
-// An error is returned if the destination buffer is too small.
 func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
 func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
 	defer recoverBlock(&err)
 	defer recoverBlock(&err)
 
 
@@ -75,8 +74,8 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
 	}
 	}
 
 
 	if cap(hashTable) < htSize {
 	if cap(hashTable) < htSize {
-		hashTable = htPool.Get().([]int)
-		defer htPool.Put(hashTable)
+		hashTable = HashTablePool.Get().([]int)
+		defer HashTablePool.Put(hashTable)
 	} else {
 	} else {
 		hashTable = hashTable[:htSize]
 		hashTable = hashTable[:htSize]
 	}
 	}
@@ -240,17 +239,6 @@ func blockHashHC(x uint32) uint32 {
 	return x * hasher >> (32 - winSizeLog)
 	return x * hasher >> (32 - winSizeLog)
 }
 }
 
 
-// CompressBlockHC compresses the source buffer src into the destination dst
-// with max search depth (use 0 or negative value for no max).
-//
-// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
-//
-// The size of the compressed data is returned.
-//
-// If the destination buffer size is lower than CompressBlockBound and
-// the compressed size is 0 and no error, then the data is incompressible.
-//
-// An error is returned if the destination buffer is too small.
 func CompressBlockHC(src, dst []byte, depth CompressionLevel, hashTable []int) (_ int, err error) {
 func CompressBlockHC(src, dst []byte, depth CompressionLevel, hashTable []int) (_ int, err error) {
 	defer recoverBlock(&err)
 	defer recoverBlock(&err)
 
 
@@ -272,21 +260,20 @@ func CompressBlockHC(src, dst []byte, depth CompressionLevel, hashTable []int) (
 	}
 	}
 
 
 	if cap(hashTable) < htSize {
 	if cap(hashTable) < htSize {
-		hashTable = htPool.Get().([]int)
-		defer htPool.Put(hashTable)
+		hashTable = HashTablePool.Get().([]int)
+		defer HashTablePool.Put(hashTable)
 	} else {
 	} else {
 		hashTable = hashTable[:htSize]
 		hashTable = hashTable[:htSize]
 	}
 	}
 	_ = hashTable[htSize-1]
 	_ = hashTable[htSize-1]
-	chainTable = htPool.Get().([]int)
-	defer htPool.Put(chainTable)
+	chainTable = HashTablePool.Get().([]int)
+	defer HashTablePool.Put(chainTable)
 	_ = chainTable[htSize-1]
 	_ = chainTable[htSize-1]
 
 
 	if depth <= 0 {
 	if depth <= 0 {
 		depth = winSize
 		depth = winSize
 	}
 	}
 
 
-
 	for si < sn {
 	for si < sn {
 		// Hash the next 4 bytes (sequence).
 		// Hash the next 4 bytes (sequence).
 		match := binary.LittleEndian.Uint32(src[si:])
 		match := binary.LittleEndian.Uint32(src[si:])

+ 9 - 8
block_test.go → internal/lz4block/block_test.go

@@ -1,6 +1,6 @@
 //+build go1.9
 //+build go1.9
 
 
-package lz4_test
+package lz4block_test
 
 
 import (
 import (
 	"bytes"
 	"bytes"
@@ -9,6 +9,7 @@ import (
 	"testing"
 	"testing"
 
 
 	"github.com/pierrec/lz4"
 	"github.com/pierrec/lz4"
+	"github.com/pierrec/lz4/internal/lz4block"
 )
 )
 
 
 type testcase struct {
 type testcase struct {
@@ -37,7 +38,7 @@ func TestCompressUncompressBlock(t *testing.T) {
 		src := tc.src
 		src := tc.src
 
 
 		// Compress the data.
 		// Compress the data.
-		zbuf := make([]byte, lz4.CompressBlockBound(len(src)))
+		zbuf := make([]byte, lz4block.CompressBlockBound(len(src)))
 		n, err := compress(src, zbuf)
 		n, err := compress(src, zbuf)
 		if err != nil {
 		if err != nil {
 			t.Error(err)
 			t.Error(err)
@@ -57,7 +58,7 @@ func TestCompressUncompressBlock(t *testing.T) {
 
 
 		// Uncompress the data.
 		// Uncompress the data.
 		buf := make([]byte, len(src))
 		buf := make([]byte, len(src))
-		n, err = lz4.UncompressBlock(zbuf, buf)
+		n, err = lz4block.UncompressBlock(zbuf, buf)
 		if err != nil {
 		if err != nil {
 			t.Fatal(err)
 			t.Fatal(err)
 		} else if n < 0 || n > len(buf) {
 		} else if n < 0 || n > len(buf) {
@@ -97,7 +98,7 @@ func TestCompressUncompressBlock(t *testing.T) {
 			tc := tc
 			tc := tc
 			t.Run(tc.file, func(t *testing.T) {
 			t.Run(tc.file, func(t *testing.T) {
 				n = run(t, tc, func(src, dst []byte) (int, error) {
 				n = run(t, tc, func(src, dst []byte) (int, error) {
-					return lz4.CompressBlock(src, dst, nil)
+					return lz4block.CompressBlock(src, dst, nil)
 				})
 				})
 			})
 			})
 			//TODO
 			//TODO
@@ -138,19 +139,19 @@ func TestCompressCornerCase_CopyDstUpperBound(t *testing.T) {
 	t.Run(file, func(t *testing.T) {
 	t.Run(file, func(t *testing.T) {
 		t.Parallel()
 		t.Parallel()
 		run(src, func(src, dst []byte) (int, error) {
 		run(src, func(src, dst []byte) (int, error) {
-			return lz4.CompressBlock(src, dst, nil)
+			return lz4block.CompressBlock(src, dst, nil)
 		})
 		})
 	})
 	})
 	t.Run(fmt.Sprintf("%s HC", file), func(t *testing.T) {
 	t.Run(fmt.Sprintf("%s HC", file), func(t *testing.T) {
 		t.Parallel()
 		t.Parallel()
 		run(src, func(src, dst []byte) (int, error) {
 		run(src, func(src, dst []byte) (int, error) {
-			return lz4.CompressBlockHC(src, dst, 16, nil)
+			return lz4block.CompressBlockHC(src, dst, 16, nil)
 		})
 		})
 	})
 	})
 }
 }
 
 
 func TestIssue23(t *testing.T) {
 func TestIssue23(t *testing.T) {
-	compressBuf := make([]byte, lz4.CompressBlockBound(1<<16))
+	compressBuf := make([]byte, lz4block.CompressBlockBound(1<<16))
 	for j := 1; j < 16; j++ {
 	for j := 1; j < 16; j++ {
 		var buf [1 << 16]byte
 		var buf [1 << 16]byte
 
 
@@ -158,7 +159,7 @@ func TestIssue23(t *testing.T) {
 			buf[i] = 1
 			buf[i] = 1
 		}
 		}
 
 
-		n, _ := lz4.CompressBlock(buf[:], compressBuf, nil)
+		n, _ := lz4block.CompressBlock(buf[:], compressBuf, nil)
 		if got, want := n, 300; got > want {
 		if got, want := n, 300; got > want {
 			t.Fatalf("not able to compress repeated data: got %d; want %d", got, want)
 			t.Fatalf("not able to compress repeated data: got %d; want %d", got, want)
 		}
 		}

+ 78 - 0
internal/lz4block/blocks.go

@@ -0,0 +1,78 @@
+// Package lz4block provides LZ4 BlockSize types and pools of buffers.
+package lz4block
+
+import "sync"
+
+const (
+	Block64Kb uint32 = 1 << (16 + iota*2)
+	Block256Kb
+	Block1Mb
+	Block4Mb
+)
+
+var (
+	BlockPool64K  = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }}
+	BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }}
+	BlockPool1M   = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }}
+	BlockPool4M   = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }}
+)
+
+func Index(b uint32) BlockSizeIndex {
+	switch b {
+	case Block64Kb:
+		return 4
+	case Block256Kb:
+		return 5
+	case Block1Mb:
+		return 6
+	case Block4Mb:
+		return 7
+	}
+	return 0
+}
+
+func IsValid(b uint32) bool {
+	return Index(b) > 0
+}
+
+type BlockSizeIndex uint8
+
+func (b BlockSizeIndex) IsValid() bool {
+	switch b {
+	case 4, 5, 6, 7:
+		return true
+	}
+	return false
+}
+
+func (b BlockSizeIndex) Get() []byte {
+	var buf interface{}
+	switch b {
+	case 4:
+		buf = BlockPool64K.Get()
+	case 5:
+		buf = BlockPool256K.Get()
+	case 6:
+		buf = BlockPool1M.Get()
+	case 7:
+		buf = BlockPool4M.Get()
+	}
+	return buf.([]byte)
+}
+
+func (b BlockSizeIndex) Put(buf []byte) {
+	switch b {
+	case 4:
+		BlockPool64K.Put(buf)
+	case 5:
+		BlockPool256K.Put(buf)
+	case 6:
+		BlockPool1M.Put(buf)
+	case 7:
+		BlockPool4M.Put(buf)
+	}
+}
+
+type CompressionLevel uint32
+
+const Fast CompressionLevel = 0

+ 1 - 1
decode_amd64.go → internal/lz4block/decode_amd64.go

@@ -2,7 +2,7 @@
 // +build gc
 // +build gc
 // +build !noasm
 // +build !noasm
 
 
-package lz4
+package lz4block
 
 
 //go:noescape
 //go:noescape
 func decodeBlock(dst, src []byte) int
 func decodeBlock(dst, src []byte) int

+ 0 - 0
decode_amd64.s → internal/lz4block/decode_amd64.s


+ 1 - 1
decode_other.go → internal/lz4block/decode_other.go

@@ -1,6 +1,6 @@
 // +build !amd64 appengine !gc noasm
 // +build !amd64 appengine !gc noasm
 
 
-package lz4
+package lz4block
 
 
 func decodeBlock(dst, src []byte) (ret int) {
 func decodeBlock(dst, src []byte) (ret int) {
 	const hasError = -2
 	const hasError = -2

+ 1 - 1
decode_test.go → internal/lz4block/decode_test.go

@@ -1,4 +1,4 @@
-package lz4
+package lz4block
 
 
 import (
 import (
 	"bytes"
 	"bytes"

+ 31 - 0
internal/lz4errors/errors.go

@@ -0,0 +1,31 @@
+package lz4errors
+
+type Error string
+
+func (e Error) Error() string { return string(e) }
+
+const (
+	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
+	ErrInvalidSourceShortBuffer Error = "lz4: invalid source or destination buffer too short"
+	// ErrInvalidFrame is returned when reading an invalid LZ4 archive.
+	ErrInvalidFrame Error = "lz4: bad magic number"
+	// ErrInternalUnhandledState is an internal error.
+	ErrInternalUnhandledState Error = "lz4: unhandled state"
+	// ErrInvalidHeaderChecksum is returned when reading a frame.
+	ErrInvalidHeaderChecksum Error = "lz4: invalid header checksum"
+	// ErrInvalidBlockChecksum is returned when reading a frame.
+	ErrInvalidBlockChecksum Error = "lz4: invalid block checksum"
+	// ErrInvalidFrameChecksum is returned when reading a frame.
+	ErrInvalidFrameChecksum Error = "lz4: invalid frame checksum"
+	// ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid.
+	ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level"
+	// ErrOptionClosedOrError is returned when an option is applied to a closed or in error object.
+	ErrOptionClosedOrError Error = "lz4: cannot apply options on closed or in error object"
+	// ErrOptionInvalidBlockSize is returned when the supplied block size is invalid.
+	ErrOptionInvalidBlockSize Error = "lz4: invalid block size"
+	// ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it.
+	ErrOptionNotApplicable Error = "lz4: option not applicable"
+	// ErrWriterNotClosed is returned when attempting to reset an unclosed writer.
+	ErrWriterNotClosed Error = "lz4: writer not closed"
+)

+ 36 - 28
frame.go → internal/lz4stream/frame.go

@@ -1,4 +1,5 @@
-package lz4
+// Package lz4stream provides the types that support reading and writing LZ4 data streams.
+package lz4stream
 
 
 import (
 import (
 	"encoding/binary"
 	"encoding/binary"
@@ -6,11 +7,18 @@ import (
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
 
 
+	"github.com/pierrec/lz4/internal/lz4block"
+	"github.com/pierrec/lz4/internal/lz4errors"
 	"github.com/pierrec/lz4/internal/xxh32"
 	"github.com/pierrec/lz4/internal/xxh32"
 )
 )
 
 
 //go:generate go run gen.go
 //go:generate go run gen.go
 
 
+const (
+	frameMagic     uint32 = 0x184D2204
+	frameSkipMagic uint32 = 0x184D2A50
+)
+
 func NewFrame() *Frame {
 func NewFrame() *Frame {
 	return &Frame{}
 	return &Frame{}
 }
 }
@@ -24,14 +32,14 @@ type Frame struct {
 	checksum   xxh32.XXHZero
 	checksum   xxh32.XXHZero
 }
 }
 
 
-func (f *Frame) initW(dst io.Writer, num int) {
+func (f *Frame) InitW(dst io.Writer, num int) {
 	f.Magic = frameMagic
 	f.Magic = frameMagic
 	f.Descriptor.initW()
 	f.Descriptor.initW()
 	f.Blocks.initW(f, dst, num)
 	f.Blocks.initW(f, dst, num)
 	f.checksum.Reset()
 	f.checksum.Reset()
 }
 }
 
 
-func (f *Frame) closeW(dst io.Writer, num int) error {
+func (f *Frame) CloseW(dst io.Writer, num int) error {
 	if err := f.Blocks.closeW(f, num); err != nil {
 	if err := f.Blocks.closeW(f, num); err != nil {
 		return err
 		return err
 	}
 	}
@@ -45,7 +53,7 @@ func (f *Frame) closeW(dst io.Writer, num int) error {
 	return err
 	return err
 }
 }
 
 
-func (f *Frame) initR(src io.Reader) error {
+func (f *Frame) InitR(src io.Reader) error {
 	if f.Magic > 0 {
 	if f.Magic > 0 {
 		// Header already read.
 		// Header already read.
 		return nil
 		return nil
@@ -67,7 +75,7 @@ newFrame:
 		}
 		}
 		goto newFrame
 		goto newFrame
 	default:
 	default:
-		return ErrInvalidFrame
+		return lz4errors.ErrInvalidFrame
 	}
 	}
 	if err := f.Descriptor.initR(f, src); err != nil {
 	if err := f.Descriptor.initR(f, src); err != nil {
 		return err
 		return err
@@ -77,7 +85,7 @@ newFrame:
 	return nil
 	return nil
 }
 }
 
 
-func (f *Frame) closeR(src io.Reader) error {
+func (f *Frame) CloseR(src io.Reader) error {
 	f.Magic = 0
 	f.Magic = 0
 	if !f.Descriptor.Flags.ContentChecksum() {
 	if !f.Descriptor.Flags.ContentChecksum() {
 		return nil
 		return nil
@@ -86,7 +94,7 @@ func (f *Frame) closeR(src io.Reader) error {
 		return err
 		return err
 	}
 	}
 	if c := f.checksum.Sum32(); c != f.Checksum {
 	if c := f.checksum.Sum32(); c != f.Checksum {
-		return fmt.Errorf("%w: got %x; expected %x", ErrInvalidFrameChecksum, c, f.Checksum)
+		return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum)
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -102,7 +110,7 @@ func (fd *FrameDescriptor) initW() {
 	fd.Flags.BlockIndependenceSet(true)
 	fd.Flags.BlockIndependenceSet(true)
 }
 }
 
 
-func (fd *FrameDescriptor) write(f *Frame, dst io.Writer) error {
+func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error {
 	if fd.Checksum > 0 {
 	if fd.Checksum > 0 {
 		// Header already written.
 		// Header already written.
 		return nil
 		return nil
@@ -143,11 +151,11 @@ func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error {
 	fd.Checksum = buf[len(buf)-1] // the checksum is the last byte
 	fd.Checksum = buf[len(buf)-1] // the checksum is the last byte
 	buf = buf[:len(buf)-1]        // all descriptor fields except checksum
 	buf = buf[:len(buf)-1]        // all descriptor fields except checksum
 	if c := descriptorChecksum(buf); fd.Checksum != c {
 	if c := descriptorChecksum(buf); fd.Checksum != c {
-		return fmt.Errorf("%w: got %x; expected %x", ErrInvalidHeaderChecksum, c, fd.Checksum)
+		return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum)
 	}
 	}
 	// Validate the elements that can be.
 	// Validate the elements that can be.
-	if !fd.Flags.BlockSizeIndex().isValid() {
-		return ErrOptionInvalidBlockSize
+	if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() {
+		return lz4errors.ErrOptionInvalidBlockSize
 	}
 	}
 	return nil
 	return nil
 }
 }
@@ -166,7 +174,7 @@ func (b *Blocks) initW(f *Frame, dst io.Writer, num int) {
 	size := f.Descriptor.Flags.BlockSizeIndex()
 	size := f.Descriptor.Flags.BlockSizeIndex()
 	if num == 1 {
 	if num == 1 {
 		b.Blocks = nil
 		b.Blocks = nil
-		b.Block = newFrameDataBlock(size)
+		b.Block = NewFrameDataBlock(size)
 		return
 		return
 	}
 	}
 	if cap(b.Blocks) != num {
 	if cap(b.Blocks) != num {
@@ -190,7 +198,7 @@ func (b *Blocks) initW(f *Frame, dst io.Writer, num int) {
 			// Do not attempt to write the block upon any previous failure.
 			// Do not attempt to write the block upon any previous failure.
 			if b.err == nil {
 			if b.err == nil {
 				// Write the block.
 				// Write the block.
-				if err := block.write(f, dst); err != nil && b.err == nil {
+				if err := block.Write(f, dst); err != nil && b.err == nil {
 					// Keep the first error.
 					// Keep the first error.
 					b.err = err
 					b.err = err
 					// All pending compression goroutines need to shut down, so we need to keep going.
 					// All pending compression goroutines need to shut down, so we need to keep going.
@@ -218,11 +226,11 @@ func (b *Blocks) closeW(f *Frame, num int) error {
 
 
 func (b *Blocks) initR(f *Frame) {
 func (b *Blocks) initR(f *Frame) {
 	size := f.Descriptor.Flags.BlockSizeIndex()
 	size := f.Descriptor.Flags.BlockSizeIndex()
-	b.Block = newFrameDataBlock(size)
+	b.Block = NewFrameDataBlock(size)
 }
 }
 
 
-func newFrameDataBlock(size BlockSizeIndex) *FrameDataBlock {
-	return &FrameDataBlock{Data: size.get()}
+func NewFrameDataBlock(size lz4block.BlockSizeIndex) *FrameDataBlock {
+	return &FrameDataBlock{Data: size.Get()}
 }
 }
 
 
 type FrameDataBlock struct {
 type FrameDataBlock struct {
@@ -233,24 +241,24 @@ type FrameDataBlock struct {
 
 
 func (b *FrameDataBlock) closeW(f *Frame) {
 func (b *FrameDataBlock) closeW(f *Frame) {
 	size := f.Descriptor.Flags.BlockSizeIndex()
 	size := f.Descriptor.Flags.BlockSizeIndex()
-	size.put(b.Data)
+	size.Put(b.Data)
 }
 }
 
 
 // Block compression errors are ignored since the buffer is sized appropriately.
 // Block compression errors are ignored since the buffer is sized appropriately.
-func (b *FrameDataBlock) compress(f *Frame, src []byte, ht []int, level CompressionLevel) *FrameDataBlock {
+func (b *FrameDataBlock) Compress(f *Frame, src []byte, ht []int, level lz4block.CompressionLevel) *FrameDataBlock {
 	data := b.Data[:len(src)] // trigger the incompressible flag in CompressBlock
 	data := b.Data[:len(src)] // trigger the incompressible flag in CompressBlock
 	var n int
 	var n int
 	switch level {
 	switch level {
-	case Fast:
-		n, _ = CompressBlock(src, data, ht)
+	case lz4block.Fast:
+		n, _ = lz4block.CompressBlock(src, data, ht)
 	default:
 	default:
-		n, _ = CompressBlockHC(src, data, level, ht)
+		n, _ = lz4block.CompressBlockHC(src, data, level, ht)
 	}
 	}
 	if n == 0 {
 	if n == 0 {
-		b.Size.uncompressedSet(true)
+		b.Size.UncompressedSet(true)
 		data = src
 		data = src
 	} else {
 	} else {
-		b.Size.uncompressedSet(false)
+		b.Size.UncompressedSet(false)
 		data = data[:n]
 		data = data[:n]
 	}
 	}
 	b.Data = data
 	b.Data = data
@@ -265,7 +273,7 @@ func (b *FrameDataBlock) compress(f *Frame, src []byte, ht []int, level Compress
 	return b
 	return b
 }
 }
 
 
-func (b *FrameDataBlock) write(f *Frame, dst io.Writer) error {
+func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error {
 	buf := f.buf[:]
 	buf := f.buf[:]
 	binary.LittleEndian.PutUint32(buf, uint32(b.Size))
 	binary.LittleEndian.PutUint32(buf, uint32(b.Size))
 	if _, err := dst.Write(buf[:4]); err != nil {
 	if _, err := dst.Write(buf[:4]); err != nil {
@@ -284,7 +292,7 @@ func (b *FrameDataBlock) write(f *Frame, dst io.Writer) error {
 	return err
 	return err
 }
 }
 
 
-func (b *FrameDataBlock) uncompress(f *Frame, src io.Reader, dst []byte) (int, error) {
+func (b *FrameDataBlock) Uncompress(f *Frame, src io.Reader, dst []byte) (int, error) {
 	buf := f.buf[:]
 	buf := f.buf[:]
 	var x uint32
 	var x uint32
 	if err := readUint32(src, buf, &x); err != nil {
 	if err := readUint32(src, buf, &x); err != nil {
@@ -296,7 +304,7 @@ func (b *FrameDataBlock) uncompress(f *Frame, src io.Reader, dst []byte) (int, e
 		return 0, io.EOF
 		return 0, io.EOF
 	}
 	}
 
 
-	isCompressed := !b.Size.uncompressed()
+	isCompressed := !b.Size.Uncompressed()
 	size := b.Size.size()
 	size := b.Size.size()
 	var data []byte
 	var data []byte
 	if isCompressed {
 	if isCompressed {
@@ -309,7 +317,7 @@ func (b *FrameDataBlock) uncompress(f *Frame, src io.Reader, dst []byte) (int, e
 		return 0, err
 		return 0, err
 	}
 	}
 	if isCompressed {
 	if isCompressed {
-		n, err := UncompressBlock(data, dst)
+		n, err := lz4block.UncompressBlock(data, dst)
 		if err != nil {
 		if err != nil {
 			return 0, err
 			return 0, err
 		}
 		}
@@ -321,7 +329,7 @@ func (b *FrameDataBlock) uncompress(f *Frame, src io.Reader, dst []byte) (int, e
 			return 0, err
 			return 0, err
 		}
 		}
 		if c := xxh32.ChecksumZero(data); c != b.Checksum {
 		if c := xxh32.ChecksumZero(data); c != b.Checksum {
-			return 0, fmt.Errorf("%w: got %x; expected %x", ErrInvalidBlockChecksum, c, b.Checksum)
+			return 0, fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum)
 		}
 		}
 	}
 	}
 	if f.Descriptor.Flags.ContentChecksum() {
 	if f.Descriptor.Flags.ContentChecksum() {

+ 13 - 11
frame_gen.go → internal/lz4stream/frame_gen.go

@@ -1,6 +1,8 @@
 // Code generated by `gen.exe`. DO NOT EDIT.
 // Code generated by `gen.exe`. DO NOT EDIT.
 
 
-package lz4
+package lz4stream
+
+import "github.com/pierrec/lz4/internal/lz4block"
 
 
 // DescriptorFlags is defined as follow:
 // DescriptorFlags is defined as follow:
 //   field              bits
 //   field              bits
@@ -17,12 +19,12 @@ package lz4
 type DescriptorFlags uint16
 type DescriptorFlags uint16
 
 
 // Getters.
 // Getters.
-func (x DescriptorFlags) ContentChecksum() bool { return x>>2&1 != 0 }
-func (x DescriptorFlags) Size() bool { return x>>3&1 != 0 }
-func (x DescriptorFlags) BlockChecksum() bool { return x>>4&1 != 0 }
-func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 }
-func (x DescriptorFlags) Version() uint16 { return uint16(x>>6&0x3) }
-func (x DescriptorFlags) BlockSizeIndex() BlockSizeIndex { return BlockSizeIndex(x>>12&0x7) }
+func (x DescriptorFlags) ContentChecksum() bool              { return x>>2&1 != 0 }
+func (x DescriptorFlags) Size() bool                         { return x>>3&1 != 0 }
+func (x DescriptorFlags) BlockChecksum() bool                { return x>>4&1 != 0 }
+func (x DescriptorFlags) BlockIndependence() bool            { return x>>5&1 != 0 }
+func (x DescriptorFlags) Version() uint16                    { return uint16(x>>6&0x3) }
+func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex { return lz4block.BlockSizeIndex(x>>12&0x7) }
 
 
 // Setters.
 // Setters.
 func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags { const b = 1<<2; if v { *x = *x&^b | b } else { *x &^= b }; return x }
 func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags { const b = 1<<2; if v { *x = *x&^b | b } else { *x &^= b }; return x }
@@ -30,7 +32,7 @@ func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags { const b = 1<<3; if
 func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags { const b = 1<<4; if v { *x = *x&^b | b } else { *x &^= b }; return x }
 func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags { const b = 1<<4; if v { *x = *x&^b | b } else { *x &^= b }; return x }
 func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags { const b = 1<<5; if v { *x = *x&^b | b } else { *x &^= b }; return x }
 func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags { const b = 1<<5; if v { *x = *x&^b | b } else { *x &^= b }; return x }
 func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags { *x = *x&^(0x3<<6) | (DescriptorFlags(v)&0x3<<6); return x }
 func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags { *x = *x&^(0x3<<6) | (DescriptorFlags(v)&0x3<<6); return x }
-func (x *DescriptorFlags) BlockSizeIndexSet(v BlockSizeIndex) *DescriptorFlags { *x = *x&^(0x7<<12) | (DescriptorFlags(v)&0x7<<12); return x }
+func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags { *x = *x&^(0x7<<12) | (DescriptorFlags(v)&0x7<<12); return x }
 // Code generated by `gen.exe`. DO NOT EDIT.
 // Code generated by `gen.exe`. DO NOT EDIT.
 
 
 // DataBlockSize is defined as follow:
 // DataBlockSize is defined as follow:
@@ -41,9 +43,9 @@ func (x *DescriptorFlags) BlockSizeIndexSet(v BlockSizeIndex) *DescriptorFlags {
 type DataBlockSize uint32
 type DataBlockSize uint32
 
 
 // Getters.
 // Getters.
-func (x DataBlockSize) size() int { return int(x&0x7FFFFFFF) }
-func (x DataBlockSize) uncompressed() bool { return x>>31&1 != 0 }
+func (x DataBlockSize) size() int          { return int(x&0x7FFFFFFF) }
+func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 }
 
 
 // Setters.
 // Setters.
 func (x *DataBlockSize) sizeSet(v int) *DataBlockSize { *x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF; return x }
 func (x *DataBlockSize) sizeSet(v int) *DataBlockSize { *x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF; return x }
-func (x *DataBlockSize) uncompressedSet(v bool) *DataBlockSize { const b = 1<<31; if v { *x = *x&^b | b } else { *x &^= b }; return x }
+func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize { const b = 1<<31; if v { *x = *x&^b | b } else { *x &^= b }; return x }

+ 21 - 19
frame_test.go → internal/lz4stream/frame_test.go

@@ -1,10 +1,12 @@
-package lz4
+package lz4stream
 
 
 import (
 import (
 	"bytes"
 	"bytes"
 	"fmt"
 	"fmt"
 	"strings"
 	"strings"
 	"testing"
 	"testing"
+
+	"github.com/pierrec/lz4"
 )
 )
 
 
 func TestFrameDescriptor(t *testing.T) {
 func TestFrameDescriptor(t *testing.T) {
@@ -12,12 +14,12 @@ func TestFrameDescriptor(t *testing.T) {
 		flags             string
 		flags             string
 		bsum, csize, csum bool
 		bsum, csize, csum bool
 		size              uint64
 		size              uint64
-		bsize             BlockSize
+		bsize             lz4.BlockSize
 	}{
 	}{
-		{"\x64\x40\xa7", false, false, true, 0, Block64Kb},
-		{"\x64\x50\x08", false, false, true, 0, Block256Kb},
-		{"\x64\x60\x85", false, false, true, 0, Block1Mb},
-		{"\x64\x70\xb9", false, false, true, 0, Block4Mb},
+		{"\x64\x40\xa7", false, false, true, 0, lz4.Block64Kb},
+		{"\x64\x50\x08", false, false, true, 0, lz4.Block256Kb},
+		{"\x64\x60\x85", false, false, true, 0, lz4.Block1Mb},
+		{"\x64\x70\xb9", false, false, true, 0, lz4.Block4Mb},
 	} {
 	} {
 		s := tc.flags
 		s := tc.flags
 		label := fmt.Sprintf("%02x %02x %02x", s[0], s[1], s[2])
 		label := fmt.Sprintf("%02x %02x %02x", s[0], s[1], s[2])
@@ -46,10 +48,10 @@ func TestFrameDescriptor(t *testing.T) {
 			}
 			}
 
 
 			buf := new(bytes.Buffer)
 			buf := new(bytes.Buffer)
-			w := &Writer{src: buf}
+			w := lz4.NewWriter(buf)
 			fd.initW()
 			fd.initW()
 			fd.Checksum = 0
 			fd.Checksum = 0
-			if err := fd.write(f, w); err != nil {
+			if err := fd.Write(f, w); err != nil {
 				t.Fatal(err)
 				t.Fatal(err)
 			}
 			}
 			if got, want := buf.String(), tc.flags; got != want {
 			if got, want := buf.String(), tc.flags; got != want {
@@ -69,15 +71,15 @@ func TestFrameDataBlock(t *testing.T) {
 	}
 	}
 	for _, tc := range []struct {
 	for _, tc := range []struct {
 		data string
 		data string
-		size BlockSize
+		size lz4.BlockSize
 	}{
 	}{
-		{"", Block64Kb},
-		{sample, Block64Kb},
-		{strings.Repeat(sample, 10), Block64Kb},
-		{strings.Repeat(sample, 5000), Block256Kb},
-		{strings.Repeat(sample, 5000), Block1Mb},
-		{strings.Repeat(sample, 23000), Block1Mb},
-		{strings.Repeat(sample, 93000), Block4Mb},
+		{"", lz4.Block64Kb},
+		{sample, lz4.Block64Kb},
+		{strings.Repeat(sample, 10), lz4.Block64Kb},
+		{strings.Repeat(sample, 5000), lz4.Block256Kb},
+		{strings.Repeat(sample, 5000), lz4.Block1Mb},
+		{strings.Repeat(sample, 23000), lz4.Block1Mb},
+		{strings.Repeat(sample, 93000), lz4.Block4Mb},
 	} {
 	} {
 		label := fmt.Sprintf("%s (%d)", tc.data[:min(len(tc.data), 10)], len(tc.data))
 		label := fmt.Sprintf("%s (%d)", tc.data[:min(len(tc.data), 10)], len(tc.data))
 		t.Run(label, func(t *testing.T) {
 		t.Run(label, func(t *testing.T) {
@@ -87,13 +89,13 @@ func TestFrameDataBlock(t *testing.T) {
 			f := NewFrame()
 			f := NewFrame()
 
 
 			block := newFrameDataBlock(size.index())
 			block := newFrameDataBlock(size.index())
-			block.compress(f, []byte(data), nil, Fast)
-			if err := block.write(f, zbuf); err != nil {
+			block.Compress(f, []byte(data), nil, lz4.Fast)
+			if err := block.Write(f, zbuf); err != nil {
 				t.Fatal(err)
 				t.Fatal(err)
 			}
 			}
 
 
 			buf := make([]byte, size)
 			buf := make([]byte, size)
-			n, err := block.uncompress(f, zbuf, buf)
+			n, err := block.Uncompress(f, zbuf, buf)
 			if err != nil {
 			if err != nil {
 				t.Fatal(err)
 				t.Fatal(err)
 			}
 			}

+ 3 - 4
gen.go → internal/lz4stream/gen.go

@@ -6,7 +6,6 @@ import (
 	"log"
 	"log"
 	"os"
 	"os"
 
 
-	"github.com/pierrec/lz4"
 	"github.com/pierrec/packer"
 	"github.com/pierrec/packer"
 )
 )
 
 
@@ -20,13 +19,13 @@ type DescriptorFlags struct {
 	Version           [2]uint16
 	Version           [2]uint16
 	// BD
 	// BD
 	_              [4]int
 	_              [4]int
-	BlockSizeIndex [3]lz4.BlockSizeIndex
+	BlockSizeIndex [3]lz4block.BlockSizeIndex
 	_              [1]int
 	_              [1]int
 }
 }
 
 
 type DataBlockSize struct {
 type DataBlockSize struct {
 	size         [31]int
 	size         [31]int
-	uncompressed bool
+	Uncompressed bool
 }
 }
 
 
 func main() {
 func main() {
@@ -36,7 +35,7 @@ func main() {
 	}
 	}
 	defer out.Close()
 	defer out.Close()
 
 
-	pkg := "lz4"
+	pkg := "lz4stream"
 	for i, t := range []interface{}{
 	for i, t := range []interface{}{
 		DescriptorFlags{}, DataBlockSize{},
 		DescriptorFlags{}, DataBlockSize{},
 	} {
 	} {

+ 55 - 44
lz4.go

@@ -1,51 +1,62 @@
 package lz4
 package lz4
 
 
-const (
-	frameMagic     uint32 = 0x184D2204
-	frameSkipMagic uint32 = 0x184D2A50
-
-	// The following constants are used to setup the compression algorithm.
-	minMatch   = 4  // the minimum size of the match sequence size (4 bytes)
-	winSizeLog = 16 // LZ4 64Kb window size limit
-	winSize    = 1 << winSizeLog
-	winMask    = winSize - 1 // 64Kb window of previous data for dependent blocks
+import (
+	"github.com/pierrec/lz4/internal/lz4block"
+)
 
 
-	// hashLog determines the size of the hash table used to quickly find a previous match position.
-	// Its value influences the compression speed and memory usage, the lower the faster,
-	// but at the expense of the compression ratio.
-	// 16 seems to be the best compromise for fast compression.
-	hashLog = 16
-	htSize  = 1 << hashLog
+func _() {
+	// Safety checks for duplicated elements.
+	var x [1]struct{}
+	_ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast]
+	_ = x[Block64Kb-BlockSize(lz4block.Block64Kb)]
+	_ = x[Block256Kb-BlockSize(lz4block.Block256Kb)]
+	_ = x[Block1Mb-BlockSize(lz4block.Block1Mb)]
+	_ = x[Block4Mb-BlockSize(lz4block.Block4Mb)]
+}
 
 
-	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
-)
+// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
+func CompressBlockBound(n int) int {
+	return lz4block.CompressBlockBound(n)
+}
 
 
-type _error string
+// UncompressBlock uncompresses the source buffer into the destination one,
+// and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte) (int, error) {
+	return lz4block.UncompressBlock(src, dst)
+}
 
 
-func (e _error) Error() string { return string(e) }
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+//
+// The argument hashTable is scratch space for a hash table used by the
+// compressor. If provided, it should have length at least 1<<16. If it is
+// shorter (or nil), CompressBlock allocates its own hash table.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
+	return lz4block.CompressBlock(src, dst, hashTable)
+}
 
 
-const (
-	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed
-	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
-	ErrInvalidSourceShortBuffer _error = "lz4: invalid source or destination buffer too short"
-	// ErrInvalidFrame is returned when reading an invalid LZ4 archive.
-	ErrInvalidFrame _error = "lz4: bad magic number"
-	// ErrInternalUnhandledState is an internal error.
-	ErrInternalUnhandledState _error = "lz4: unhandled state"
-	// ErrInvalidHeaderChecksum is returned when reading a frame.
-	ErrInvalidHeaderChecksum _error = "lz4: invalid header checksum"
-	// ErrInvalidBlockChecksum is returned when reading a frame.
-	ErrInvalidBlockChecksum _error = "lz4: invalid block checksum"
-	// ErrInvalidFrameChecksum is returned when reading a frame.
-	ErrInvalidFrameChecksum _error = "lz4: invalid frame checksum"
-	// ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid.
-	ErrOptionInvalidCompressionLevel _error = "lz4: invalid compression level"
-	// ErrOptionClosedOrError is returned when an option is applied to a closed or in error object.
-	ErrOptionClosedOrError _error = "lz4: cannot apply options on closed or in error object"
-	// ErrOptionInvalidBlockSize is returned when
-	ErrOptionInvalidBlockSize _error = "lz4: invalid block size"
-	// ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it.
-	ErrOptionNotApplicable _error = "lz4: option not applicable"
-	// ErrWriterNotClosed is returned when attempting to reset an unclosed writer.
-	ErrWriterNotClosed _error = "lz4: writer not closed"
-)
+// CompressBlockHC compresses the source buffer src into the destination dst
+// with max search depth (use 0 or negative value for no max).
+//
+// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlockHC(src, dst []byte, depth CompressionLevel, hashTable []int) (_ int, err error) {
+	return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth), hashTable)
+}

+ 22 - 82
options.go

@@ -4,7 +4,9 @@ import (
 	"fmt"
 	"fmt"
 	"reflect"
 	"reflect"
 	"runtime"
 	"runtime"
-	"sync"
+
+	"github.com/pierrec/lz4/internal/lz4block"
+	"github.com/pierrec/lz4/internal/lz4errors"
 )
 )
 
 
 //go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go
 //go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go
@@ -37,87 +39,25 @@ const (
 	Block4Mb
 	Block4Mb
 )
 )
 
 
-var (
-	blockPool64K  = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }}
-	blockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }}
-	blockPool1M   = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }}
-	blockPool4M   = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }}
-)
-
 // BlockSizeIndex defines the size of the blocks to be compressed.
 // BlockSizeIndex defines the size of the blocks to be compressed.
 type BlockSize uint32
 type BlockSize uint32
 
 
-func (b BlockSize) isValid() bool {
-	return b.index() > 0
-}
-
-func (b BlockSize) index() BlockSizeIndex {
-	switch b {
-	case Block64Kb:
-		return 4
-	case Block256Kb:
-		return 5
-	case Block1Mb:
-		return 6
-	case Block4Mb:
-		return 7
-	}
-	return 0
-}
-
-type BlockSizeIndex uint8
-
-func (b BlockSizeIndex) isValid() bool {
-	switch b {
-	case 4, 5, 6, 7:
-		return true
-	}
-	return false
-}
-
-func (b BlockSizeIndex) get() []byte {
-	var buf interface{}
-	switch b {
-	case 4:
-		buf = blockPool64K.Get()
-	case 5:
-		buf = blockPool256K.Get()
-	case 6:
-		buf = blockPool1M.Get()
-	case 7:
-		buf = blockPool4M.Get()
-	}
-	return buf.([]byte)
-}
-
-func (b BlockSizeIndex) put(buf []byte) {
-	switch b {
-	case 4:
-		blockPool64K.Put(buf)
-	case 5:
-		blockPool256K.Put(buf)
-	case 6:
-		blockPool1M.Put(buf)
-	case 7:
-		blockPool4M.Put(buf)
-	}
-}
-
 // BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb).
 // BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb).
 func BlockSizeOption(size BlockSize) Option {
 func BlockSizeOption(size BlockSize) Option {
 	return func(a applier) error {
 	return func(a applier) error {
 		switch w := a.(type) {
 		switch w := a.(type) {
 		case nil:
 		case nil:
 			s := fmt.Sprintf("BlockSizeOption(%s)", size)
 			s := fmt.Sprintf("BlockSizeOption(%s)", size)
-			return _error(s)
+			return lz4errors.Error(s)
 		case *Writer:
 		case *Writer:
-			if !size.isValid() {
-				return fmt.Errorf("%w: %d", ErrOptionInvalidBlockSize, size)
+			size := uint32(size)
+			if !lz4block.IsValid(size) {
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
 			}
 			}
-			w.frame.Descriptor.Flags.BlockSizeIndexSet(size.index())
+			w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
 			return nil
 			return nil
 		}
 		}
-		return ErrOptionNotApplicable
+		return lz4errors.ErrOptionNotApplicable
 	}
 	}
 }
 }
 
 
@@ -127,12 +67,12 @@ func BlockChecksumOption(flag bool) Option {
 		switch w := a.(type) {
 		switch w := a.(type) {
 		case nil:
 		case nil:
 			s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
 			s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
-			return _error(s)
+			return lz4errors.Error(s)
 		case *Writer:
 		case *Writer:
 			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
 			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
 			return nil
 			return nil
 		}
 		}
-		return ErrOptionNotApplicable
+		return lz4errors.ErrOptionNotApplicable
 	}
 	}
 }
 }
 
 
@@ -142,12 +82,12 @@ func ChecksumOption(flag bool) Option {
 		switch w := a.(type) {
 		switch w := a.(type) {
 		case nil:
 		case nil:
 			s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
 			s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
-			return _error(s)
+			return lz4errors.Error(s)
 		case *Writer:
 		case *Writer:
 			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
 			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
 			return nil
 			return nil
 		}
 		}
-		return ErrOptionNotApplicable
+		return lz4errors.ErrOptionNotApplicable
 	}
 	}
 }
 }
 
 
@@ -157,13 +97,13 @@ func SizeOption(size uint64) Option {
 		switch w := a.(type) {
 		switch w := a.(type) {
 		case nil:
 		case nil:
 			s := fmt.Sprintf("SizeOption(%d)", size)
 			s := fmt.Sprintf("SizeOption(%d)", size)
-			return _error(s)
+			return lz4errors.Error(s)
 		case *Writer:
 		case *Writer:
 			w.frame.Descriptor.Flags.SizeSet(size > 0)
 			w.frame.Descriptor.Flags.SizeSet(size > 0)
 			w.frame.Descriptor.ContentSize = size
 			w.frame.Descriptor.ContentSize = size
 			return nil
 			return nil
 		}
 		}
-		return ErrOptionNotApplicable
+		return lz4errors.ErrOptionNotApplicable
 	}
 	}
 }
 }
 
 
@@ -174,7 +114,7 @@ func ConcurrencyOption(n int) Option {
 		switch w := a.(type) {
 		switch w := a.(type) {
 		case nil:
 		case nil:
 			s := fmt.Sprintf("ConcurrencyOption(%d)", n)
 			s := fmt.Sprintf("ConcurrencyOption(%d)", n)
-			return _error(s)
+			return lz4errors.Error(s)
 		case *Writer:
 		case *Writer:
 			switch n {
 			switch n {
 			case 0, 1:
 			case 0, 1:
@@ -186,7 +126,7 @@ func ConcurrencyOption(n int) Option {
 			w.num = n
 			w.num = n
 			return nil
 			return nil
 		}
 		}
-		return ErrOptionNotApplicable
+		return lz4errors.ErrOptionNotApplicable
 	}
 	}
 }
 }
 
 
@@ -212,17 +152,17 @@ func CompressionLevelOption(level CompressionLevel) Option {
 		switch w := a.(type) {
 		switch w := a.(type) {
 		case nil:
 		case nil:
 			s := fmt.Sprintf("CompressionLevelOption(%s)", level)
 			s := fmt.Sprintf("CompressionLevelOption(%s)", level)
-			return _error(s)
+			return lz4errors.Error(s)
 		case *Writer:
 		case *Writer:
 			switch level {
 			switch level {
 			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
 			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
 			default:
 			default:
-				return fmt.Errorf("%w: %d", ErrOptionInvalidCompressionLevel, level)
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
 			}
 			}
-			w.level = level
+			w.level = lz4block.CompressionLevel(level)
 			return nil
 			return nil
 		}
 		}
-		return ErrOptionNotApplicable
+		return lz4errors.ErrOptionNotApplicable
 	}
 	}
 }
 }
 
 
@@ -237,7 +177,7 @@ func OnBlockDoneOption(handler func(size int)) Option {
 		switch rw := a.(type) {
 		switch rw := a.(type) {
 		case nil:
 		case nil:
 			s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
 			s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
-			return _error(s)
+			return lz4errors.Error(s)
 		case *Writer:
 		case *Writer:
 			rw.handler = handler
 			rw.handler = handler
 		case *Reader:
 		case *Reader:

+ 15 - 12
reader.go

@@ -2,6 +2,9 @@ package lz4
 
 
 import (
 import (
 	"io"
 	"io"
+
+	"github.com/pierrec/lz4/internal/lz4errors"
+	"github.com/pierrec/lz4/internal/lz4stream"
 )
 )
 
 
 var readerStates = []aState{
 var readerStates = []aState{
@@ -14,7 +17,7 @@ var readerStates = []aState{
 
 
 // NewReader returns a new LZ4 frame decoder.
 // NewReader returns a new LZ4 frame decoder.
 func NewReader(r io.Reader) *Reader {
 func NewReader(r io.Reader) *Reader {
-	zr := &Reader{frame: NewFrame()}
+	zr := &Reader{frame: lz4stream.NewFrame()}
 	zr.state.init(readerStates)
 	zr.state.init(readerStates)
 	_ = zr.Apply(defaultOnBlockDone)
 	_ = zr.Apply(defaultOnBlockDone)
 	return zr.Reset(r)
 	return zr.Reset(r)
@@ -22,10 +25,10 @@ func NewReader(r io.Reader) *Reader {
 
 
 type Reader struct {
 type Reader struct {
 	state   _State
 	state   _State
-	src     io.Reader // source reader
-	frame   *Frame    // frame being read
-	data    []byte    // pending data
-	idx     int       // size of pending data
+	src     io.Reader        // source reader
+	frame   *lz4stream.Frame // frame being read
+	data    []byte           // pending data
+	idx     int              // size of pending data
 	handler func(int)
 	handler func(int)
 }
 }
 
 
@@ -38,7 +41,7 @@ func (r *Reader) Apply(options ...Option) (err error) {
 	case errorState:
 	case errorState:
 		return r.state.err
 		return r.state.err
 	default:
 	default:
-		return ErrOptionClosedOrError
+		return lz4errors.ErrOptionClosedOrError
 	}
 	}
 	for _, o := range options {
 	for _, o := range options {
 		if err = o(r); err != nil {
 		if err = o(r); err != nil {
@@ -67,10 +70,10 @@ func (r *Reader) Read(buf []byte) (n int, err error) {
 		return 0, r.state.err
 		return 0, r.state.err
 	case newState:
 	case newState:
 		// First initialization.
 		// First initialization.
-		if err = r.frame.initR(r.src); r.state.next(err) {
+		if err = r.frame.InitR(r.src); r.state.next(err) {
 			return
 			return
 		}
 		}
-		r.data = r.frame.Descriptor.Flags.BlockSizeIndex().get()
+		r.data = r.frame.Descriptor.Flags.BlockSizeIndex().Get()
 	default:
 	default:
 		return 0, r.state.fail()
 		return 0, r.state.fail()
 	}
 	}
@@ -87,7 +90,7 @@ func (r *Reader) Read(buf []byte) (n int, err error) {
 	r.data = r.data[:cap(r.data)]
 	r.data = r.data[:cap(r.data)]
 	for len(buf) >= len(r.data) {
 	for len(buf) >= len(r.data) {
 		// Input buffer large enough and no pending data: uncompress directly into it.
 		// Input buffer large enough and no pending data: uncompress directly into it.
-		switch bn, err = r.frame.Blocks.Block.uncompress(r.frame, r.src, buf); err {
+		switch bn, err = r.frame.Blocks.Block.Uncompress(r.frame, r.src, buf); err {
 		case nil:
 		case nil:
 			r.handler(bn)
 			r.handler(bn)
 			n += bn
 			n += bn
@@ -103,7 +106,7 @@ func (r *Reader) Read(buf []byte) (n int, err error) {
 		return
 		return
 	}
 	}
 	// Read the next block.
 	// Read the next block.
-	switch bn, err = r.frame.Blocks.Block.uncompress(r.frame, r.src, r.data); err {
+	switch bn, err = r.frame.Blocks.Block.Uncompress(r.frame, r.src, r.data); err {
 	case nil:
 	case nil:
 		r.handler(bn)
 		r.handler(bn)
 		r.data = r.data[:bn]
 		r.data = r.data[:bn]
@@ -115,10 +118,10 @@ func (r *Reader) Read(buf []byte) (n int, err error) {
 close:
 close:
 	r.handler(bn)
 	r.handler(bn)
 	n += bn
 	n += bn
-	if er := r.frame.closeR(r.src); er != nil {
+	if er := r.frame.CloseR(r.src); er != nil {
 		err = er
 		err = er
 	}
 	}
-	r.frame.Descriptor.Flags.BlockSizeIndex().put(r.data)
+	r.frame.Descriptor.Flags.BlockSizeIndex().Put(r.data)
 	r.reset(nil)
 	r.reset(nil)
 	return
 	return
 fillbuf:
 fillbuf:

+ 3 - 1
state.go

@@ -4,6 +4,8 @@ import (
 	"errors"
 	"errors"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
+
+	"github.com/pierrec/lz4/internal/lz4errors"
 )
 )
 
 
 //go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go
 //go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go
@@ -58,6 +60,6 @@ func (s *_State) check(errp *error) {
 
 
 func (s *_State) fail() error {
 func (s *_State) fail() error {
 	s.state = errorState
 	s.state = errorState
-	s.err = fmt.Errorf("%w[%s]", ErrInternalUnhandledState, s.state)
+	s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state)
 	return s.err
 	return s.err
 }
 }

+ 33 - 27
writer.go

@@ -1,6 +1,12 @@
 package lz4
 package lz4
 
 
-import "io"
+import (
+	"io"
+
+	"github.com/pierrec/lz4/internal/lz4block"
+	"github.com/pierrec/lz4/internal/lz4errors"
+	"github.com/pierrec/lz4/internal/lz4stream"
+)
 
 
 var writerStates = []aState{
 var writerStates = []aState{
 	noState:     newState,
 	noState:     newState,
@@ -12,7 +18,7 @@ var writerStates = []aState{
 
 
 // NewWriter returns a new LZ4 frame encoder.
 // NewWriter returns a new LZ4 frame encoder.
 func NewWriter(w io.Writer) *Writer {
 func NewWriter(w io.Writer) *Writer {
-	zw := &Writer{frame: NewFrame()}
+	zw := &Writer{frame: lz4stream.NewFrame()}
 	zw.state.init(writerStates)
 	zw.state.init(writerStates)
 	_ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone)
 	_ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone)
 	return zw.Reset(w)
 	return zw.Reset(w)
@@ -20,13 +26,13 @@ func NewWriter(w io.Writer) *Writer {
 
 
 type Writer struct {
 type Writer struct {
 	state   _State
 	state   _State
-	src     io.Writer        // destination writer
-	level   CompressionLevel // how hard to try
-	num     int              // concurrency level
-	frame   *Frame           // frame being built
-	ht      []int            // hash table (set if no concurrency)
-	data    []byte           // pending data
-	idx     int              // size of pending data
+	src     io.Writer                 // destination writer
+	level   lz4block.CompressionLevel // how hard to try
+	num     int                       // concurrency level
+	frame   *lz4stream.Frame          // frame being built
+	ht      []int                     // hash table (set if no concurrency)
+	data    []byte                    // pending data
+	idx     int                       // size of pending data
 	handler func(int)
 	handler func(int)
 }
 }
 
 
@@ -39,7 +45,7 @@ func (w *Writer) Apply(options ...Option) (err error) {
 	case errorState:
 	case errorState:
 		return w.state.err
 		return w.state.err
 	default:
 	default:
-		return ErrOptionClosedOrError
+		return lz4errors.ErrOptionClosedOrError
 	}
 	}
 	for _, o := range options {
 	for _, o := range options {
 		if err = o(w); err != nil {
 		if err = o(w); err != nil {
@@ -61,7 +67,7 @@ func (w *Writer) Write(buf []byte) (n int, err error) {
 	case closedState, errorState:
 	case closedState, errorState:
 		return 0, w.state.err
 		return 0, w.state.err
 	case newState:
 	case newState:
-		if err = w.frame.Descriptor.write(w.frame, w.src); w.state.next(err) {
+		if err = w.frame.Descriptor.Write(w.frame, w.src); w.state.next(err) {
 			return
 			return
 		}
 		}
 	default:
 	default:
@@ -103,28 +109,28 @@ func (w *Writer) write(data []byte, direct bool) error {
 	if w.isNotConcurrent() {
 	if w.isNotConcurrent() {
 		defer w.handler(len(data))
 		defer w.handler(len(data))
 		block := w.frame.Blocks.Block
 		block := w.frame.Blocks.Block
-		return block.compress(w.frame, data, w.ht, w.level).write(w.frame, w.src)
+		return block.Compress(w.frame, data, w.ht, w.level).Write(w.frame, w.src)
 	}
 	}
 	size := w.frame.Descriptor.Flags.BlockSizeIndex()
 	size := w.frame.Descriptor.Flags.BlockSizeIndex()
-	c := make(chan *FrameDataBlock)
+	c := make(chan *lz4stream.FrameDataBlock)
 	w.frame.Blocks.Blocks <- c
 	w.frame.Blocks.Blocks <- c
-	go func(c chan *FrameDataBlock, data []byte, size BlockSizeIndex) {
+	go func(c chan *lz4stream.FrameDataBlock, data []byte, size lz4block.BlockSizeIndex) {
 		defer w.handler(len(data))
 		defer w.handler(len(data))
-		b := newFrameDataBlock(size)
+		b := lz4stream.NewFrameDataBlock(size)
 		zdata := b.Data
 		zdata := b.Data
-		c <- b.compress(w.frame, data, nil, w.level)
+		c <- b.Compress(w.frame, data, nil, w.level)
 		// Wait for the compressed or uncompressed data to no longer be in use
 		// Wait for the compressed or uncompressed data to no longer be in use
 		// and free the allocated buffers
 		// and free the allocated buffers
-		if b.Size.uncompressed() {
+		if b.Size.Uncompressed() {
 			zdata, data = data, zdata
 			zdata, data = data, zdata
 		}
 		}
-		size.put(data)
+		size.Put(data)
 		<-c
 		<-c
-		size.put(zdata)
+		size.Put(zdata)
 	}(c, data, size)
 	}(c, data, size)
 
 
 	if direct {
 	if direct {
-		w.data = size.get()
+		w.data = size.Get()
 	}
 	}
 
 
 	return nil
 	return nil
@@ -149,12 +155,12 @@ func (w *Writer) Close() (err error) {
 		w.idx = 0
 		w.idx = 0
 	}
 	}
 	if w.isNotConcurrent() {
 	if w.isNotConcurrent() {
-		htPool.Put(w.ht)
+		lz4block.HashTablePool.Put(w.ht)
 		size := w.frame.Descriptor.Flags.BlockSizeIndex()
 		size := w.frame.Descriptor.Flags.BlockSizeIndex()
-		size.put(w.data)
+		size.Put(w.data)
 		w.data = nil
 		w.data = nil
 	}
 	}
-	return w.frame.closeW(w.src, w.num)
+	return w.frame.CloseW(w.src, w.num)
 }
 }
 
 
 // Reset clears the state of the Writer w such that it is equivalent to its
 // Reset clears the state of the Writer w such that it is equivalent to its
@@ -167,17 +173,17 @@ func (w *Writer) Reset(writer io.Writer) *Writer {
 	switch w.state.state {
 	switch w.state.state {
 	case newState, closedState, errorState:
 	case newState, closedState, errorState:
 	default:
 	default:
-		panic(ErrWriterNotClosed)
+		panic(lz4errors.ErrWriterNotClosed)
 	}
 	}
 	w.state.state = noState
 	w.state.state = noState
 	w.state.next(nil)
 	w.state.next(nil)
 	w.src = writer
 	w.src = writer
-	w.frame.initW(w.src, w.num)
+	w.frame.InitW(w.src, w.num)
 	size := w.frame.Descriptor.Flags.BlockSizeIndex()
 	size := w.frame.Descriptor.Flags.BlockSizeIndex()
-	w.data = size.get()
+	w.data = size.Get()
 	w.idx = 0
 	w.idx = 0
 	if w.isNotConcurrent() {
 	if w.isNotConcurrent() {
-		w.ht = htPool.Get().([]int)
+		w.ht = lz4block.HashTablePool.Get().([]int)
 	}
 	}
 	return w
 	return w
 }
 }

+ 6 - 5
writer_test.go

@@ -10,6 +10,7 @@ import (
 	"testing"
 	"testing"
 
 
 	"github.com/pierrec/lz4"
 	"github.com/pierrec/lz4"
+	"github.com/pierrec/lz4/internal/lz4block"
 )
 )
 
 
 func TestWriter(t *testing.T) {
 func TestWriter(t *testing.T) {
@@ -129,14 +130,14 @@ func TestIssue51(t *testing.T) {
 
 
 	zbuf := make([]byte, 8192)
 	zbuf := make([]byte, 8192)
 
 
-	n, err := lz4.CompressBlock(data, zbuf, nil)
+	n, err := lz4block.CompressBlock(data, zbuf, nil)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	zbuf = zbuf[:n]
 	zbuf = zbuf[:n]
 
 
 	buf := make([]byte, 8192)
 	buf := make([]byte, 8192)
-	n, err = lz4.UncompressBlock(zbuf, buf)
+	n, err = lz4block.UncompressBlock(zbuf, buf)
 	if err != nil {
 	if err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
@@ -153,11 +154,11 @@ func TestIssue71(t *testing.T) {
 	} {
 	} {
 		t.Run(tc, func(t *testing.T) {
 		t.Run(tc, func(t *testing.T) {
 			src := []byte(tc)
 			src := []byte(tc)
-			bound := lz4.CompressBlockBound(len(tc))
+			bound := lz4block.CompressBlockBound(len(tc))
 
 
 			// Small buffer.
 			// Small buffer.
 			zSmall := make([]byte, bound-1)
 			zSmall := make([]byte, bound-1)
-			n, err := lz4.CompressBlock(src, zSmall, nil)
+			n, err := lz4block.CompressBlock(src, zSmall, nil)
 			if err != nil {
 			if err != nil {
 				t.Fatal(err)
 				t.Fatal(err)
 			}
 			}
@@ -167,7 +168,7 @@ func TestIssue71(t *testing.T) {
 
 
 			// Large enough buffer.
 			// Large enough buffer.
 			zLarge := make([]byte, bound)
 			zLarge := make([]byte, bound)
-			n, err = lz4.CompressBlock(src, zLarge, nil)
+			n, err = lz4block.CompressBlock(src, zLarge, nil)
 			if err != nil {
 			if err != nil {
 				t.Fatal(err)
 				t.Fatal(err)
 			}
 			}