Browse Source

Writer: made compatible with the no-map scheme for block max size

Pierre Curto 6 years ago
parent
commit
d8871004e9
3 changed files with 15 additions and 15 deletions
  1. 3 0
      cmd/lz4c/compress.go
  2. 7 10
      lz4.go
  3. 5 5
      writer.go

+ 3 - 0
cmd/lz4c/compress.go

@@ -23,6 +23,8 @@ func Compress(fs *flag.FlagSet) cmdflag.Handler {
 	fs.BoolVar(&streamChecksum, "sc", false, "disable stream checksum")
 	var level int
 	fs.IntVar(&level, "l", 0, "compression level (0=fastest)")
+	var concurrency int
+	fs.IntVar(&concurrency, "c", -1, "concurrency (default=all CPUs)")
 
 	return func(args ...string) (int, error) {
 		sz, err := bytefmt.ToBytes(blockMaxSize)
@@ -37,6 +39,7 @@ func Compress(fs *flag.FlagSet) cmdflag.Handler {
 			NoChecksum:       streamChecksum,
 			CompressionLevel: level,
 		}
+		zw.WithConcurrency(concurrency)
 
 		// Use stdin/stdout if no file provided.
 		if len(args) == 0 {

+ 7 - 10
lz4.go

@@ -50,16 +50,12 @@ const (
 )
 
 var (
-	bsMapID = map[byte]int{4: blockSize64K, 5: blockSize256K, 6: blockSize1M, 7: blockSize4M}
 	// Keep a pool of buffers for each valid block sizes.
-	bsMapValue = map[int]struct {
-		byte
-		*sync.Pool
-	}{
-		blockSize64K:  {4, newBufferPool(2 * blockSize64K)},
-		blockSize256K: {5, newBufferPool(2 * blockSize256K)},
-		blockSize1M:   {6, newBufferPool(2 * blockSize1M)},
-		blockSize4M:   {7, newBufferPool(2 * blockSize4M)},
+	bsMapValue = [...]*sync.Pool{
+		newBufferPool(2 * blockSize64K),
+		newBufferPool(2 * blockSize256K),
+		newBufferPool(2 * blockSize1M),
+		newBufferPool(2 * blockSize4M),
 	}
 )
 
@@ -75,7 +71,8 @@ func newBufferPool(size int) *sync.Pool {
 // putBuffer returns a buffer to its pool.
 func putBuffer(size int, buf []byte) {
 	if cap(buf) > 0 {
-		bsMapValue[size].Pool.Put(buf[:cap(buf)])
+		idx := blockSizeValueToIndex(size) - 4
+		bsMapValue[idx].Put(buf[:cap(buf)])
 	}
 }
 func blockSizeIndexToValue(i byte) int {

+ 5 - 5
writer.go

@@ -93,7 +93,8 @@ func (z *Writer) WithConcurrency(n int) *Writer {
 // The returned buffers are for decompression and compression respectively.
 func (z *Writer) newBuffers() {
 	bSize := z.Header.BlockMaxSize
-	buf := bsMapValue[bSize].Pool.Get().([]byte)
+	idx := blockSizeValueToIndex(bSize) - 4
+	buf := bsMapValue[idx].Get().([]byte)
 	z.data = buf[:bSize] // Uncompressed buffer is the first half.
 }
 
@@ -108,12 +109,11 @@ func (z *Writer) freeBuffers() {
 func (z *Writer) writeHeader() error {
 	// Default to 4Mb if BlockMaxSize is not set.
 	if z.Header.BlockMaxSize == 0 {
-		z.Header.BlockMaxSize = bsMapID[7]
+		z.Header.BlockMaxSize = blockSize4M
 	}
 	// The only option that needs to be validated.
 	bSize := z.Header.BlockMaxSize
-	m, ok := bsMapValue[bSize]
-	if !ok {
+	if !isValidBlockSize(z.Header.BlockMaxSize) {
 		return fmt.Errorf("lz4: invalid block max size: %d", bSize)
 	}
 	// Allocate the compressed/uncompressed buffers.
@@ -138,7 +138,7 @@ func (z *Writer) writeHeader() error {
 		flg |= 1 << 2
 	}
 	buf[4] = flg
-	buf[5] = m.byte << 4
+	buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4
 
 	// Current buffer size: magic(4) + flags(1) + block max size (1).
 	n := 6