reverted CompressBlockHC to use empty hashTable and chainTable

Pierre.Curto · 5 years ago · commit 866e977adb
5 changed files with 22 additions and 34 deletions
  1. bench_test.go (+1 -1)
  2. internal/lz4block/block.go (+4 -15)
  3. internal/lz4block/block_test.go (+15 -16)
  4. internal/lz4stream/frame.go (+1 -1)
  5. lz4.go (+1 -1)
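In short, the high-compression block encoder no longer takes (or needs) a caller-managed hash table. Below is a minimal sketch of calling the public API after this commit; the v4 module path and the sample input are assumptions, while the four-argument signature and the depth value 16 come straight from the diffs that follow.

package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4/v4" // assumed module path for this version
)

func main() {
	// Illustrative, highly compressible input.
	src := bytes.Repeat([]byte("Peter Piper picked a peck of pickled peppers. "), 32)
	dst := make([]byte, lz4.CompressBlockBound(len(src)))

	// The trailing hashTable argument is ignored after this commit:
	// CompressBlockHC now allocates its own hash and chain tables.
	n, err := lz4.CompressBlockHC(src, dst, 16, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("compressed %d -> %d bytes\n", len(src), n)
}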

+ 1 - 1
bench_test.go

@@ -40,7 +40,7 @@ func BenchmarkCompressHC(b *testing.B) {
 	b.ResetTimer()

 	for i := 0; i < b.N; i++ {
-		_, _ = lz4block.CompressBlockHC(pg1661, buf, 16, nil)
+		_, _ = lz4block.CompressBlockHC(pg1661, buf, 16)
 	}
 }


+ 4 - 15
internal/lz4block/block.go

@@ -239,7 +239,7 @@ func blockHashHC(x uint32) uint32 {
 	return x * hasher >> (32 - winSizeLog)
 }

-func CompressBlockHC(src, dst []byte, depth CompressionLevel, hashTable []int) (_ int, err error) {
+func CompressBlockHC(src, dst []byte, depth CompressionLevel) (_ int, err error) {
 	defer recoverBlock(&err)

 	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
@@ -250,26 +250,16 @@ func CompressBlockHC(src, dst []byte, depth CompressionLevel, hashTable []int) (
 	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
 	const adaptSkipLog = 7

-	var chainTable []int
 	var si, di, anchor int
 	// hashTable: stores the last position found for a given hash
 	// chainTable: stores previous positions for a given hash
+	var hashTable, chainTable [winSize]int
+
 	sn := len(src) - mfLimit
 	if sn <= 0 {
 		goto lastLiterals
 	}

-	if cap(hashTable) < htSize {
-		hashTable = HashTablePool.Get().([]int)
-		defer HashTablePool.Put(hashTable)
-	} else {
-		hashTable = hashTable[:htSize]
-	}
-	_ = hashTable[htSize-1]
-	chainTable = HashTablePool.Get().([]int)
-	defer HashTablePool.Put(chainTable)
-	_ = chainTable[htSize-1]
-
 	if depth <= 0 {
 		depth = winSize
 	}
@@ -282,7 +272,7 @@ func CompressBlockHC(src, dst []byte, depth CompressionLevel, hashTable []int) (
 		// Follow the chain until out of window and give the longest match.
 		mLen := 0
 		offset := 0
-		for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
+		for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = chainTable[next&winMask], try-1 {
 			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
 			// must match to improve on the match length.
 			if src[next+mLen] != src[si+mLen] {
@@ -308,7 +298,6 @@ func CompressBlockHC(src, dst []byte, depth CompressionLevel, hashTable []int) (
 			mLen = ml
 			offset = si - next
 			// Try another previous position with the same hash.
-			try--
 		}
 		chainTable[si&winMask] = hashTable[h]
 		hashTable[h] = si
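Note that the loop rewrite above also tightens the search bound: try is now decremented on every probe (in the for post statement) rather than only after a candidate improves the match, so depth caps the total number of chain entries examined. The sketch below illustrates the chain-walk structure, assuming the package's 64 KB window constants; walkChain and the sample positions are illustrative helpers, not library code.

package main

import "fmt"

const (
	winSizeLog = 16
	winSize    = 1 << winSizeLog // 64 KB match window
	winMask    = winSize - 1
)

// hashTable[h] holds the most recent source position whose hash is h.
// chainTable[p&winMask] holds the previous position sharing p's hash,
// forming a singly linked chain back through the window.
func walkChain(hashTable, chainTable []int, h uint32, si, depth int) []int {
	var candidates []int
	for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = chainTable[next&winMask], try-1 {
		candidates = append(candidates, next)
	}
	return candidates
}

func main() {
	hashTable := make([]int, winSize)
	chainTable := make([]int, winSize)
	// Simulate inserting positions 1, 100 and 5000, all hashing to h=42,
	// exactly as the two lines at the end of the loop above do.
	h := uint32(42)
	for _, si := range []int{1, 100, 5000} {
		chainTable[si&winMask] = hashTable[h]
		hashTable[h] = si
	}
	fmt.Println(walkChain(hashTable, chainTable, h, 6000, 16)) // [5000 100 1]
}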

+ 15 - 16
internal/lz4block/block_test.go

@@ -20,14 +20,14 @@ type testcase struct {

 var rawFiles = []testcase{
 	// {"testdata/207326ba-36f8-11e7-954a-aca46ba8ca73.png", true, nil},
-	{"testdata/e.txt", false, nil},
-	{"testdata/gettysburg.txt", true, nil},
-	{"testdata/Mark.Twain-Tom.Sawyer.txt", true, nil},
-	{"testdata/pg1661.txt", true, nil},
-	{"testdata/pi.txt", false, nil},
-	{"testdata/random.data", false, nil},
-	{"testdata/repeat.txt", true, nil},
-	{"testdata/pg1661.txt", true, nil},
+	{"../../testdata/e.txt", false, nil},
+	{"../../testdata/gettysburg.txt", true, nil},
+	{"../../testdata/Mark.Twain-Tom.Sawyer.txt", true, nil},
+	{"../../testdata/pg1661.txt", true, nil},
+	{"../../testdata/pi.txt", false, nil},
+	{"../../testdata/random.data", false, nil},
+	{"../../testdata/repeat.txt", true, nil},
+	{"../../testdata/pg1661.txt", true, nil},
 }

 func TestCompressUncompressBlock(t *testing.T) {
@@ -101,12 +101,11 @@ func TestCompressUncompressBlock(t *testing.T) {
 					return lz4block.CompressBlock(src, dst, nil)
 				})
 			})
-			//TODO
-			//t.Run(fmt.Sprintf("%s HC", tc.file), func(t *testing.T) {
-			//	nhc = run(t, tc, func(src, dst []byte) (int, error) {
-			//		return lz4.CompressBlockHC(src, dst, 16, nil)
-			//	})
-			//})
+			t.Run(fmt.Sprintf("%s HC", tc.file), func(t *testing.T) {
+				nhc = run(t, tc, func(src, dst []byte) (int, error) {
+					return lz4.CompressBlockHC(src, dst, 10, nil)
+				})
+			})
 		})
 		if !t.Failed() {
 			t.Logf("%-40s: %8d / %8d / %8d\n", tc.file, n, nhc, len(src))
@@ -130,7 +129,7 @@ func TestCompressCornerCase_CopyDstUpperBound(t *testing.T) {
 		}
 	}

-	file := "testdata/upperbound.data"
+	file := "../../testdata/upperbound.data"
 	src, err := ioutil.ReadFile(file)
 	if err != nil {
 		t.Fatal(err)
@@ -145,7 +144,7 @@ func TestCompressCornerCase_CopyDstUpperBound(t *testing.T) {
 	t.Run(fmt.Sprintf("%s HC", file), func(t *testing.T) {
 		t.Parallel()
 		run(src, func(src, dst []byte) (int, error) {
-			return lz4block.CompressBlockHC(src, dst, 16, nil)
+			return lz4block.CompressBlockHC(src, dst, 16)
 		})
 	})
 }

+ 1 - 1
internal/lz4stream/frame.go

@@ -258,7 +258,7 @@ func (b *FrameDataBlock) Compress(f *Frame, src []byte, ht []int, level lz4block
 	case lz4block.Fast:
 		n, _ = lz4block.CompressBlock(src, data, ht)
 	default:
-		n, _ = lz4block.CompressBlockHC(src, data, level, ht)
+		n, _ = lz4block.CompressBlockHC(src, data, level)
 	}
 	if n == 0 {
 		b.Size.UncompressedSet(true)

+ 1 - 1
lz4.go

@@ -59,7 +59,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
 //
 // An error is returned if the destination buffer is too small.
 func CompressBlockHC(src, dst []byte, depth CompressionLevel, hashTable []int) (_ int, err error) {
-	return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth), hashTable)
+	return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth))
 }

 const (
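One compatibility note on the exported wrapper above: it keeps its four-argument signature, so existing callers compile unchanged and the slice is simply dropped. A hedged illustration, with src and dst as in the earlier sketch:

// Equivalent after this commit: the hashTable slice is never read.
n1, _ := lz4.CompressBlockHC(src, dst, 16, nil)
n2, _ := lz4.CompressBlockHC(src, dst, 16, make([]int, 1<<16))
// n1 == n2, since both calls use internally allocated tables.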