1
0
mirror of https://github.com/StackExchange/dnscontrol.git synced 2024-05-11 05:55:12 +00:00

Switch from govendor to go modules. (#587)

Thanks to @BenoitKnecht for leading the way on this.
This commit is contained in:
Tom Limoncelli
2020-01-18 14:40:28 -05:00
committed by GitHub
parent 31188c3a70
commit 16d0043cce
1554 changed files with 400867 additions and 98222 deletions

View File

@@ -2,7 +2,15 @@ package lz4
import (
"encoding/binary"
"math/bits"
"errors"
)
var (
	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
	ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
	// ErrInvalid is returned when reading an invalid LZ4 archive.
	ErrInvalid = errors.New("lz4: bad magic number")
)
// blockHash hashes 4 bytes into a value < winSize.
@@ -22,14 +30,75 @@ func CompressBlockBound(n int) int {
// The destination buffer must be sized appropriately.
//
// An error is returned if the source data is invalid or the destination buffer is too small.
func UncompressBlock(src, dst []byte) (int, error) {
if len(src) == 0 {
func UncompressBlock(src, dst []byte) (si int, err error) {
defer func() {
// It is now faster to let the runtime panic and recover on out of bound slice access
// than checking indices as we go along.
if recover() != nil {
err = ErrInvalidSourceShortBuffer
}
}()
sn := len(src)
if sn == 0 {
return 0, nil
}
if di := decodeBlock(dst, src); di >= 0 {
return di, nil
var di int
for {
// Literals and match lengths (token).
b := int(src[si])
si++
// Literals.
if lLen := b >> 4; lLen > 0 {
if lLen == 0xF {
for src[si] == 0xFF {
lLen += 0xFF
si++
}
lLen += int(src[si])
si++
}
i := si
si += lLen
di += copy(dst[di:], src[i:si])
if si >= sn {
return di, nil
}
}
si++
_ = src[si] // Bound check elimination.
offset := int(src[si-1]) | int(src[si])<<8
si++
// Match.
mLen := b & 0xF
if mLen == 0xF {
for src[si] == 0xFF {
mLen += 0xFF
si++
}
mLen += int(src[si])
si++
}
mLen += minMatch
// Copy the match.
i := di - offset
if offset > 0 && mLen >= offset {
// Efficiently copy the match dst[di-offset:di] into the dst slice.
bytesToCopy := offset * (mLen / offset)
expanded := dst[i:]
for n := offset; n <= bytesToCopy+offset; n *= 2 {
copy(expanded[n:], expanded[:n])
}
di += bytesToCopy
mLen -= bytesToCopy
}
di += copy(dst[di:], dst[i:i+mLen])
}
return 0, ErrInvalidSourceShortBuffer
}
// CompressBlock compresses the source buffer into the destination one.
@@ -40,12 +109,11 @@ func UncompressBlock(src, dst []byte) (int, error) {
//
// An error is returned if the destination buffer is too small.
func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
defer recoverBlock(&err)
// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
// This significantly speeds up incompressible data and usually has very small impact on compresssion.
// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
const adaptSkipLog = 7
defer func() {
if recover() != nil {
err = ErrInvalidSourceShortBuffer
}
}()
sn, dn := len(src)-mfLimit, len(dst)
if sn <= 0 || dn == 0 {
@@ -54,8 +122,10 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
var si int
// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
// const accInit = 1 << skipStrength
anchor := si // Position of the current literals.
// acc := accInit // Variable step: improves performance on non-compressible data.
for si < sn {
// Hash the next 4 bytes (sequence)...
@@ -65,13 +135,15 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
ref := hashTable[h]
hashTable[h] = si
if ref >= sn { // Invalid reference (dirty hashtable).
si += 1 + (si-anchor)>>adaptSkipLog
si++
continue
}
offset := si - ref
if offset <= 0 || offset >= winSize || // Out of window.
match != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
si += 1 + (si-anchor)>>adaptSkipLog
// si += acc >> skipStrength
// acc++
si++
continue
}
@@ -83,15 +155,12 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
si += minMatch
mLen := si // Match length has minMatch already.
// Find the longest match, first looking by batches of 8 bytes.
for si < sn {
x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
if x == 0 {
si += 8
} else {
// Stop is first non-zero byte.
si += bits.TrailingZeros64(x) >> 3
break
}
for si < sn && binary.LittleEndian.Uint64(src[si:]) == binary.LittleEndian.Uint64(src[si-offset:]) {
si += 8
}
// Then byte by byte.
for si < sn && src[si] == src[si-offset] {
si++
}
mLen = si - mLen
@@ -117,7 +186,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
di++
// Literals.
copy(dst[di:di+lLen], src[anchor:anchor+lLen])
copy(dst[di:], src[anchor:anchor+lLen])
di += lLen + 2
anchor = si
@@ -161,7 +230,7 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
// Incompressible.
return 0, nil
}
di += copy(dst[di:di+len(src)-anchor], src[anchor:])
di += copy(dst[di:], src[anchor:])
return di, nil
}
@@ -174,12 +243,11 @@ func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
//
// An error is returned if the destination buffer is too small.
func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
defer recoverBlock(&err)
// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
// This significantly speeds up incompressible data and usually has very small impact on compresssion.
// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
const adaptSkipLog = 7
defer func() {
if recover() != nil {
err = ErrInvalidSourceShortBuffer
}
}()
sn, dn := len(src)-mfLimit, len(dst)
if sn <= 0 || dn == 0 {
@@ -188,7 +256,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
var si int
// hashTable: stores the last position found for a given hash
// chainTable: stores previous positions for a given hash
// chainTable: stores previous positions for a given hash
var hashTable, chainTable [winSize]int
if depth <= 0 {
@@ -212,17 +280,13 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
}
ml := 0
// Compare the current position with a previous with the same hash.
for ml < sn-si {
x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
if x == 0 {
ml += 8
} else {
// Stop is first non-zero byte.
ml += bits.TrailingZeros64(x) >> 3
break
}
for ml < sn-si && binary.LittleEndian.Uint64(src[next+ml:]) == binary.LittleEndian.Uint64(src[si+ml:]) {
ml += 8
}
if ml < minMatch || ml <= mLen {
for ml < sn-si && src[next+ml] == src[si+ml] {
ml++
}
if ml+1 < minMatch || ml <= mLen {
// Match too small (<minMatch) or smaller than the current match.
continue
}
@@ -237,7 +301,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
// No match found.
if mLen == 0 {
si += 1 + (si-anchor)>>adaptSkipLog
si++
continue
}
@@ -283,7 +347,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
di++
// Literals.
copy(dst[di:di+lLen], src[anchor:anchor+lLen])
copy(dst[di:], src[anchor:anchor+lLen])
di += lLen
anchor = si
@@ -328,6 +392,6 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
// Incompressible.
return 0, nil
}
di += copy(dst[di:di+len(src)-anchor], src[anchor:])
di += copy(dst[di:], src[anchor:])
return di, nil
}