Mirror of https://github.com/mudler/luet.git (synced 2025-09-13 05:42:52 +00:00)

Commit: 🔧 Update modules

Vendored update of github.com/klauspost/compress/zstd. Changed files:
vendor/github.com/klauspost/compress/zstd/README.md (generated, vendored): 32 lines changed
@@ -152,7 +152,7 @@ file out level insize outsize millis mb/s
 silesia.tar zskp 1 211947520 73101992 643 313.87
 silesia.tar zskp 2 211947520 67504318 969 208.38
 silesia.tar zskp 3 211947520 64595893 2007 100.68
-silesia.tar zskp 4 211947520 60995370 7691 26.28
+silesia.tar zskp 4 211947520 60995370 8825 22.90
 
 cgo zstd:
 silesia.tar zstd 1 211947520 73605392 543 371.56
@@ -162,7 +162,7 @@ silesia.tar zstd 9 211947520 60212393 5063 39.92
 
 gzip, stdlib/this package:
 silesia.tar gzstd 1 211947520 80007735 1654 122.21
-silesia.tar gzkp 1 211947520 80369488 1168 173.06
+silesia.tar gzkp 1 211947520 80136201 1152 175.45
 
 GOB stream of binary data. Highly compressible.
 https://files.klauspost.com/compress/gob-stream.7z
@@ -171,13 +171,15 @@ file out level insize outsize millis mb/s
 gob-stream zskp 1 1911399616 235022249 3088 590.30
 gob-stream zskp 2 1911399616 205669791 3786 481.34
 gob-stream zskp 3 1911399616 175034659 9636 189.17
-gob-stream zskp 4 1911399616 167273881 29337 62.13
+gob-stream zskp 4 1911399616 165609838 50369 36.19
 
 gob-stream zstd 1 1911399616 249810424 2637 691.26
 gob-stream zstd 3 1911399616 208192146 3490 522.31
 gob-stream zstd 6 1911399616 193632038 6687 272.56
 gob-stream zstd 9 1911399616 177620386 16175 112.70
 
 gob-stream gzstd 1 1911399616 357382641 10251 177.82
-gob-stream gzkp 1 1911399616 362156523 5695 320.08
+gob-stream gzkp 1 1911399616 359753026 5438 335.20
 
 The test data for the Large Text Compression Benchmark is the first
 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
@@ -187,11 +189,13 @@ file out level insize outsize millis mb/s
 enwik9 zskp 1 1000000000 343848582 3609 264.18
 enwik9 zskp 2 1000000000 317276632 5746 165.97
 enwik9 zskp 3 1000000000 292243069 12162 78.41
-enwik9 zskp 4 1000000000 275241169 36430 26.18
+enwik9 zskp 4 1000000000 262183768 82837 11.51
 
 enwik9 zstd 1 1000000000 358072021 3110 306.65
 enwik9 zstd 3 1000000000 313734672 4784 199.35
 enwik9 zstd 6 1000000000 295138875 10290 92.68
 enwik9 zstd 9 1000000000 278348700 28549 33.40
 
 enwik9 gzstd 1 1000000000 382578136 9604 99.30
 enwik9 gzkp 1 1000000000 383825945 6544 145.73
 
@@ -202,13 +206,15 @@ file out level insize outsize millis mb/s
 github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
 github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
 github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
-github-june-2days-2019.json zskp 4 6273951764 503314661 93811 63.78
+github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
 
 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
 
 github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
-github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03
+github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
 
 VM Image, Linux mint with a few installed applications:
 https://files.klauspost.com/compress/rawstudio-mint14.7z
@@ -217,13 +223,15 @@ file out level insize outsize millis mb/s
 rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
 rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
 rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
-rawstudio-mint14.tar zskp 4 8558382592 3020370044 404956 20.16
+rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
 
 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
 
 rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
-rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49
+rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
 
 CSV data:
 https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
@@ -232,13 +240,15 @@ file out level insize outsize millis mb/s
 nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
 nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
 nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
-nyc-taxi-data-10M.csv zskp 4 3325605752 490907191 65939 48.10
+nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
 
 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
 
 nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
-nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
 ```
 
 ## Decompressor
vendor/github.com/klauspost/compress/zstd/blockdec.go (generated, vendored): 4 lines changed
@@ -168,10 +168,10 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 
 	// Read block data.
 	if cap(b.dataStorage) < cSize {
-		if b.lowMem {
+		if b.lowMem || cSize > maxCompressedBlockSize {
 			b.dataStorage = make([]byte, 0, cSize)
 		} else {
-			b.dataStorage = make([]byte, 0, maxBlockSize)
+			b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
 		}
 	}
 	if cap(b.dst) <= maxSize {
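Editor's note: the reuse buffer for compressed block data is now capped at maxCompressedBlockSize (128 KB in this package) instead of the much larger maxBlockSize, with an exact-size fallback for any block claiming to be bigger. A minimal sketch of that allocation policy; growStorage is a hypothetical helper mirroring the hunk above, not the vendored API:

```go
const maxCompressedBlockSize = 128 << 10 // assumed value of the package constant

// growStorage returns a zero-length buffer with enough capacity for cSize:
// an exact allocation when memory is tight or the size is abnormal, and a
// single full-size allocation (reused across blocks) otherwise.
func growStorage(storage []byte, cSize int, lowMem bool) []byte {
	if cap(storage) >= cSize {
		return storage[:0]
	}
	if lowMem || cSize > maxCompressedBlockSize {
		return make([]byte, 0, cSize)
	}
	return make([]byte, 0, maxCompressedBlockSize)
}
```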
vendor/github.com/klauspost/compress/zstd/decoder.go (generated, vendored): 5 lines changed
@@ -260,9 +260,10 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
 		if len(d.current.b) > 0 {
 			n2, err2 := w.Write(d.current.b)
 			n += int64(n2)
-			if err2 != nil && d.current.err == nil {
+			if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) {
 				d.current.err = err2
+				break
 			} else if n2 != len(d.current.b) {
 				d.current.err = io.ErrShortWrite
 			}
 		}
 		if d.current.err != nil {
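Editor's note: before this fix, a sink write error that arrived together with io.EOF from the decode side could be swallowed by WriteTo. A minimal usage sketch of the affected path, reading from stdin and writing to stdout for illustration:

```go
package main

import (
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	dec, err := zstd.NewReader(os.Stdin)
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	// WriteTo drains the decoder into the sink. With this change, a write
	// error on os.Stdout is reported even when it coincides with the end
	// of the compressed stream, and copying stops immediately.
	if _, err := dec.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
}
```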
vendor/github.com/klauspost/compress/zstd/decoder_options.go (generated, vendored): 25 lines changed
@@ -17,14 +17,16 @@ type decoderOptions struct {
 	lowMem         bool
 	concurrent     int
 	maxDecodedSize uint64
+	maxWindowSize  uint64
 	dicts          []dict
 }
 
 func (o *decoderOptions) setDefault() {
 	*o = decoderOptions{
 		// use less ram: true for now, but may change.
-		lowMem:     true,
-		concurrent: runtime.GOMAXPROCS(0),
+		lowMem:        true,
+		concurrent:    runtime.GOMAXPROCS(0),
+		maxWindowSize: MaxWindowSize,
 	}
 	o.maxDecodedSize = 1 << 63
 }
@@ -52,7 +54,6 @@ func WithDecoderConcurrency(n int) DOption {
 // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
 // non-streaming operations or maximum window size for streaming operations.
 // This can be used to control memory usage of potentially hostile content.
-// For streaming operations, the maximum window size is capped at 1<<30 bytes.
 // Maximum and default is 1 << 63 bytes.
 func WithDecoderMaxMemory(n uint64) DOption {
 	return func(o *decoderOptions) error {
@@ -81,3 +82,21 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
 		return nil
 	}
 }
+
+// WithDecoderMaxWindow allows to set a maximum window size for decodes.
+// This allows rejecting packets that will cause big memory usage.
+// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
+// If WithDecoderMaxMemory is set to a lower value, that will be used.
+// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec.
+func WithDecoderMaxWindow(size uint64) DOption {
+	return func(o *decoderOptions) error {
+		if size < MinWindowSize {
+			return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes")
+		}
+		if size > (1<<41)+7*(1<<38) {
+			return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB")
+		}
+		o.maxWindowSize = size
+		return nil
+	}
+}
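Editor's note: WithDecoderMaxWindow is new public API in this update. A short usage sketch, with limits chosen arbitrarily for illustration; per the framedec.go change below, frames declaring a larger window fail with ErrWindowSizeExceeded:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// compressedInput stands in for real zstd frame data.
var compressedInput []byte

func main() {
	// Reject frames that declare a window above 16 MB, and cap in-memory
	// decoded output at 64 MB. If both options are set, the lower value
	// governs the window check.
	dec, err := zstd.NewReader(nil,
		zstd.WithDecoderMaxWindow(16<<20),
		zstd.WithDecoderMaxMemory(64<<20),
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	_, err = dec.DecodeAll(compressedInput, nil)
	fmt.Println(err) // zstd.ErrWindowSizeExceeded for an oversized frame
}
```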
vendor/github.com/klauspost/compress/zstd/enc_base.go (generated, vendored): 4 lines changed
@@ -38,8 +38,8 @@ func (e *fastBase) AppendCRC(dst []byte) []byte {
 
 // WindowSize returns the window size of the encoder,
 // or a window size small enough to contain the input size, if > 0.
-func (e *fastBase) WindowSize(size int) int32 {
-	if size > 0 && size < int(e.maxMatchOff) {
+func (e *fastBase) WindowSize(size int64) int32 {
+	if size > 0 && size < int64(e.maxMatchOff) {
 		b := int32(1) << uint(bits.Len(uint(size)))
 		// Keep minimum window.
 		if b < 1024 {
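Editor's note: widening the parameter from int to int64 matters for declared content sizes beyond int range on 32-bit platforms. The selection logic itself is unchanged; a standalone sketch of it, under the assumption that the omitted tail of the function returns b (or the maximum offset when no size is known):

```go
package main

import (
	"fmt"
	"math/bits"
)

// windowSizeFor mirrors fastBase.WindowSize: for a known input smaller than
// the encoder's maximum match offset, shrink the window to the smallest
// power of two that still covers the input, but never below 1 KB.
func windowSizeFor(size int64, maxMatchOff int32) int32 {
	if size > 0 && size < int64(maxMatchOff) {
		b := int32(1) << uint(bits.Len(uint(size)))
		if b < 1024 {
			b = 1024 // keep minimum window
		}
		return b
	}
	return maxMatchOff
}

func main() {
	fmt.Println(windowSizeFor(3000, 1<<23)) // 4096
}
```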
vendor/github.com/klauspost/compress/zstd/enc_best.go (generated, vendored): 161 lines changed
@@ -5,22 +5,61 @@
 package zstd
 
 import (
+	"bytes"
+	"fmt"
 	"math/bits"
+
+	"github.com/klauspost/compress"
 )
 
 const (
-	bestLongTableBits = 20                     // Bits used in the long match table
+	bestLongTableBits = 22                     // Bits used in the long match table
 	bestLongTableSize = 1 << bestLongTableBits // Size of the table
+	bestLongLen       = 8                      // Bytes used for table hash
 
 	// Note: Increasing the short table bits or making the hash shorter
 	// can actually lead to compression degradation since it will 'steal' more from the
 	// long match table and match offsets are quite big.
 	// This greatly depends on the type of input.
-	bestShortTableBits = 16                      // Bits used in the short match table
+	bestShortTableBits = 18                      // Bits used in the short match table
 	bestShortTableSize = 1 << bestShortTableBits // Size of the table
+	bestShortLen       = 4                       // Bytes used for table hash
 
 )
 
+type match struct {
+	offset int32
+	s      int32
+	length int32
+	rep    int32
+	est    int32
+}
+
+const highScore = 25000
+
+// estBits will estimate output bits from predefined tables.
+func (m *match) estBits(bitsPerByte int32) {
+	mlc := mlCode(uint32(m.length - zstdMinMatch))
+	var ofc uint8
+	if m.rep < 0 {
+		ofc = ofCode(uint32(m.s-m.offset) + 3)
+	} else {
+		ofc = ofCode(uint32(m.rep))
+	}
+	// Cost, excluding
+	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
+
+	// Add cost of match encoding...
+	m.est = int32(ofTT.outBits + mlTT.outBits)
+	m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16)
+	// Subtract savings compared to literal encoding...
+	m.est -= (m.length * bitsPerByte) >> 10
+	if m.est > 0 {
+		// Unlikely gain..
+		m.length = 0
+		m.est = highScore
+	}
+}
+
 // bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
 // The long match table contains the previous entry with the same hash,
 // effectively making it a "chain" of length 2.
@@ -109,6 +148,14 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
 		return
 	}
 
+	// Use this to estimate literal cost.
+	// Scaled by 10 bits.
+	bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
+	// Huffman can never go < 1 bit/byte
+	if bitsPerByte < 1024 {
+		bitsPerByte = 1024
+	}
+
 	// Override src
 	src = e.hist
 	sLimit := int32(len(src)) - inputMargin
@@ -145,51 +192,49 @@ encodeLoop:
 			panic("offset0 was 0")
 		}
 
-		type match struct {
-			offset int32
-			s      int32
-			length int32
-			rep    int32
-		}
-		matchAt := func(offset int32, s int32, first uint32, rep int32) match {
-			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
-				return match{offset: offset, s: s}
-			}
-			return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
-		}
-
 		bestOf := func(a, b match) match {
-			aScore := b.s - a.s + a.length
-			bScore := a.s - b.s + b.length
-			if a.rep < 0 {
-				aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8
-			}
-			if b.rep < 0 {
-				bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8
-			}
-			if aScore >= bScore {
+			if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
 				return a
 			}
 			return b
 		}
 		const goodEnough = 100
 
-		nextHashL := hash8(cv, bestLongTableBits)
-		nextHashS := hash4x64(cv, bestShortTableBits)
+		nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
+		nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
 		candidateL := e.longTable[nextHashL]
 		candidateS := e.table[nextHashS]
 
+		matchAt := func(offset int32, s int32, first uint32, rep int32) match {
+			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
+				return match{s: s, est: highScore}
+			}
+			if debugAsserts {
+				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
+					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+				}
+			}
+			m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
+			m.estBits(bitsPerByte)
+			return m
+		}
+
 		best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
 		best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
 		best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
 
 		if canRepeat && best.length < goodEnough {
-			best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1))
-			best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2))
-			best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3))
+			cv32 := uint32(cv >> 8)
+			spp := s + 1
+			best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
+			best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
+			best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
 			if best.length > 0 {
-				best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1))
-				best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2))
-				best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3))
+				cv32 = uint32(cv >> 24)
+				spp += 2
+				best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
+				best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
+				best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
 			}
 		}
 		// Load next and check...
@@ -209,22 +254,28 @@ encodeLoop:
 		}
 
 		s++
-		candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)]
+		candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
 		cv = load6432(src, s)
 		cv2 := load6432(src, s+1)
-		candidateL = e.longTable[hash8(cv, bestLongTableBits)]
-		candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)]
+		candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
+		candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
 
 		// Short at s+1
 		best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
 		// Long at s+1, s+2
 		best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
 		best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
 		best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
 		best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
+
+		if false {
+			// Short at s+3.
+			// Too often worse...
+			best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
+		}
 		// See if we can find a better match by checking where the current best ends.
 		// Use that offset to see if we can find a better full match.
 		if sAt := best.s + best.length; sAt < sLimit {
-			nextHashL := hash8(load6432(src, sAt), bestLongTableBits)
+			nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
 			candidateEnd := e.longTable[nextHashL]
 			if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
 				bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
@@ -236,6 +287,12 @@ encodeLoop:
 			}
 		}
 
+		if debugAsserts {
+			if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
+				panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
+			}
+		}
+
 		// We have a match, we can store the forward value
 		if best.rep > 0 {
 			s = best.s
@@ -284,8 +341,8 @@ encodeLoop:
 			off := index0 + e.cur
 			for index0 < s-1 {
 				cv0 := load6432(src, index0)
-				h0 := hash8(cv0, bestLongTableBits)
-				h1 := hash4x64(cv0, bestShortTableBits)
+				h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
+				h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
 				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 				e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
 				off++
@@ -311,7 +368,7 @@ encodeLoop:
 			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
 		}
 
-		if debugAsserts && canRepeat && int(offset1) > len(src) {
+		if debugAsserts && int(offset1) > len(src) {
 			panic("invalid offset")
 		}
 
@@ -352,8 +409,8 @@ encodeLoop:
 		// every entry
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
-			h0 := hash8(cv0, bestLongTableBits)
-			h1 := hash4x64(cv0, bestShortTableBits)
+			h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
+			h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
 			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
@@ -374,8 +431,8 @@ encodeLoop:
 		}
 
 		// Store this, since we have it.
-		nextHashS := hash4x64(cv, bestShortTableBits)
-		nextHashL := hash8(cv, bestLongTableBits)
+		nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
+		nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
 
 		// We have at least 4 byte match.
 		// No need to check backwards. We come straight from a match
@@ -425,7 +482,7 @@ func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
 	e.Encode(blk, src)
 }
 
-// ResetDict will reset and set a dictionary if not nil
+// Reset will reset and set a dictionary if not nil
 func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
 	e.resetBase(d, singleBlock)
 	if d == nil {
@@ -441,10 +498,10 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
 			const hashLog = bestShortTableBits
 
 			cv := load6432(d.content, i-e.maxMatchOff)
-			nextHash := hash4x64(cv, hashLog)      // 0 -> 4
-			nextHash1 := hash4x64(cv>>8, hashLog)  // 1 -> 5
-			nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6
-			nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7
+			nextHash := hashLen(cv, hashLog, bestShortLen)      // 0 -> 4
+			nextHash1 := hashLen(cv>>8, hashLog, bestShortLen)  // 1 -> 5
+			nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6
+			nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7
 			e.dictTable[nextHash] = prevEntry{
 				prev:   e.dictTable[nextHash].offset,
 				offset: i,
@@ -472,7 +529,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
 	}
 	if len(d.content) >= 8 {
 		cv := load6432(d.content, 0)
-		h := hash8(cv, bestLongTableBits)
+		h := hashLen(cv, bestLongTableBits, bestLongLen)
 		e.dictLongTable[h] = prevEntry{
 			offset: e.maxMatchOff,
 			prev:   e.dictLongTable[h].offset,
@@ -482,7 +539,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
 		off := 8 // First to read
 		for i := e.maxMatchOff + 1; i < end; i++ {
 			cv = cv>>8 | (uint64(d.content[off]) << 56)
-			h := hash8(cv, bestLongTableBits)
+			h := hashLen(cv, bestLongTableBits, bestLongLen)
 			e.dictLongTable[h] = prevEntry{
 				offset: i,
 				prev:   e.dictLongTable[h].offset,
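Editor's note: the heart of this rewrite is that best-mode candidates now carry an estimated output cost in bits (est, from the predefined FSE tables plus the source's Shannon entropy), and bestOf compares those costs instead of a length-based score. A simplified, self-contained sketch of the comparison; the function name and values are illustrative:

```go
package main

import "fmt"

// better reports whether candidate a beats b, mirroring bestOf above.
// Costs are estimated output bits (negative means savings); bitsPerByte is
// the literal cost scaled by 1024, i.e. 10 fractional bits. A candidate
// that starts earlier gets credit for the literals it covers that the
// other candidate would leave unencoded.
func better(aEst, aS, bEst, bS, bitsPerByte int32) bool {
	return aEst+(aS-bS)*bitsPerByte>>10 < bEst+(bS-aS)*bitsPerByte>>10
}

func main() {
	// A match saving 90 bits but starting 8 bytes later loses to one
	// saving 60 bits now, when literals cost about 8 bits each (8<<10).
	fmt.Println(better(-60, 0, -90, 8, 8<<10)) // true
}
```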
vendor/github.com/klauspost/compress/zstd/enc_better.go (generated, vendored): 64 lines changed
@@ -9,6 +9,7 @@ import "fmt"
 const (
 	betterLongTableBits = 19                       // Bits used in the long match table
 	betterLongTableSize = 1 << betterLongTableBits // Size of the table
+	betterLongLen       = 8                        // Bytes used for table hash
 
 	// Note: Increasing the short table bits or making the hash shorter
 	// can actually lead to compression degradation since it will 'steal' more from the
@@ -16,6 +17,7 @@ const (
 	// This greatly depends on the type of input.
 	betterShortTableBits = 13                        // Bits used in the short match table
 	betterShortTableSize = 1 << betterShortTableBits // Size of the table
+	betterShortLen       = 5                         // Bytes used for table hash
 
 	betterLongTableShardCnt  = 1 << (betterLongTableBits - dictShardBits)    // Number of shards in the table
 	betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard
@@ -154,8 +156,8 @@ encodeLoop:
 			panic("offset0 was 0")
 		}
 
-		nextHashS := hash5(cv, betterShortTableBits)
-		nextHashL := hash8(cv, betterLongTableBits)
+		nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+		nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
 		candidateL := e.longTable[nextHashL]
 		candidateS := e.table[nextHashS]
 
@@ -214,10 +216,10 @@ encodeLoop:
 			for index0 < s-1 {
 				cv0 := load6432(src, index0)
 				cv1 := cv0 >> 8
-				h0 := hash8(cv0, betterLongTableBits)
+				h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
 				off := index0 + e.cur
 				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
-				e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+				e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
 				index0 += 2
 			}
 			cv = load6432(src, s)
@@ -275,10 +277,10 @@ encodeLoop:
 			for index0 < s-1 {
 				cv0 := load6432(src, index0)
 				cv1 := cv0 >> 8
-				h0 := hash8(cv0, betterLongTableBits)
+				h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
 				off := index0 + e.cur
 				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
-				e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+				e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
 				index0 += 2
 			}
 			cv = load6432(src, s)
@@ -353,7 +355,7 @@ encodeLoop:
 			// See if we can find a long match at s+1
 			const checkAt = 1
 			cv := load6432(src, s+checkAt)
-			nextHashL = hash8(cv, betterLongTableBits)
+			nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
 			candidateL = e.longTable[nextHashL]
 			coffsetL = candidateL.offset - e.cur
 
@@ -413,8 +415,8 @@ encodeLoop:
 		}
 
 		// Try to find a better match by searching for a long match at the end of the current best match
-		if true && s+matched < sLimit {
-			nextHashL := hash8(load6432(src, s+matched), betterLongTableBits)
+		if s+matched < sLimit {
+			nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
 			cv := load3232(src, s)
 			candidateL := e.longTable[nextHashL]
 			coffsetL := candidateL.offset - e.cur - matched
@@ -495,10 +497,10 @@ encodeLoop:
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
-			h0 := hash8(cv0, betterLongTableBits)
+			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
 			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
-			e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+			e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			index0 += 2
 		}
 
@@ -516,8 +518,8 @@ encodeLoop:
 		}
 
 		// Store this, since we have it.
-		nextHashS := hash5(cv, betterShortTableBits)
-		nextHashL := hash8(cv, betterLongTableBits)
+		nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+		nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
 
 		// We have at least 4 byte match.
 		// No need to check backwards. We come straight from a match
@@ -672,8 +674,8 @@ encodeLoop:
 			panic("offset0 was 0")
 		}
 
-		nextHashS := hash5(cv, betterShortTableBits)
-		nextHashL := hash8(cv, betterLongTableBits)
+		nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+		nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
 		candidateL := e.longTable[nextHashL]
 		candidateS := e.table[nextHashS]
 
@@ -734,11 +736,11 @@ encodeLoop:
 			for index0 < s-1 {
 				cv0 := load6432(src, index0)
 				cv1 := cv0 >> 8
-				h0 := hash8(cv0, betterLongTableBits)
+				h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
 				off := index0 + e.cur
 				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 				e.markLongShardDirty(h0)
-				h1 := hash5(cv1, betterShortTableBits)
+				h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
 				e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
 				e.markShortShardDirty(h1)
 				index0 += 2
@@ -798,11 +800,11 @@ encodeLoop:
 			for index0 < s-1 {
 				cv0 := load6432(src, index0)
 				cv1 := cv0 >> 8
-				h0 := hash8(cv0, betterLongTableBits)
+				h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
 				off := index0 + e.cur
 				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 				e.markLongShardDirty(h0)
-				h1 := hash5(cv1, betterShortTableBits)
+				h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
 				e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
 				e.markShortShardDirty(h1)
 				index0 += 2
@@ -879,7 +881,7 @@ encodeLoop:
 			// See if we can find a long match at s+1
 			const checkAt = 1
 			cv := load6432(src, s+checkAt)
-			nextHashL = hash8(cv, betterLongTableBits)
+			nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
 			candidateL = e.longTable[nextHashL]
 			coffsetL = candidateL.offset - e.cur
 
@@ -940,7 +942,7 @@ encodeLoop:
 		}
 		// Try to find a better match by searching for a long match at the end of the current best match
 		if s+matched < sLimit {
-			nextHashL := hash8(load6432(src, s+matched), betterLongTableBits)
+			nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
 			cv := load3232(src, s)
 			candidateL := e.longTable[nextHashL]
 			coffsetL := candidateL.offset - e.cur - matched
@@ -1021,11 +1023,11 @@ encodeLoop:
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
-			h0 := hash8(cv0, betterLongTableBits)
+			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
 			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.markLongShardDirty(h0)
-			h1 := hash5(cv1, betterShortTableBits)
+			h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
 			e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			e.markShortShardDirty(h1)
 			index0 += 2
@@ -1045,8 +1047,8 @@ encodeLoop:
 		}
 
 		// Store this, since we have it.
-		nextHashS := hash5(cv, betterShortTableBits)
-		nextHashL := hash8(cv, betterLongTableBits)
+		nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+		nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
 
 		// We have at least 4 byte match.
 		// No need to check backwards. We come straight from a match
@@ -1113,10 +1115,10 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
 			const hashLog = betterShortTableBits
 
 			cv := load6432(d.content, i-e.maxMatchOff)
-			nextHash := hash5(cv, hashLog)      // 0 -> 4
-			nextHash1 := hash5(cv>>8, hashLog)  // 1 -> 5
-			nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6
-			nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7
+			nextHash := hashLen(cv, hashLog, betterShortLen)      // 0 -> 4
+			nextHash1 := hashLen(cv>>8, hashLog, betterShortLen)  // 1 -> 5
+			nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
+			nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
 			e.dictTable[nextHash] = tableEntry{
 				val:    uint32(cv),
 				offset: i,
@@ -1145,7 +1147,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
 	}
 	if len(d.content) >= 8 {
 		cv := load6432(d.content, 0)
-		h := hash8(cv, betterLongTableBits)
+		h := hashLen(cv, betterLongTableBits, betterLongLen)
 		e.dictLongTable[h] = prevEntry{
 			offset: e.maxMatchOff,
 			prev:   e.dictLongTable[h].offset,
@@ -1155,7 +1157,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
 		off := 8 // First to read
 		for i := e.maxMatchOff + 1; i < end; i++ {
 			cv = cv>>8 | (uint64(d.content[off]) << 56)
-			h := hash8(cv, betterLongTableBits)
+			h := hashLen(cv, betterLongTableBits, betterLongLen)
 			e.dictLongTable[h] = prevEntry{
 				offset: i,
 				prev:   e.dictLongTable[h].offset,
vendor/github.com/klauspost/compress/zstd/enc_dfast.go (generated, vendored): 61 lines changed
@@ -10,6 +10,7 @@ const (
 	dFastLongTableBits = 17                      // Bits used in the long match table
 	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
 	dFastLongTableMask = dFastLongTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+	dFastLongLen       = 8                       // Bytes used for table hash
 
 	dLongTableShardCnt  = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
 	dLongTableShardSize = dFastLongTableSize / tableShardCnt        // Size of an individual shard
@@ -17,6 +18,8 @@ const (
 	dFastShortTableBits = tableBits                // Bits used in the short match table
 	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
 	dFastShortTableMask = dFastShortTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+	dFastShortLen       = 5                        // Bytes used for table hash
+
 )
 
 type doubleFastEncoder struct {
@@ -124,8 +127,8 @@ encodeLoop:
 			panic("offset0 was 0")
 		}
 
-		nextHashS := hash5(cv, dFastShortTableBits)
-		nextHashL := hash8(cv, dFastLongTableBits)
+		nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+		nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
 		candidateL := e.longTable[nextHashL]
 		candidateS := e.table[nextHashS]
 
@@ -208,7 +211,7 @@ encodeLoop:
 			// See if we can find a long match at s+1
 			const checkAt = 1
 			cv := load6432(src, s+checkAt)
-			nextHashL = hash8(cv, dFastLongTableBits)
+			nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
 			candidateL = e.longTable[nextHashL]
 			coffsetL = s - (candidateL.offset - e.cur) + checkAt
 
@@ -304,16 +307,16 @@ encodeLoop:
 		cv1 := load6432(src, index1)
 		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
 		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
-		e.longTable[hash8(cv0, dFastLongTableBits)] = te0
-		e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+		e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
+		e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
 		cv0 >>= 8
 		cv1 >>= 8
 		te0.offset++
 		te1.offset++
 		te0.val = uint32(cv0)
 		te1.val = uint32(cv1)
-		e.table[hash5(cv0, dFastShortTableBits)] = te0
-		e.table[hash5(cv1, dFastShortTableBits)] = te1
+		e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
+		e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
 
 		cv = load6432(src, s)
 
@@ -330,8 +333,8 @@ encodeLoop:
 		}
 
 		// Store this, since we have it.
-		nextHashS := hash5(cv, dFastShortTableBits)
-		nextHashL := hash8(cv, dFastLongTableBits)
+		nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+		nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
 
 		// We have at least 4 byte match.
 		// No need to check backwards. We come straight from a match
@@ -436,8 +439,8 @@ encodeLoop:
 	var t int32
 	for {
 
-		nextHashS := hash5(cv, dFastShortTableBits)
-		nextHashL := hash8(cv, dFastLongTableBits)
+		nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+		nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
 		candidateL := e.longTable[nextHashL]
 		candidateS := e.table[nextHashS]
 
@@ -521,7 +524,7 @@ encodeLoop:
 			// See if we can find a long match at s+1
 			const checkAt = 1
 			cv := load6432(src, s+checkAt)
-			nextHashL = hash8(cv, dFastLongTableBits)
+			nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
 			candidateL = e.longTable[nextHashL]
 			coffsetL = s - (candidateL.offset - e.cur) + checkAt
 
@@ -614,16 +617,16 @@ encodeLoop:
 		cv1 := load6432(src, index1)
 		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
 		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
-		e.longTable[hash8(cv0, dFastLongTableBits)] = te0
-		e.longTable[hash8(cv1, dFastLongTableBits)] = te1
+		e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
+		e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
 		cv0 >>= 8
 		cv1 >>= 8
 		te0.offset++
 		te1.offset++
 		te0.val = uint32(cv0)
 		te1.val = uint32(cv1)
-		e.table[hash5(cv0, dFastShortTableBits)] = te0
-		e.table[hash5(cv1, dFastShortTableBits)] = te1
+		e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
+		e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
 
 		cv = load6432(src, s)
 
@@ -640,8 +643,8 @@ encodeLoop:
 		}
 
 		// Store this, since we have it.
-		nextHashS := hash5(cv1>>8, dFastShortTableBits)
-		nextHashL := hash8(cv, dFastLongTableBits)
+		nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen)
+		nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
 
 		// We have at least 4 byte match.
 		// No need to check backwards. We come straight from a match
@@ -782,8 +785,8 @@ encodeLoop:
 			panic("offset0 was 0")
 		}
 
-		nextHashS := hash5(cv, dFastShortTableBits)
-		nextHashL := hash8(cv, dFastLongTableBits)
+		nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+		nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
 		candidateL := e.longTable[nextHashL]
 		candidateS := e.table[nextHashS]
 
@@ -868,7 +871,7 @@ encodeLoop:
 			// See if we can find a long match at s+1
 			const checkAt = 1
 			cv := load6432(src, s+checkAt)
-			nextHashL = hash8(cv, dFastLongTableBits)
+			nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
 			candidateL = e.longTable[nextHashL]
 			coffsetL = s - (candidateL.offset - e.cur) + checkAt
 
@@ -965,8 +968,8 @@ encodeLoop:
 		cv1 := load6432(src, index1)
 		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
 		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
-		longHash1 := hash8(cv0, dFastLongTableBits)
-		longHash2 := hash8(cv0, dFastLongTableBits)
+		longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
+		longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
 		e.longTable[longHash1] = te0
 		e.longTable[longHash2] = te1
 		e.markLongShardDirty(longHash1)
@@ -977,8 +980,8 @@ encodeLoop:
 		te1.offset++
 		te0.val = uint32(cv0)
 		te1.val = uint32(cv1)
-		hashVal1 := hash5(cv0, dFastShortTableBits)
-		hashVal2 := hash5(cv1, dFastShortTableBits)
+		hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen)
+		hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen)
 		e.table[hashVal1] = te0
 		e.markShardDirty(hashVal1)
 		e.table[hashVal2] = te1
@@ -999,8 +1002,8 @@ encodeLoop:
 		}
 
 		// Store this, since we have it.
-		nextHashS := hash5(cv, dFastShortTableBits)
-		nextHashL := hash8(cv, dFastLongTableBits)
+		nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+		nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
 
 		// We have at least 4 byte match.
 		// No need to check backwards. We come straight from a match
@@ -1071,14 +1074,14 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
 	}
 	if len(d.content) >= 8 {
 		cv := load6432(d.content, 0)
-		e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+		e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
 			val:    uint32(cv),
 			offset: e.maxMatchOff,
 		}
 		end := int32(len(d.content)) - 8 + e.maxMatchOff
 		for i := e.maxMatchOff + 1; i < end; i++ {
 			cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
-			e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
+			e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
 				val:    uint32(cv),
 				offset: i,
 			}
vendor/github.com/klauspost/compress/zstd/enc_fast.go (generated, vendored): 37 lines changed
@@ -11,12 +11,13 @@ import (
 )
 
 const (
-	tableBits      = 15                               // Bits used in the table
-	tableSize      = 1 << tableBits                   // Size of the table
-	tableShardCnt  = 1 << (tableBits - dictShardBits) // Number of shards in the table
-	tableShardSize = tableSize / tableShardCnt        // Size of an individual shard
-	tableMask      = tableSize - 1                    // Mask for table indices. Redundant, but can eliminate bounds checks.
-	maxMatchLength = 131074
+	tableBits        = 15                               // Bits used in the table
+	tableSize        = 1 << tableBits                   // Size of the table
+	tableShardCnt    = 1 << (tableBits - dictShardBits) // Number of shards in the table
+	tableShardSize   = tableSize / tableShardCnt        // Size of an individual shard
+	tableFastHashLen = 6
+	tableMask        = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+	maxMatchLength   = 131074
 )
 
 type tableEntry struct {
@@ -122,8 +123,8 @@ encodeLoop:
 			panic("offset0 was 0")
 		}
 
-		nextHash := hash6(cv, hashLog)
-		nextHash2 := hash6(cv>>8, hashLog)
+		nextHash := hashLen(cv, hashLog, tableFastHashLen)
+		nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
 		candidate := e.table[nextHash]
 		candidate2 := e.table[nextHash2]
 		repIndex := s - offset1 + 2
@@ -301,7 +302,7 @@ encodeLoop:
 		}
 
 		// Store this, since we have it.
-		nextHash := hash6(cv, hashLog)
+		nextHash := hashLen(cv, hashLog, tableFastHashLen)
 		e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
 		seq.matchLen = uint32(l) - zstdMinMatch
 		seq.litLen = 0
@@ -405,8 +406,8 @@ encodeLoop:
 	// By not using them for the first 3 matches
 
 	for {
-		nextHash := hash6(cv, hashLog)
-		nextHash2 := hash6(cv>>8, hashLog)
+		nextHash := hashLen(cv, hashLog, tableFastHashLen)
+		nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
 		candidate := e.table[nextHash]
 		candidate2 := e.table[nextHash2]
 		repIndex := s - offset1 + 2
@@ -589,7 +590,7 @@ encodeLoop:
 		}
 
 		// Store this, since we have it.
-		nextHash := hash6(cv, hashLog)
+		nextHash := hashLen(cv, hashLog, tableFastHashLen)
 		e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
 		seq.matchLen = uint32(l) - zstdMinMatch
 		seq.litLen = 0
@@ -715,8 +716,8 @@ encodeLoop:
 			panic("offset0 was 0")
 		}
 
-		nextHash := hash6(cv, hashLog)
-		nextHash2 := hash6(cv>>8, hashLog)
+		nextHash := hashLen(cv, hashLog, tableFastHashLen)
+		nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
 		candidate := e.table[nextHash]
 		candidate2 := e.table[nextHash2]
 		repIndex := s - offset1 + 2
@@ -896,7 +897,7 @@ encodeLoop:
 		}
 
 		// Store this, since we have it.
-		nextHash := hash6(cv, hashLog)
+		nextHash := hashLen(cv, hashLog, tableFastHashLen)
 		e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
 		e.markShardDirty(nextHash)
 		seq.matchLen = uint32(l) - zstdMinMatch
@@ -957,9 +958,9 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 			const hashLog = tableBits
 
 			cv := load6432(d.content, i-e.maxMatchOff)
-			nextHash := hash6(cv, hashLog)      // 0 -> 5
-			nextHash1 := hash6(cv>>8, hashLog)  // 1 -> 6
-			nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)      // 0 -> 5
+			nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen)  // 1 -> 6
+			nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
 			e.dictTable[nextHash] = tableEntry{
 				val:    uint32(cv),
 				offset: i,
vendor/github.com/klauspost/compress/zstd/encoder.go (generated, vendored): 31 lines changed
@@ -33,7 +33,7 @@ type encoder interface {
 	Block() *blockEnc
 	CRC() *xxhash.Digest
 	AppendCRC([]byte) []byte
-	WindowSize(size int) int32
+	WindowSize(size int64) int32
 	UseBlock(*blockEnc)
 	Reset(d *dict, singleBlock bool)
 }
@@ -48,6 +48,8 @@ type encoderState struct {
 	err              error
 	writeErr         error
 	nWritten         int64
+	nInput           int64
+	frameContentSize int64
 	headerWritten    bool
 	eofWritten       bool
 	fullFrameWritten bool
@@ -120,7 +122,21 @@ func (e *Encoder) Reset(w io.Writer) {
 	s.w = w
 	s.err = nil
 	s.nWritten = 0
+	s.nInput = 0
 	s.writeErr = nil
+	s.frameContentSize = 0
+}
+
+// ResetContentSize will reset and set a content size for the next stream.
+// If the bytes written does not match the size given an error will be returned
+// when calling Close().
+// This is removed when Reset is called.
+// Sizes <= 0 results in no content size set.
+func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
+	e.Reset(w)
+	if size >= 0 {
+		e.state.frameContentSize = size
+	}
 }
 
 // Write data to the encoder.
@@ -190,6 +206,7 @@ func (e *Encoder) nextBlock(final bool) error {
 			return s.err
 		}
 		s.nWritten += int64(n2)
+		s.nInput += int64(len(s.filling))
 		s.current = s.current[:0]
 		s.filling = s.filling[:0]
 		s.headerWritten = true
@@ -200,8 +217,8 @@ func (e *Encoder) nextBlock(final bool) error {
 
 		var tmp [maxHeaderSize]byte
 		fh := frameHeader{
-			ContentSize:   0,
-			WindowSize:    uint32(s.encoder.WindowSize(0)),
+			ContentSize:   uint64(s.frameContentSize),
+			WindowSize:    uint32(s.encoder.WindowSize(s.frameContentSize)),
 			SingleSegment: false,
 			Checksum:      e.o.crc,
 			DictID:        e.o.dict.ID(),
@@ -243,6 +260,7 @@ func (e *Encoder) nextBlock(final bool) error {
 
 	// Move blocks forward.
 	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
+	s.nInput += int64(len(s.current))
 	s.wg.Add(1)
 	go func(src []byte) {
 		if debugEncoder {
@@ -394,6 +412,11 @@ func (e *Encoder) Close() error {
 	if err != nil {
 		return err
 	}
+	if s.frameContentSize > 0 {
+		if s.nInput != s.frameContentSize {
+			return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput)
+		}
+	}
 	if e.state.fullFrameWritten {
 		return s.err
 	}
@@ -470,7 +493,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 	}
 	fh := frameHeader{
 		ContentSize:   uint64(len(src)),
-		WindowSize:    uint32(enc.WindowSize(len(src))),
+		WindowSize:    uint32(enc.WindowSize(int64(len(src)))),
 		SingleSegment: single,
 		Checksum:      e.o.crc,
 		DictID:        e.o.dict.ID(),
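Editor's note: ResetContentSize is new public API. It embeds the declared size in the frame header (so decoders can size buffers and validate the stream) and makes Close fail if the byte count does not match. A usage sketch:

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	payload := []byte("hello, zstd")

	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}

	var buf bytes.Buffer
	// Declare the exact size up front; it ends up in the frame header.
	enc.ResetContentSize(&buf, int64(len(payload)))
	if _, err := enc.Write(payload); err != nil {
		log.Fatal(err)
	}
	// Close returns an error if the bytes written differ from the
	// declared content size.
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
}
```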
vendor/github.com/klauspost/compress/zstd/encoder_options.go (generated, vendored): 2 lines changed
@@ -189,7 +189,7 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
 	case level >= 6 && level < 10:
 		return SpeedBetterCompression
 	case level >= 10:
-		return SpeedBetterCompression
+		return SpeedBestCompression
 	}
 	return SpeedDefault
}
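Editor's note: previously every zstd CLI level of 10 or above was mapped to SpeedBetterCompression; it now maps to SpeedBestCompression. A small check of the corrected mapping:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// zstd CLI level 19 now selects the package's best mode.
	level := zstd.EncoderLevelFromZstd(19)
	fmt.Println(level == zstd.SpeedBestCompression) // true (was SpeedBetterCompression)
}
```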
vendor/github.com/klauspost/compress/zstd/framedec.go (generated, vendored): 32 lines changed
@@ -22,10 +22,6 @@ type frameDec struct {
 
 	WindowSize uint64
 
-	// maxWindowSize is the maximum windows size to support.
-	// should never be bigger than max-int.
-	maxWindowSize uint64
-
 	// In order queue of blocks being decoded.
 	decoding chan *blockDec
 
@@ -50,8 +46,11 @@ type frameDec struct {
 }
 
 const (
-	// The minimum Window_Size is 1 KB.
+	// MinWindowSize is the minimum Window Size, which is 1 KB.
 	MinWindowSize = 1 << 10
+
+	// MaxWindowSize is the maximum encoder window size
+	// and the default decoder maximum window size.
 	MaxWindowSize = 1 << 29
 )
 
@@ -61,12 +60,11 @@ var (
 )
 
 func newFrameDec(o decoderOptions) *frameDec {
-	d := frameDec{
-		o:             o,
-		maxWindowSize: MaxWindowSize,
+	if o.maxWindowSize > o.maxDecodedSize {
+		o.maxWindowSize = o.maxDecodedSize
 	}
-	if d.maxWindowSize > o.maxDecodedSize {
-		d.maxWindowSize = o.maxDecodedSize
+	d := frameDec{
+		o: o,
 	}
 	return &d
 }
@@ -251,13 +249,17 @@ func (d *frameDec) reset(br byteBuffer) error {
 		}
 	}
 
-	if d.WindowSize > d.maxWindowSize {
-		printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize)
+	if d.WindowSize > uint64(d.o.maxWindowSize) {
+		if debugDecoder {
+			printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+		}
 		return ErrWindowSizeExceeded
 	}
 	// The minimum Window_Size is 1 KB.
 	if d.WindowSize < MinWindowSize {
-		println("got window size: ", d.WindowSize)
+		if debugDecoder {
+			println("got window size: ", d.WindowSize)
+		}
 		return ErrWindowSizeTooSmall
 	}
 	d.history.windowSize = int(d.WindowSize)
@@ -352,8 +354,8 @@ func (d *frameDec) checkCRC() error {
 
 func (d *frameDec) initAsync() {
 	if !d.o.lowMem && !d.SingleSegment {
-		// set max extra size history to 10MB.
-		d.history.maxSize = d.history.windowSize + maxBlockSize*5
+		// set max extra size history to 2MB.
+		d.history.maxSize = d.history.windowSize + maxBlockSize
 	}
 	// re-alloc if more than one extra block size.
 	if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
vendor/github.com/klauspost/compress/zstd/hash.go (generated, vendored): 60 lines changed
@@ -13,24 +13,24 @@ const (
 	prime8bytes = 0xcf1bbcdcb7a56463
 )
 
-// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes.
-// l must be >=4 and <=8. Any other value will return hash for 4 bytes.
-// h should always be <32.
-// Preferably h and l should be a constant.
-// FIXME: This does NOT get resolved, if 'mls' is constant,
-// so this cannot be used.
-func hashLen(u uint64, hashLog, mls uint8) uint32 {
+// hashLen returns a hash of the lowest mls bytes of with length output bits.
+// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
 	switch mls {
+	case 3:
+		return (uint32(u<<8) * prime3bytes) >> (32 - length)
 	case 5:
-		return hash5(u, hashLog)
+		return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
 	case 6:
-		return hash6(u, hashLog)
+		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
 	case 7:
-		return hash7(u, hashLog)
+		return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
 	case 8:
-		return hash8(u, hashLog)
+		return uint32((u * prime8bytes) >> (64 - length))
 	default:
-		return hash4x64(u, hashLog)
+		return (uint32(u) * prime4bytes) >> (32 - length)
 	}
 }
@@ -39,39 +39,3 @@ func hashLen(u uint64, hashLog, mls uint8) uint32 {
 func hash3(u uint32, h uint8) uint32 {
 	return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
 }
-
-// hash4 returns the hash of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash4(u uint32, h uint8) uint32 {
-	return (u * prime4bytes) >> ((32 - h) & 31)
-}
-
-// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash4x64(u uint64, h uint8) uint32 {
-	return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
-}
-
-// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash5(u uint64, h uint8) uint32 {
-	return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
-}
-
-// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash6(u uint64, h uint8) uint32 {
-	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
-}
-
-// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash7(u uint64, h uint8) uint32 {
-	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
-}
-
-// hash8 returns the hash of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash8(u uint64, h uint8) uint32 {
-	return uint32((u * prime8bytes) >> ((64 - h) & 63))
-}
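Editor's note: this consolidation is what the hash6/hash8 call-site renames in the encoder files above are for. With mls constant at every call site, the switch folds away and each hashLen call compiles down to a multiply and a shift. For example, case 8 reduces to the following standalone function, with prime8bytes taken from the hunk above:

```go
package main

import "fmt"

const prime8bytes = 0xcf1bbcdcb7a56463 // as declared in hash.go

// hash8 reproduces hashLen's case 8: multiplying by a large odd constant
// mixes all eight input bytes into the top bits, and the shift keeps the
// `length` highest bits as the table index.
func hash8(u uint64, length uint8) uint32 {
	return uint32((u * prime8bytes) >> (64 - length))
}

func main() {
	// bestLongTableBits is now 22, so long-table indices use 22 bits.
	fmt.Printf("%#x\n", hash8(0x0123456789abcdef, 22))
}
```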
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go (generated, vendored): 1 line changed
@@ -195,7 +195,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
 	b, d.v4 = consumeUint64(b)
 	b, d.total = consumeUint64(b)
 	copy(d.mem[:], b)
-	b = b[len(d.mem):]
 	d.n = int(d.total % uint64(len(d.mem)))
 	return nil
 }
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go (generated, vendored): 7 lines changed
@@ -1,6 +1,5 @@
-// +build !appengine
-// +build gc
-// +build !purego
+//go:build !appengine && gc && !purego
+// +build !appengine,gc,!purego
 
 package xxhash
@@ -10,4 +9,4 @@ package xxhash
 func Sum64(b []byte) uint64
 
 //go:noescape
-func writeBlocks(*Digest, []byte) int
+func writeBlocks(d *Digest, b []byte) int
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s (generated, vendored): 66 lines changed
@@ -6,7 +6,7 @@
 
 // Register allocation:
 // AX	h
-// CX	pointer to advance through b
+// SI	pointer to advance through b
 // DX	n
 // BX	loop end
 // R8	v1, k1
@@ -16,39 +16,39 @@
 // R12	tmp
 // R13	prime1v
 // R14	prime2v
-// R15	prime4v
+// DI	prime4v
 
-// round reads from and advances the buffer pointer in CX.
+// round reads from and advances the buffer pointer in SI.
 // It assumes that R13 has prime1v and R14 has prime2v.
 #define round(r) \
-	MOVQ  (CX), R12 \
-	ADDQ  $8, CX    \
+	MOVQ  (SI), R12 \
+	ADDQ  $8, SI    \
 	IMULQ R14, R12  \
 	ADDQ  R12, r    \
 	ROLQ  $31, r    \
 	IMULQ R13, r
 
 // mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
 #define mergeRound(acc, val) \
 	IMULQ R14, val \
 	ROLQ  $31, val \
 	IMULQ R13, val \
 	XORQ  val, acc \
 	IMULQ R13, acc \
-	ADDQ  R15, acc
+	ADDQ  DI, acc
 
 // func Sum64(b []byte) uint64
 TEXT ·Sum64(SB), NOSPLIT, $0-32
 	// Load fixed primes.
 	MOVQ ·prime1v(SB), R13
 	MOVQ ·prime2v(SB), R14
-	MOVQ ·prime4v(SB), R15
+	MOVQ ·prime4v(SB), DI
 
 	// Load slice.
-	MOVQ b_base+0(FP), CX
+	MOVQ b_base+0(FP), SI
 	MOVQ b_len+8(FP), DX
-	LEAQ (CX)(DX*1), BX
+	LEAQ (SI)(DX*1), BX
 
 	// The first loop limit will be len(b)-32.
 	SUBQ $32, BX
@@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
 	XORQ R11, R11
 	SUBQ R13, R11
 
-	// Loop until CX > BX.
+	// Loop until SI > BX.
 blockLoop:
 	round(R8)
 	round(R9)
 	round(R10)
 	round(R11)
 
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JLE  blockLoop
 
 	MOVQ R8, AX
@@ -100,16 +100,16 @@ noBlocks:
 afterBlocks:
 	ADDQ DX, AX
 
-	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
 	ADDQ $24, BX
 
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JG   fourByte
 
 wordLoop:
 	// Calculate k1.
-	MOVQ  (CX), R8
-	ADDQ  $8, CX
+	MOVQ  (SI), R8
+	ADDQ  $8, SI
 	IMULQ R14, R8
 	ROLQ  $31, R8
 	IMULQ R13, R8
@@ -117,18 +117,18 @@ wordLoop:
 	XORQ  R8, AX
 	ROLQ  $27, AX
 	IMULQ R13, AX
-	ADDQ  R15, AX
+	ADDQ  DI, AX
 
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JLE  wordLoop
 
 fourByte:
 	ADDQ $4, BX
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JG   singles
 
-	MOVL  (CX), R8
-	ADDQ  $4, CX
+	MOVL  (SI), R8
+	ADDQ  $4, SI
 	IMULQ R13, R8
 	XORQ  R8, AX
 
@@ -138,19 +138,19 @@ fourByte:
 
 singles:
 	ADDQ $4, BX
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JGE  finalize
 
singlesLoop:
-	MOVBQZX (CX), R12
-	ADDQ    $1, CX
+	MOVBQZX (SI), R12
+	ADDQ    $1, SI
 	IMULQ   ·prime5v(SB), R12
 	XORQ    R12, AX
 
 	ROLQ  $11, AX
 	IMULQ R13, AX
 
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JL   singlesLoop
 
 finalize:
@@ -179,13 +179,13 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
 	MOVQ ·prime2v(SB), R14
 
 	// Load slice.
-	MOVQ arg1_base+8(FP), CX
-	MOVQ arg1_len+16(FP), DX
-	LEAQ (CX)(DX*1), BX
+	MOVQ b_base+8(FP), SI
+	MOVQ b_len+16(FP), DX
+	LEAQ (SI)(DX*1), BX
 	SUBQ $32, BX
 
 	// Load vN from d.
-	MOVQ arg+0(FP), AX
+	MOVQ d+0(FP), AX
 	MOVQ 0(AX), R8   // v1
 	MOVQ 8(AX), R9   // v2
 	MOVQ 16(AX), R10 // v3
@@ -199,7 +199,7 @@ blockLoop:
 	round(R10)
 	round(R11)
 
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JLE  blockLoop
 
 	// Copy vN back to d.
@@ -208,8 +208,8 @@ blockLoop:
 	MOVQ R10, 16(AX)
 	MOVQ R11, 24(AX)
 
-	// The number of bytes written is CX minus the old base pointer.
-	SUBQ arg1_base+8(FP), CX
-	MOVQ CX, ret+32(FP)
+	// The number of bytes written is SI minus the old base pointer.
+	SUBQ b_base+8(FP), SI
+	MOVQ SI, ret+32(FP)
 
 	RET
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go (generated, vendored): 1 line changed
@@ -1,3 +1,4 @@
+//go:build !amd64 || appengine || !gc || purego
 // +build !amd64 appengine !gc purego
 
 package xxhash
vendor/github.com/klauspost/compress/zstd/snappy.go (generated, vendored): 2 lines changed
@@ -10,8 +10,8 @@ import (
 	"hash/crc32"
 	"io"
 
-	"github.com/golang/snappy"
 	"github.com/klauspost/compress/huff0"
+	snappy "github.com/klauspost/compress/internal/snapref"
 )
 
 const (
vendor/github.com/klauspost/compress/zstd/zip.go (generated, vendored): 9 lines changed
@@ -64,8 +64,9 @@ func (r *pooledZipReader) Close() error {
 }
 
 type pooledZipWriter struct {
-	mu  sync.Mutex // guards Close and Read
-	enc *Encoder
+	mu   sync.Mutex // guards Close and Read
+	enc  *Encoder
+	pool *sync.Pool
 }
 
 func (w *pooledZipWriter) Write(p []byte) (n int, err error) {
@@ -83,7 +84,7 @@ func (w *pooledZipWriter) Close() error {
 	var err error
 	if w.enc != nil {
 		err = w.enc.Close()
-		zipReaderPool.Put(w.enc)
+		w.pool.Put(w.enc)
 		w.enc = nil
 	}
 	return err
@@ -104,7 +105,7 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
 			return nil, err
 		}
 	}
-		return &pooledZipWriter{enc: enc}, nil
+		return &pooledZipWriter{enc: enc, pool: &pool}, nil
 	}
 }
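Editor's note: the bug fixed here is that pooled zip encoders were being returned to zipReaderPool (the decoder pool) instead of the compressor's own pool, so closed writers were never actually reused. Usage of the zip integration is unchanged; a sketch, assuming the package's ZipMethodWinZip constant (the WinZip zstd method ID) is available in this version:

```go
package main

import (
	"archive/zip"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Create("out.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zw := zip.NewWriter(f)
	// The registered compressor checks a shared pool on each call; after
	// this fix, closed writers actually make it back into that pool.
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())

	w, err := zw.CreateHeader(&zip.FileHeader{
		Name:   "hello.txt",
		Method: zstd.ZipMethodWinZip,
	})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}
```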