Mirror of https://github.com/containers/skopeo.git, synced 2025-09-24 03:17:17 +00:00
Bump github.com/containers/storage from 1.37.0 to 1.38.0
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.37.0 to 1.38.0.

- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/main/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.37.0...v1.38.0)

---
updated-dependencies:
- dependency-name: github.com/containers/storage
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Changed file: vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go (generated, vendored), 189 lines changed
@@ -155,37 +155,33 @@ func (w *huffmanBitWriter) reset(writer io.Writer) {
 	w.lastHuffMan = false
 }
 
-func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) {
-	offsets, lits = true, true
+func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
 	a := t.offHist[:offsetCodeCount]
-	b := w.offsetFreq[:len(a)]
-	for i := range a {
-		if b[i] == 0 && a[i] != 0 {
-			offsets = false
-			break
+	b := w.offsetEncoding.codes
+	b = b[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].len == 0 {
+			return false
 		}
 	}
 
 	a = t.extraHist[:literalCount-256]
-	b = w.literalFreq[256:literalCount]
+	b = w.literalEncoding.codes[256:literalCount]
 	b = b[:len(a)]
-	for i := range a {
-		if b[i] == 0 && a[i] != 0 {
-			lits = false
-			break
+	for i, v := range a {
+		if v != 0 && b[i].len == 0 {
+			return false
 		}
 	}
-	if lits {
-		a = t.litHist[:]
-		b = w.literalFreq[:len(a)]
-		for i := range a {
-			if b[i] == 0 && a[i] != 0 {
-				lits = false
-				break
-			}
-		}
+
+	a = t.litHist[:256]
+	b = w.literalEncoding.codes[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].len == 0 {
+			return false
+		}
 	}
-	return
+	return true
 }
 
 func (w *huffmanBitWriter) flush() {
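Note: the rewritten canReuse no longer compares raw frequency tables; it asks whether every symbol that now occurs (v != 0 in the histogram) already has a code assigned (b[i].len != 0) in the previously generated encoding. A minimal standalone sketch of that invariant, with a simplified hypothetical `code` type standing in for the package's hcode:

```go
package main

import "fmt"

// code is a simplified stand-in for the package's hcode: a Huffman code
// plus its bit length; len == 0 means "no code was assigned".
type code struct {
	bits uint16
	len  uint8
}

// canReuse reports whether a previously generated table (prev) can encode
// a new histogram (hist): every symbol with a nonzero count must already
// have a code. This mirrors the rewritten check in the hunk above.
func canReuse(hist []uint16, prev []code) bool {
	for i, v := range hist {
		if v != 0 && prev[i].len == 0 {
			return false
		}
	}
	return true
}

func main() {
	prev := []code{{bits: 0b0, len: 1}, {len: 0}, {bits: 0b11, len: 2}}
	fmt.Println(canReuse([]uint16{5, 0, 2}, prev)) // true: symbol 1 is unused
	fmt.Println(canReuse([]uint16{5, 1, 2}, prev)) // false: symbol 1 has no code
}
```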
@@ -222,7 +218,7 @@ func (w *huffmanBitWriter) write(b []byte) {
 }
 
 func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
-	w.bits |= uint64(b) << w.nbits
+	w.bits |= uint64(b) << (w.nbits & 63)
 	w.nbits += nb
 	if w.nbits >= 48 {
 		w.writeOutBits()
@@ -423,7 +419,7 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
 
 func (w *huffmanBitWriter) writeCode(c hcode) {
 	// The function does not get inlined if we "& 63" the shift.
-	w.bits |= uint64(c.code) << w.nbits
+	w.bits |= uint64(c.code) << (w.nbits & 63)
 	w.nbits += c.len
 	if w.nbits >= 48 {
 		w.writeOutBits()
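Note: the recurring `<< (nbits & 63)` edit in this diff is a Go code-generation idiom, not a semantic change here, since nbits stays below 64 between flushes. Go defines a shift by 64 or more as yielding 0, so an unmasked variable shift forces the compiler to emit extra compare/conditional instructions; masking the count by 63 proves it is in range and yields a single shift instruction on amd64/arm64. A small illustrative sketch:

```go
package main

import "fmt"

// shiftUnmasked relies on Go's defined semantics: a shift count >= 64
// yields 0, so the compiler must guard the variable shift.
func shiftUnmasked(x uint64, s uint16) uint64 {
	return x << s
}

// shiftMasked mirrors the diff's pattern: masking with 63 proves the
// count is < 64. For counts that are already < 64 (as nbits is between
// flushes), the result is identical to the unmasked version.
func shiftMasked(x uint64, s uint16) uint64 {
	return x << (s & 63)
}

func main() {
	for _, s := range []uint16{0, 13, 47} {
		fmt.Println(shiftUnmasked(1, s) == shiftMasked(1, s)) // true, true, true
	}
}
```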
@@ -566,7 +562,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
 		w.lastHeader = 0
 	}
 	numLiterals, numOffsets := w.indexTokens(tokens, false)
-	w.generate(tokens)
+	w.generate()
 	var extraBits int
 	storedSize, storable := w.storedSize(input)
 	if storable {
@@ -595,7 +591,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
 	}
 
 	// Stored bytes?
-	if storable && storedSize < size {
+	if storable && storedSize <= size {
 		w.writeStoredHeader(len(input), eof)
 		w.writeBytes(input)
 		return
@@ -634,22 +630,39 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
 		w.lastHeader = 0
 		w.lastHuffMan = false
 	}
 	if !sync {
 		tokens.Fill()
 	}
-	numLiterals, numOffsets := w.indexTokens(tokens, !sync)
 
-	var size int
+	// fillReuse enables filling of empty values.
+	// This will make encodings always reusable without testing.
+	// However, this does not appear to benefit on most cases.
+	const fillReuse = false
+
+	// Check if we can reuse...
+	if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+
+	numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+	extraBits := 0
+	ssize, storable := w.storedSize(input)
+
+	const usePrefs = true
+	if storable || w.lastHeader > 0 {
+		extraBits = w.extraBitSize()
+	}
+
+	var size int
+
 	// Check if we should reuse.
 	if w.lastHeader > 0 {
 		// Estimate size for using a new table.
 		// Use the previous header size as the best estimate.
 		newSize := w.lastHeader + tokens.EstimatedBits()
-		newSize += newSize >> w.logNewTablePenalty
+		newSize += int(w.literalEncoding.codes[endBlockMarker].len) + newSize>>w.logNewTablePenalty
 
 		// The estimated size is calculated as an optimal table.
 		// We add a penalty to make it more realistic and re-use a bit more.
-		reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize()
+		reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits
 
 		// Check if a new table is better.
 		if newSize < reuseSize {
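Note: the reuse heuristic above is a right-shift penalty. The estimate for a fresh table, newSize, is inflated by newSize >> logNewTablePenalty, i.e. by roughly size/2^p, before being compared with reuseSize, which biases the writer toward keeping the existing table. A worked sketch of the comparison; the numeric values here are illustrative, not taken from the package:

```go
package main

import "fmt"

// chooseTable mirrors the decision in writeBlockDynamic: estimate the
// cost of a fresh table, add a penalty of newSize/2^logNewTablePenalty
// plus the end-of-block code length, then pick the cheaper option.
func chooseTable(headerBits, estimatedBits, eobLen, reuseBits, logNewTablePenalty int) string {
	newSize := headerBits + estimatedBits
	newSize += eobLen + newSize>>logNewTablePenalty // penalty: ~1/2^p of the estimate
	if newSize < reuseBits {
		return "new table"
	}
	return "reuse"
}

func main() {
	// Illustrative numbers: 300-bit header, 5000 estimated data bits,
	// 7-bit EOB code, 5500 bits if the old table is reused, penalty 2^4.
	// 5300 + 7 + 5300>>4 = 5638, which is not < 5500, so reuse wins.
	fmt.Println(chooseTable(300, 5000, 7, 5500, 4)) // "reuse"
}
```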
@@ -660,35 +673,79 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
 		} else {
 			size = reuseSize
 		}
 
+		if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+			// Check if we get a reasonable size decrease.
+			if storable && ssize <= size {
+				w.writeStoredHeader(len(input), eof)
+				w.writeBytes(input)
+				return
+			}
+			w.writeFixedHeader(eof)
+			if !sync {
+				tokens.AddEOB()
+			}
+			w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+			return
+		}
 		// Check if we get a reasonable size decrease.
-		if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+		if storable && ssize <= size {
 			w.writeStoredHeader(len(input), eof)
 			w.writeBytes(input)
 			w.lastHeader = 0
 			return
 		}
 	}
 
 	// We want a new block/table
 	if w.lastHeader == 0 {
-		w.generate(tokens)
+		if fillReuse && !sync {
+			w.fillTokens()
+			numLiterals, numOffsets = maxNumLit, maxNumDist
+		} else {
+			w.literalFreq[endBlockMarker] = 1
+		}
+
+		w.generate()
 		// Generate codegen and codegenFrequencies, which indicates how to encode
 		// the literalEncoding and the offsetEncoding.
 		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
 		w.codegenEncoding.generate(w.codegenFreq[:], 7)
 
 		var numCodegens int
-		size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize())
-		// Store bytes, if we don't get a reasonable improvement.
-		if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
-			w.writeStoredHeader(len(input), eof)
-			w.writeBytes(input)
-			w.lastHeader = 0
-			return
+		if fillReuse && !sync {
+			// Reindex for accurate size...
+			w.indexTokens(tokens, true)
+		}
+		size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+		// Store predefined, if we don't get a reasonable improvement.
+		if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+			// Store bytes, if we don't get an improvement.
+			if storable && ssize <= preSize {
+				w.writeStoredHeader(len(input), eof)
+				w.writeBytes(input)
+				return
+			}
+			w.writeFixedHeader(eof)
+			if !sync {
+				tokens.AddEOB()
+			}
+			w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+			return
+		}
+
+		if storable && ssize <= size {
+			// Store bytes, if we don't get an improvement.
+			w.writeStoredHeader(len(input), eof)
+			w.writeBytes(input)
+			w.lastHeader = 0
+			return
 		}
 
 		// Write Huffman table.
 		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
-		w.lastHeader, _ = w.headerSize()
+		if !sync {
+			w.lastHeader, _ = w.headerSize()
+		}
 		w.lastHuffMan = false
 	}
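Note: taken together, the rewritten branches above reduce DEFLATE's three block types to size comparisons: raw stored bytes win when they are no larger, the predefined (fixed) table beats a dynamic table it matches, and otherwise a fresh dynamic table is written. A compact sketch of that ladder, abstracted away from the reuse and header state the real code interleaves with it; sizes are in bits and illustrative only:

```go
package main

import "fmt"

// pickBlockType mirrors the ordering in writeBlockDynamic: when the
// fixed table is no worse than the dynamic one, stored bytes are tried
// first, then the fixed header; otherwise stored competes with dynamic.
func pickBlockType(storedBits, fixedBits, dynamicBits int, storable bool) string {
	if fixedBits <= dynamicBits {
		if storable && storedBits <= fixedBits {
			return "stored"
		}
		return "fixed"
	}
	if storable && storedBits <= dynamicBits {
		return "stored"
	}
	return "dynamic"
}

func main() {
	fmt.Println(pickBlockType(8000, 7400, 7600, true)) // "fixed"
	fmt.Println(pickBlockType(7000, 7400, 7600, true)) // "stored"
	fmt.Println(pickBlockType(9000, 7800, 7600, true)) // "dynamic"
}
```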
@@ -699,6 +756,19 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
 	w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
 }
 
+func (w *huffmanBitWriter) fillTokens() {
+	for i, v := range w.literalFreq[:literalCount] {
+		if v == 0 {
+			w.literalFreq[i] = 1
+		}
+	}
+	for i, v := range w.offsetFreq[:offsetCodeCount] {
+		if v == 0 {
+			w.offsetFreq[i] = 1
+		}
+	}
+}
+
 // indexTokens indexes a slice of tokens, and updates
 // literalFreq and offsetFreq, and generates literalEncoding
 // and offsetEncoding.
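Note: the new fillTokens makes every histogram entry nonzero, so the generated table assigns a code to every possible symbol and canReuse can never fail afterwards; the cost is a slightly less optimal tree, which is presumably why the diff ships with fillReuse set to false. A tiny sketch of the invariant:

```go
package main

import "fmt"

// fill performs a fillTokens-style pass: bump every zero frequency to 1
// so the Huffman generator assigns a code to every symbol.
func fill(freq []uint16) {
	for i, v := range freq {
		if v == 0 {
			freq[i] = 1
		}
	}
}

func main() {
	freq := []uint16{9, 0, 3, 0}
	fill(freq)
	fmt.Println(freq) // [9 1 3 1]: no symbol is left without a code
}
```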
@@ -733,7 +803,7 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num
 	return
 }
 
-func (w *huffmanBitWriter) generate(t *tokens) {
+func (w *huffmanBitWriter) generate() {
 	w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
 	w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
 }
@@ -768,7 +838,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		if t < matchType {
 			//w.writeCode(lits[t.literal()])
 			c := lits[t.literal()]
-			bits |= uint64(c.code) << nbits
+			bits |= uint64(c.code) << (nbits & 63)
 			nbits += c.len
 			if nbits >= 48 {
 				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -796,7 +866,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		} else {
 			// inlined
 			c := lengths[lengthCode&31]
-			bits |= uint64(c.code) << nbits
+			bits |= uint64(c.code) << (nbits & 63)
 			nbits += c.len
 			if nbits >= 48 {
 				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -819,7 +889,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		if extraLengthBits > 0 {
 			//w.writeBits(extraLength, extraLengthBits)
 			extraLength := int32(length - lengthBase[lengthCode&31])
-			bits |= uint64(extraLength) << nbits
+			bits |= uint64(extraLength) << (nbits & 63)
 			nbits += extraLengthBits
 			if nbits >= 48 {
 				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -846,7 +916,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		} else {
 			// inlined
 			c := offs[offsetCode]
-			bits |= uint64(c.code) << nbits
+			bits |= uint64(c.code) << (nbits & 63)
 			nbits += c.len
 			if nbits >= 48 {
 				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -867,7 +937,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		offsetComb := offsetCombined[offsetCode]
 		if offsetComb > 1<<16 {
 			//w.writeBits(extraOffset, extraOffsetBits)
-			bits |= uint64(offset&matchOffsetOnlyMask-(offsetComb&0xffff)) << nbits
+			bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63)
 			nbits += uint16(offsetComb >> 16)
 			if nbits >= 48 {
 				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
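Note: the rewritten offset path reads both values it needs from one packed integer. Judging purely by how it is consumed here, offsetCombined[code] appears to keep the extra-bit count in the high 16 bits (nbits += uint16(offsetComb >> 16)) and the offset base in the low 16 (offset - (offsetComb & 0xffff)). A hedged sketch of that inferred packing:

```go
package main

import "fmt"

// pack combines an extra-bit count and an offset base into one uint32,
// matching how the hunk above consumes offsetCombined: high 16 bits =
// extra-bit count, low 16 bits = base. (Layout inferred from usage.)
func pack(extraBits, base uint16) uint32 {
	return uint32(extraBits)<<16 | uint32(base)
}

func main() {
	comb := pack(5, 1025) // hypothetical offset code: 5 extra bits, base 1025
	offset := uint32(1060)

	extra := comb >> 16              // 5: number of extra bits to emit
	residual := offset - comb&0xffff // 35: value carried by those extra bits
	fmt.Println(extra, residual)
}
```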
@@ -996,10 +1066,41 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 	encoding := w.literalEncoding.codes[:256]
+	// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
+	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+	// Unroll, write 3 codes/loop.
+	// Fastest number of unrolls.
+	for len(input) > 3 {
+		// We must have at least 48 bits free.
+		if nbits >= 8 {
+			n := nbits >> 3
+			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+			bits >>= (n * 8) & 63
+			nbits -= n * 8
+			nbytes += uint8(n)
+		}
+		if nbytes >= bufferFlushSize {
+			if w.err != nil {
+				nbytes = 0
+				return
+			}
+			_, w.err = w.writer.Write(w.bytes[:nbytes])
+			nbytes = 0
+		}
+		a, b := encoding[input[0]], encoding[input[1]]
+		bits |= uint64(a.code) << (nbits & 63)
+		bits |= uint64(b.code) << ((nbits + a.len) & 63)
+		c := encoding[input[2]]
+		nbits += b.len + a.len
+		bits |= uint64(c.code) << (nbits & 63)
+		nbits += c.len
+		input = input[3:]
+	}
+
+	// Remaining...
 	for _, t := range input {
 		// Bitwriting inlined, ~30% speedup
 		c := encoding[t]
-		bits |= uint64(c.code) << nbits
+		bits |= uint64(c.code) << (nbits & 63)
 		nbits += c.len
 		if debugDeflate {
 			count += int(c.len)
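Note: the unrolled loop added above leans on one trick: keep the 64-bit accumulator in a local, always dump all eight of its bytes with a single PutUint64, but advance the output cursor only by the whole bytes actually filled (n = nbits >> 3), so leftover bits stay in the accumulator. A self-contained sketch of that flush step:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var out [8]byte
	bits := uint64(0b1_10101010_11110000) // 17 buffered bits, LSB first
	nbits := uint(17)

	// Flush whole bytes only, exactly as in the unrolled loop: write all
	// 8 accumulator bytes at once, then advance by n and keep the rest.
	n := nbits >> 3 // 2 whole bytes are ready
	binary.LittleEndian.PutUint64(out[:], bits)
	bits >>= (n * 8) & 63 // the 1 leftover bit stays buffered
	nbits -= n * 8

	fmt.Printf("wrote % x, %d bit(s) left in accumulator: %b\n", out[:n], nbits, bits)
}
```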