Update c/image after https://github.com/containers/image/pull/2408
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
vendor/github.com/klauspost/compress/README.md (7 changed lines, generated, vendored)
@@ -55,6 +55,10 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860

<details>
	<summary>See changes to v1.16.x</summary>

* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
	* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
@@ -93,6 +97,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
</details>

<details>
	<summary>See changes to v1.15.x</summary>
@@ -560,6 +565,8 @@ the stateless compress described below.
For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).

To disable all assembly add `-tags=noasm`. This works across all packages.

# Stateless compression

This package offers stateless compression as a special option for gzip/deflate.
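As an aside to the README excerpt above: stateless mode compresses each Write independently and keeps no window between calls. A minimal usage sketch, assuming the `flate.NewStatelessWriter` helper that the full README documents (illustrative, not part of this diff):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	// NewStatelessWriter keeps no state between Write calls, trading some
	// compression ratio for a flat, small memory footprint.
	w := flate.NewStatelessWriter(&buf)
	if _, err := w.Write([]byte("hello hello hello hello")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("compressed to %d bytes\n", buf.Len())
}
```

Because every Write is compressed on its own, this suits many short, independent payloads better than one long stream.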
vendor/github.com/klauspost/compress/internal/snapref/encode_other.go (2 changed lines, generated, vendored)
@@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int {
	i := 0
	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
-	// length emitted down below is is a little lower (at 60 = 64 - 4), because
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
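To make the splitting rule the fixed comment describes concrete, here is a hedged sketch; the function name and shape are illustrative, not the vendored `emitCopy`:

```go
package main

import "fmt"

// splitCopyLengths sketches the rule from the comment above: copies of 68
// bytes or more are emitted as 64-byte tagCopy2 ops, and a remaining length
// of 65..68 is split 60 + tail so the tail (5..8 bytes) fits a cheaper
// tagCopy1 op.
func splitCopyLengths(length int) []int {
	var parts []int
	for length >= 68 {
		parts = append(parts, 64)
		length -= 64
	}
	if length > 64 {
		parts = append(parts, 60)
		length -= 60
	}
	return append(parts, length)
}

func main() {
	fmt.Println(splitCopyLengths(67)) // [60 7] rather than [64 3]
}
```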
vendor/github.com/klauspost/compress/zstd/blockdec.go (3 changed lines, generated, vendored)
@@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
	if debugDecoder {
		printf("Compression modes: 0b%b", compMode)
	}
	if compMode&3 != 0 {
		return errors.New("corrupt block: reserved bits not zero")
	}
	for i := uint(0); i < 3; i++ {
		mode := seqCompMode((compMode >> (6 - i*2)) & 3)
		if debugDecoder {
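For context on the added reserved-bits check, a small sketch of the bit layout it validates, assuming the Symbol_Compression_Modes layout from the zstd format; the variable names are illustrative:

```go
package main

import "fmt"

func main() {
	// The sequences-section compression-modes byte packs three 2-bit table
	// modes; the low two bits are reserved and must be zero, which is what
	// the new check enforces.
	compMode := byte(0b10_01_00_00)
	fmt.Println("literal lengths mode:", (compMode>>6)&3)
	fmt.Println("offsets mode:        ", (compMode>>4)&3)
	fmt.Println("match lengths mode:  ", (compMode>>2)&3)
	fmt.Println("reserved (must be 0):", compMode&3)
}
```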
vendor/github.com/klauspost/compress/zstd/blockenc.go (20 changed lines, generated, vendored)
@@ -427,6 +427,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
	return nil
}

// encodeRLE will encode an RLE block.
func (b *blockEnc) encodeRLE(val byte, length uint32) {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(length)
	bh.setType(blockTypeRLE)
	b.output = bh.appendTo(b.output)
	b.output = append(b.output, val)
}

// fuzzFseEncoder can be used to fuzz the FSE encoder.
func fuzzFseEncoder(data []byte) int {
	if len(data) > maxSequences || len(data) < 2 {
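As background for `encodeRLE` above, a hedged sketch of the bytes an RLE block ends up as, per the zstd frame format; this helper is illustrative and not the vendored `blockHeader` type:

```go
package main

import "fmt"

// rleBlock builds a zstd RLE block: a 3-byte little-endian header (bit 0 =
// last block, bits 1-2 = block type, where 1 means RLE, bits 3-23 = block
// size) followed by the single byte the decoder repeats block-size times.
func rleBlock(val byte, size uint32, last bool) []byte {
	h := size << 3 // Block_Size in bits 3..23
	h |= 1 << 1    // Block_Type = 1 (RLE)
	if last {
		h |= 1 // Last_Block flag
	}
	return []byte{byte(h), byte(h >> 8), byte(h >> 16), val}
}

func main() {
	// A 4-byte block standing in for 1000 repeated 'a' bytes.
	fmt.Printf("% x\n", rleBlock('a', 1000, true))
}
```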
@@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
	if len(b.sequences) == 0 {
		return b.encodeLits(b.literals, rawAllLits)
	}
	if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 {
		// Check common RLE cases.
		seq := b.sequences[0]
		if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 {
			// Offset == 1 and 0 or 1 literals.
			b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen)
			return nil
		}
	}

	// We want some difference to at least account for the headers.
	saved := b.size - len(b.literals) - (b.size >> 6)
	if saved < 16 {
vendor/github.com/klauspost/compress/zstd/decoder.go (2 changed lines, generated, vendored)
@@ -82,7 +82,7 @@ var (
// can run multiple concurrent stateless decodes. It is even possible to
// use stateless decodes while a stream is being decoded.
//
-// The Reset function can be used to initiate a new stream, which is will considerably
+// The Reset function can be used to initiate a new stream, which will considerably
// reduce the allocations normally caused by NewReader.
func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
	initPredefined()
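The corrected doc comment is about reusing one Decoder across streams. A minimal usage sketch of that pattern, using the public zstd.NewReader/Reset/Close API (not part of this change):

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Create the decoder once; a nil reader is allowed when Reset supplies
	// the actual stream later.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	for _, name := range os.Args[1:] {
		f, err := os.Open(name)
		if err != nil {
			panic(err)
		}
		// Reset reuses the decoder's buffers and worker state for the new
		// stream instead of allocating them again via NewReader.
		if err := dec.Reset(f); err != nil {
			panic(err)
		}
		if _, err := io.Copy(io.Discard, dec); err != nil {
			panic(err)
		}
		f.Close()
	}
}
```

Calling Reset per stream is what saves the allocations the comment mentions; NewReader would rebuild the decompression window and worker setup every time.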
vendor/github.com/klauspost/compress/zstd/enc_best.go (12 changed lines, generated, vendored)
@@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
		break
	}

	// Add block to history
	s := e.addBlock(src)
	blk.size = len(src)

	// Check RLE first
	if len(src) > zstdMinMatch {
		ml := matchLen(src[1:], src)
		if ml == len(src)-1 {
			blk.literals = append(blk.literals, src[0])
			blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
			return
		}
	}

	if len(src) < minNonLiteralBlockSize {
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
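The RLE detection above relies on comparing the block against itself shifted by one byte. A reference sketch of that property, with illustrative names rather than the vendored assembly-backed `matchLen`:

```go
package main

import "fmt"

// commonPrefixLen gives the reference semantics of matchLen: the length of
// the common prefix of a and b.
func commonPrefixLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	src := []byte("aaaaaaaa")
	// The match between src[1:] and src only reaches len(src)-1 when every
	// byte equals its predecessor, i.e. the whole block is one repeated
	// byte, which Encode then emits as a single literal plus one sequence.
	fmt.Println(commonPrefixLen(src[1:], src) == len(src)-1) // true
}
```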
vendor/github.com/klauspost/compress/zstd/enc_better.go (13 changed lines, generated, vendored)
@@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
		e.cur = e.maxMatchOff
		break
	}

	// Add block to history
	s := e.addBlock(src)
	blk.size = len(src)

	// Check RLE first
	if len(src) > zstdMinMatch {
		ml := matchLen(src[1:], src)
		if ml == len(src)-1 {
			blk.literals = append(blk.literals, src[0])
			blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
			return
		}
	}

	if len(src) < minNonLiteralBlockSize {
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]