Update c/image from the main branch
> go get github.com/containers/image/v5@main
> make vendor

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
vendor/github.com/klauspost/compress/.goreleaser.yml (generated, vendored): 2 changes

@@ -3,7 +3,7 @@
 before:
   hooks:
     - ./gen.sh
-    - go install mvdan.cc/garble@v0.7.2
+    - go install mvdan.cc/garble@v0.9.3

 builds:
   -

vendor/github.com/klauspost/compress/README.md (generated, vendored): 21 changes

@@ -16,6 +16,27 @@ This package provides various compression algorithms.

 # changelog

+* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
+	* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
+	* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
+	* s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
+	* zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
+	* huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
+
+* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
+	* s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
+	* s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
+	* s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
+	* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
+	* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
+	* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
+
+* Jan 21st, 2023 (v1.15.15)
+	* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
+	* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
+	* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
+	* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
+
 * Jan 3rd, 2023 (v1.15.14)

 	* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718

vendor/github.com/klauspost/compress/fse/decompress.go (generated, vendored): 4 changes

@@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error {
 // If the buffer is over-read an error is returned.
 func (s *Scratch) decompress() error {
 	br := &s.bits
-	br.init(s.br.unread())
+	if err := br.init(s.br.unread()); err != nil {
+		return err
+	}

 	var s1, s2 decoder
 	// Initialize and decode first state and symbol.

vendor/github.com/klauspost/compress/huff0/bitwriter.go (generated, vendored): 16 changes

@@ -60,6 +60,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
 	b.nBits += encA.nBits + encB.nBits
 }

+// encFourSymbols adds up to 32 bits from four symbols.
+// It will not check if there is space for them,
+// so the caller must ensure that b has been flushed recently.
+func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
+	bitsA := encA.nBits
+	bitsB := bitsA + encB.nBits
+	bitsC := bitsB + encC.nBits
+	bitsD := bitsC + encD.nBits
+	combined := uint64(encA.val) |
+		(uint64(encB.val) << (bitsA & 63)) |
+		(uint64(encC.val) << (bitsB & 63)) |
+		(uint64(encD.val) << (bitsC & 63))
+	b.bitContainer |= combined << (b.nBits & 63)
+	b.nBits += bitsD
+}
+
 // flush32 will flush out, so there are at least 32 bits available for writing.
 func (b *bitWriter) flush32() {
 	if b.nBits < 32 {

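To make the packing arithmetic above concrete, here is a small runnable sketch of the same technique outside the library. The entry type and the four example codes are invented for illustration; only the shift-and-OR chain mirrors encFourSymbols:

package main

import "fmt"

// entry is a toy stand-in for huff0's cTableEntry: a bit code and its length.
type entry struct {
	val   uint16
	nBits uint8
}

func main() {
	// Four invented variable-length codes.
	a := entry{val: 0b1, nBits: 1}
	b := entry{val: 0b10, nBits: 2}
	c := entry{val: 0b011, nBits: 3}
	d := entry{val: 0b0101, nBits: 4}

	// Running totals of bits consumed so far, as in encFourSymbols.
	bitsA := a.nBits
	bitsB := bitsA + b.nBits
	bitsC := bitsB + c.nBits
	bitsD := bitsC + d.nBits

	// Each code is shifted past all the bits that precede it, then OR-ed
	// into one 64-bit word. The "& 63" keeps the shift amount in range.
	combined := uint64(a.val) |
		uint64(b.val)<<(bitsA&63) |
		uint64(c.val)<<(bitsB&63) |
		uint64(d.val)<<(bitsC&63)

	fmt.Printf("packed %d bits: %b\n", bitsD, combined)
}

With these codes, a occupies bit 0, b bits 1-2, c bits 3-5, and d bits 6-9, so the program prints "packed 10 bits: 101011101".
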
vendor/github.com/klauspost/compress/huff0/compress.go (generated, vendored): 3 changes

@@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
 			tmp := src[n : n+4]
 			// tmp should be len 4
 			bw.flush32()
-			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
-			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+			bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
 		}
 	} else {
 		for ; n >= 0; n -= 4 {

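This call-site change pairs with the new encFourSymbols above: per the changelog entry for pull/774, it speeds up compress1xDo. A plausible reading (my inference, not stated in the commit) is that combining the four codes in registers and updating b.bitContainer and b.nBits once per four symbols, rather than twice, shortens the dependency chain in the hot loop.
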
vendor/github.com/klauspost/compress/huff0/decompress.go (generated, vendored): 2 changes

@@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
 	b, err := fse.Decompress(in[:iSize], s.fse)
 	s.fse.Out = nil
 	if err != nil {
-		return s, nil, err
+		return s, nil, fmt.Errorf("fse decompress returned: %w", err)
 	}
 	if len(b) > 255 {
 		return s, nil, errors.New("corrupt input: output table too large")

vendor/github.com/klauspost/compress/internal/snapref/encode_other.go (generated, vendored): 22 changes

@@ -103,6 +103,28 @@ func hash(u, shift uint32) uint32 {
 	return (u * 0x1e35a7bd) >> shift
 }

+// EncodeBlockInto exposes encodeBlock but checks dst size.
+func EncodeBlockInto(dst, src []byte) (d int) {
+	if MaxEncodedLen(len(src)) > len(dst) {
+		return 0
+	}
+
+	// encodeBlock breaks on too big blocks, so split.
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return d
+}
+
 // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
 // assumes that the varint-encoded length of the decompressed bytes has already
 // been written.

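To isolate the chunking pattern used by EncodeBlockInto, here is a small runnable sketch. maxChunk and process are invented stand-ins for snapref's maxBlockSize (64 KiB in the real package) and encodeBlock, which are unexported:

package main

import "fmt"

// maxChunk is a placeholder for snapref's maxBlockSize.
const maxChunk = 4

// process stands in for encodeBlock, which must never see more than one
// block's worth of input.
func process(p []byte) {
	fmt.Println("chunk:", p)
}

func main() {
	src := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}
	// Same shape as the loop in EncodeBlockInto: take the whole remainder,
	// then split off at most maxChunk bytes and keep the rest for the next pass.
	for len(src) > 0 {
		p := src
		src = nil
		if len(p) > maxChunk {
			p, src = p[:maxChunk], p[maxChunk:]
		}
		process(p)
	}
}
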
vendor/github.com/klauspost/compress/zstd/blockdec.go (generated, vendored): 4 changes

@@ -9,6 +9,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
+	"hash/crc32"
 	"io"
 	"os"
 	"path/filepath"

@@ -442,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
 			}
 		}
 		var err error
+		if debugDecoder {
+			println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
+		}
 		huff, literals, err = huff0.ReadTable(literals, huff)
 		if err != nil {
 			println("reading huffman table:", err)

vendor/github.com/klauspost/compress/zstd/bytebuf.go (generated, vendored): 2 changes

@@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
 func (b *byteBuf) readByte() (byte, error) {
 	bb := *b
 	if len(bb) < 1 {
-		return 0, nil
+		return 0, io.ErrUnexpectedEOF
 	}
 	r := bb[0]
 	*b = bb[1:]

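A note on the fix above: the io.ByteReader contract requires a non-nil error when no byte can be read, and the old return of (0, nil) made a truncated buffer look like a successfully read zero byte. Returning io.ErrUnexpectedEOF lets the decoder report truncated input instead of silently mis-decoding.
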
vendor/github.com/klauspost/compress/zstd/dict.go (generated, vendored): 43 changes

@@ -32,14 +32,38 @@ func (d *dict) ID() uint32 {
 	return d.id
 }

-// DictContentSize returns the dictionary content size or 0 if d is nil.
-func (d *dict) DictContentSize() int {
+// ContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) ContentSize() int {
 	if d == nil {
 		return 0
 	}
 	return len(d.content)
 }

+// Content returns the dictionary content.
+func (d *dict) Content() []byte {
+	if d == nil {
+		return nil
+	}
+	return d.content
+}
+
+// Offsets returns the initial offsets.
+func (d *dict) Offsets() [3]int {
+	if d == nil {
+		return [3]int{}
+	}
+	return d.offsets
+}
+
+// LitEncoder returns the literal encoder.
+func (d *dict) LitEncoder() *huff0.Scratch {
+	if d == nil {
+		return nil
+	}
+	return d.litEnc
+}
+
 // Load a dictionary as described in
 // https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
 func loadDict(b []byte) (*dict, error) {

@@ -64,7 +88,7 @@ func loadDict(b []byte) (*dict, error) {
 	var err error
 	d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("loading literal table: %w", err)
 	}
 	d.litEnc.Reuse = huff0.ReusePolicyMust

@@ -122,3 +146,16 @@ func loadDict(b []byte) (*dict, error) {

 	return &d, nil
 }
+
+// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
+func InspectDictionary(b []byte) (interface {
+	ID() uint32
+	ContentSize() int
+	Content() []byte
+	Offsets() [3]int
+	LitEncoder() *huff0.Scratch
+}, error) {
+	initPredefined()
+	d, err := loadDict(b)
+	return d, err
+}

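InspectDictionary is a new exported entry point, so a short usage sketch may be useful. The file name is hypothetical, and the printed accessors come from the interface shown in the diff:

package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// "my.dict" is a hypothetical dictionary file, e.g. produced by `zstd --train`.
	raw, err := os.ReadFile("my.dict")
	if err != nil {
		panic(err)
	}
	d, err := zstd.InspectDictionary(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println("dictionary ID:", d.ID())
	fmt.Println("content size:", d.ContentSize())
	fmt.Println("initial offsets:", d.Offsets())
}
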
vendor/github.com/klauspost/compress/zstd/enc_base.go (generated, vendored): 2 changes

@@ -149,7 +149,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
 	if singleBlock {
 		e.lowMem = true
 	}
-	e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
+	e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
 	e.lowMem = low
 }

vendor/github.com/klauspost/compress/zstd/enc_best.go (generated, vendored): 65 changes

@@ -32,7 +32,6 @@ type match struct {
 	length int32
 	rep    int32
 	est    int32
-	_      [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
 }

 const highScore = 25000

@@ -189,12 +188,6 @@ encodeLoop:
 			panic("offset0 was 0")
 		}

-		bestOf := func(a, b *match) *match {
-			if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
-				return a
-			}
-			return b
-		}
 		const goodEnough = 100

 		nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)

@@ -202,40 +195,41 @@ encodeLoop:
 		candidateL := e.longTable[nextHashL]
 		candidateS := e.table[nextHashS]

-		matchAt := func(offset int32, s int32, first uint32, rep int32) match {
+		// Set m to a match at offset if it looks like that will improve compression.
+		improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
 			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
-				return match{s: s, est: highScore}
+				return
 			}
 			if debugAsserts {
 				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
 					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
 				}
 			}
-			m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
-			m.estBits(bitsPerByte)
-			return m
+			cand := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
+			cand.estBits(bitsPerByte)
+			if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
+				*m = cand
+			}
 		}

-		m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
-		m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
-		m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
-		m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
-		best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
+		best := match{s: s, est: highScore}
+		improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)

 		if canRepeat && best.length < goodEnough {
 			cv32 := uint32(cv >> 8)
 			spp := s + 1
-			m1 := matchAt(spp-offset1, spp, cv32, 1)
-			m2 := matchAt(spp-offset2, spp, cv32, 2)
-			m3 := matchAt(spp-offset3, spp, cv32, 3)
-			best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
+			improve(&best, spp-offset1, spp, cv32, 1)
+			improve(&best, spp-offset2, spp, cv32, 2)
+			improve(&best, spp-offset3, spp, cv32, 3)
 			if best.length > 0 {
 				cv32 = uint32(cv >> 24)
 				spp += 2
-				m1 := matchAt(spp-offset1, spp, cv32, 1)
-				m2 := matchAt(spp-offset2, spp, cv32, 2)
-				m3 := matchAt(spp-offset3, spp, cv32, 3)
-				best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
+				improve(&best, spp-offset1, spp, cv32, 1)
+				improve(&best, spp-offset2, spp, cv32, 2)
+				improve(&best, spp-offset3, spp, cv32, 3)
 			}
 		}
 		// Load next and check...

@@ -262,18 +256,16 @@ encodeLoop:
 			candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]

 			// Short at s+1
-			m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
+			improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
 			// Long at s+1, s+2
-			m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
-			m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
-			m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
-			m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
-			best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
+			improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+			improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+			improve(&best, candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
+			improve(&best, candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
 			if false {
 				// Short at s+3.
 				// Too often worse...
-				m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
-				best = bestOf(best, &m)
+				improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
 			}
 			// See if we can find a better match by checking where the current best ends.
 			// Use that offset to see if we can find a better full match.

@@ -284,13 +276,10 @@ encodeLoop:
 				// For this compression level 2 yields the best results.
 				const skipBeginning = 2
 				if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
-					m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
-					bestEnd := bestOf(best, &m)
+					improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
 					if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
-						m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
-						bestEnd = bestOf(bestEnd, &m)
+						improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
 					}
-					best = bestEnd
 				}
 			}
 		}

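A note on this refactor (my reading, not stated in the commit message): the old matchAt helper built a match value for every candidate, returning a sentinel {s: s, est: highScore} on misses, and bestOf compared them pairwise. The new improve closure rejects candidates before constructing anything and overwrites best in place, so misses no longer construct or copy match values, which also let the 12-byte cache-line padding in the match struct be dropped. The changelog entry for pull/776 above credits this change with speeding up and improving the best encoder.
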
vendor/github.com/klauspost/compress/zstd/seqdec.go (generated, vendored): 6 changes

@@ -314,9 +314,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 		}
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
-			if size-startSize == 424242 {
-				panic("here")
-			}
 			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		if size > cap(out) {

@@ -427,8 +424,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 		}
 	}

-	// Check if space for literals
-	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
+	if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
 		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}

vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go (generated, vendored): 1 change

@@ -148,7 +148,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
 		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-
 	}
 	err := br.close()
 	if err != nil {