Bump github.com/containers/storage from 1.15.1 to 1.15.2

Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.15.1 to 1.15.2.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.15.1...v1.15.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
dependabot-preview[bot] authored on 2019-12-06 09:20:44 +00:00; committed by Daniel J Walsh
parent 9c402f3799
commit afaa9e7f00
160 changed files with 37334 additions and 42809 deletions


@@ -26,8 +26,12 @@ Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
### Status:
-BETA - there may still be subtle bugs, but a wide variety of content has been tested.
-There may still be implementation specific stuff in regards to error handling that could lead to edge cases.
+STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively
+used by several projects. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+There may still be specific combinations of data types/size/settings that could lead to edge cases,
+so as always, testing is recommended.
For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
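The status text above is the only compressor documentation touched by this diff; for orientation, a minimal streaming-compression sketch against the package's public API (names taken from its godoc, not from this change) might look like:

```go
package example

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// compress streams src into dst at the default (medium-fast) level;
// zstd.WithEncoderLevel(zstd.SpeedFastest) would select the "fastest" mode instead.
func compress(dst io.Writer, src io.Reader) error {
	enc, err := zstd.NewWriter(dst)
	if err != nil {
		return err
	}
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close()
		return err
	}
	return enc.Close() // flushes the final block and writes the frame footer
}
```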
@@ -251,8 +255,12 @@ The converter `s` can be reused to avoid allocations, even after errors.
## Decompressor
-STATUS: Release Candidate - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
### Usage
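A matching stream-decoding sketch, again using names from the package's godoc rather than anything in this diff (zstd.NewReader, Decoder.WriteTo, Decoder.Close):

```go
// decompress streams a zstd frame from src into dst.
// Requires: import ("io"; "github.com/klauspost/compress/zstd")
func decompress(dst io.Writer, src io.Reader) error {
	dec, err := zstd.NewReader(src)
	if err != nil {
		return err
	}
	defer dec.Close() // stops the decoder's background goroutines
	_, err = dec.WriteTo(dst)
	return err
}
```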


@@ -89,6 +89,7 @@ type blockDec struct {
sequenceBuf []seq
tmp [4]byte
err error
+decWG sync.WaitGroup
}
func (b *blockDec) String() string {
@@ -105,6 +106,7 @@ func newBlockDec(lowMem bool) *blockDec {
input: make(chan struct{}, 1),
history: make(chan *history, 1),
}
+b.decWG.Add(1)
go b.startDecoder()
return &b
}
@@ -183,11 +185,13 @@ func (b *blockDec) Close() {
close(b.input)
close(b.history)
close(b.result)
+b.decWG.Wait()
}
// decodeAsync will prepare decoding the block when it receives input.
// This will separate output and history.
func (b *blockDec) startDecoder() {
+defer b.decWG.Done()
for range b.input {
//println("blockDec: Got block input")
switch b.Type {
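The three hunks above add a sync.WaitGroup so that Close() blocks until the background block-decoding goroutine has actually exited. The same shutdown pattern in isolation, as a generic sketch with a hypothetical worker type (not the library's own code):

```go
package example

import "sync"

// worker is a hypothetical stand-in for blockDec: one goroutine drains input,
// and Close waits for that goroutine to finish before returning.
type worker struct {
	input chan []byte
	wg    sync.WaitGroup
}

func newWorker() *worker {
	w := &worker{input: make(chan []byte, 1)}
	w.wg.Add(1) // register the goroutine before starting it
	go w.run()
	return w
}

func (w *worker) run() {
	defer w.wg.Done() // mark completion once the input channel is drained
	for b := range w.input {
		_ = b // process one block
	}
}

func (w *worker) Close() {
	close(w.input) // ends the range loop in run
	w.wg.Wait()    // block until run has returned, as blockDec.Close now does
}
```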


@@ -300,13 +300,13 @@ func (b *blockEnc) encodeRaw(a []byte) {
}
// encodeLits can be used if the block is only litLen.
-func (b *blockEnc) encodeLits() error {
+func (b *blockEnc) encodeLits(raw bool) error {
var bh blockHeader
bh.setLast(b.last)
bh.setSize(uint32(len(b.literals)))
// Don't compress extremely small blocks
-if len(b.literals) < 32 {
+if len(b.literals) < 32 || raw {
if debug {
println("Adding RAW block, length", len(b.literals))
}
@@ -438,9 +438,9 @@ func fuzzFseEncoder(data []byte) int {
}
// encode will encode the block and put the output in b.output.
-func (b *blockEnc) encode() error {
+func (b *blockEnc) encode(raw bool) error {
if len(b.sequences) == 0 {
-return b.encodeLits()
+return b.encodeLits(raw)
}
// We want some difference
if len(b.literals) > (b.size - (b.size >> 5)) {
@@ -458,10 +458,10 @@ func (b *blockEnc) encode() error {
reUsed, single bool
err error
)
-if len(b.literals) >= 1024 {
+if len(b.literals) >= 1024 && !raw {
// Use 4 Streams.
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-} else if len(b.literals) > 32 {
+} else if len(b.literals) > 32 && !raw {
// Use 1 stream
single = true
out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
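Threading the new raw flag through encodeLits and encode lets a caller force literals to be stored uncompressed. A simplified, illustrative view of the resulting selection logic (the helper name is made up, not part of the package):

```go
// chooseLiteralEncoding sketches how the block encoder now picks a literals
// representation once the raw flag is taken into account.
func chooseLiteralEncoding(litLen int, raw bool) string {
	switch {
	case raw:
		return "raw block" // entropy coding skipped entirely
	case litLen < 32:
		return "raw block" // too small to be worth compressing
	case litLen >= 1024:
		return "huff0, 4 streams" // large literal payloads
	case litLen > 32:
		return "huff0, 1 stream" // mid-sized literal payloads
	default:
		return "raw block"
	}
}
```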


@@ -262,7 +262,7 @@ func (e *Encoder) nextBlock(final bool) error {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
-err = blk.encode()
+err = blk.encode(e.o.noEntropy)
}
switch err {
case errIncompressible:
@@ -473,7 +473,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
-err = blk.encode()
+err = blk.encode(e.o.noEntropy)
}
switch err {
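Both encoder paths, the streaming nextBlock and the stateless EncodeAll, now forward e.o.noEntropy into blk.encode. For reference, a usage sketch of the stateless path (error handling kept minimal; in real code the encoder should be reused across calls):

```go
// compressOnce compresses payload in one call via EncodeAll.
// Requires: import "github.com/klauspost/compress/zstd"
func compressOnce(payload []byte) ([]byte, error) {
	enc, err := zstd.NewWriter(nil) // nil writer: encoder is only used through EncodeAll
	if err != nil {
		return nil, err
	}
	defer enc.Close()
	return enc.EncodeAll(payload, make([]byte, 0, len(payload))), nil
}
```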


@@ -20,6 +20,7 @@ type encoderOptions struct {
windowSize int
level EncoderLevel
fullZero bool
+noEntropy bool
}
func (o *encoderOptions) setDefault() {
@@ -202,6 +203,16 @@ func WithZeroFrames(b bool) EOption {
}
}
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if content has matches, but unlikely to benefit from entropy
+// compression. Usually the slight speed improvement is not worth enabling this.
+func WithNoEntropyCompression(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.noEntropy = b
+		return nil
+	}
+}
// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
// If this flag is set, data must be regenerated within a single continuous memory segment.
// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
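Enabling the new option at construction time could look like this (a usage sketch; the option simply sets o.noEntropy as shown above):

```go
// newNoEntropyWriter builds an encoder that always stores literals as raw
// blocks, trading a small amount of compression for speed.
// Requires: import ("io"; "github.com/klauspost/compress/zstd")
func newNoEntropyWriter(w io.Writer) (*zstd.Encoder, error) {
	return zstd.NewWriter(w, zstd.WithNoEntropyCompression(true))
}
```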


@@ -111,7 +111,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
// Add empty last block
r.block.reset(nil)
r.block.last = true
-err := r.block.encodeLits()
+err := r.block.encodeLits(false)
if err != nil {
return written, err
}
@@ -178,7 +178,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
-err = r.block.encode()
+err = r.block.encode(false)
switch err {
case errIncompressible:
r.block.popOffsets()
@@ -188,7 +188,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
println("snappy.Decode:", err)
return written, err
}
-err = r.block.encodeLits()
+err = r.block.encodeLits(false)
if err != nil {
return written, err
}
@@ -235,7 +235,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
-err := r.block.encodeLits()
+err := r.block.encodeLits(false)
if err != nil {
return written, err
}
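The snappy converter always passes false, so its output keeps entropy compression regardless of the new encoder option. A usage sketch of the converter itself, based on the Convert signature shown in the hunk headers above:

```go
// snappyToZstd converts a snappy-framed stream read from src into a zstd
// stream written to dst, returning the number of bytes written.
// Requires: import ("io"; "github.com/klauspost/compress/zstd")
func snappyToZstd(dst io.Writer, src io.Reader) (int64, error) {
	var conv zstd.SnappyConverter
	return conv.Convert(src, dst)
}
```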