Mirror of https://github.com/linuxkit/linuxkit.git (synced 2025-07-19 17:26:28 +00:00)
commit eda59aa5ab
parent c977b1b306

    Feedback

    Signed-off-by: David Gageot <david.gageot@docker.com>
@@ -27,7 +27,6 @@ require (
     github.com/gophercloud/gophercloud v0.1.0
     github.com/gophercloud/utils v0.0.0-20181029231510-34f5991525d1
     github.com/hashicorp/go-version v1.2.0
-    github.com/klauspost/compress v1.15.1
     github.com/klauspost/pgzip v1.2.5
     github.com/moby/buildkit v0.10.1-0.20220721175135-c75998aec3d4
     github.com/moby/hyperkit v0.0.0-20180416161519-d65b09c1c28a
@@ -9,6 +9,7 @@ import (
     "path/filepath"
     "strings"
 
+    // drop-in 100% compatible replacement and 17% faster than compress/gzip.
     gzip "github.com/klauspost/pgzip"
     "github.com/linuxkit/linuxkit/src/cmd/linuxkit/pad4"
     "github.com/surma/gocpio"
@@ -17,6 +17,7 @@ import (
     "strings"
 
     "github.com/containerd/containerd/reference"
+    // drop-in 100% compatible replacement and 17% faster than compress/gzip.
     gzip "github.com/klauspost/pgzip"
     "github.com/linuxkit/linuxkit/src/cmd/linuxkit/util"
     log "github.com/sirupsen/logrus"
@@ -10,7 +10,8 @@ import (
     "path/filepath"
     "runtime"
 
-    "github.com/klauspost/compress/gzip"
+    // drop-in 100% compatible replacement and 17% faster than compress/gzip.
+    gzip "github.com/klauspost/pgzip"
     log "github.com/sirupsen/logrus"
 )
 
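The three hunks above rely on pgzip being imported under the local name gzip, so existing call sites keep compiling and behaving the same. A minimal sketch of that pattern follows; it is illustrative only, not code from the linuxkit tree, and the package main wrapper and sample data are assumptions:

package main

import (
    "bytes"
    "fmt"
    "io"

    // drop-in 100% compatible replacement and 17% faster than compress/gzip.
    gzip "github.com/klauspost/pgzip"
)

func main() {
    var buf bytes.Buffer

    // Same API as compress/gzip on the write side: NewWriter, Write, Close.
    zw := gzip.NewWriter(&buf)
    if _, err := zw.Write([]byte("hello, linuxkit")); err != nil {
        panic(err)
    }
    if err := zw.Close(); err != nil {
        panic(err)
    }

    // Same API on the read side: NewReader, reads via io.Reader, Close.
    zr, err := gzip.NewReader(&buf)
    if err != nil {
        panic(err)
    }
    defer zr.Close()
    out, err := io.ReadAll(zr)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}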
src/cmd/linuxkit/vendor/github.com/klauspost/compress/gzip/gunzip.go (generated, vendored; 351 lines deleted)
@@ -1,351 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package gzip implements reading and writing of gzip format compressed files,
// as specified in RFC 1952.
package gzip

import (
    "bufio"
    "compress/gzip"
    "encoding/binary"
    "hash/crc32"
    "io"
    "time"

    "github.com/klauspost/compress/flate"
)

const (
    gzipID1     = 0x1f
    gzipID2     = 0x8b
    gzipDeflate = 8
    flagText    = 1 << 0
    flagHdrCrc  = 1 << 1
    flagExtra   = 1 << 2
    flagName    = 1 << 3
    flagComment = 1 << 4
)

var (
    // ErrChecksum is returned when reading GZIP data that has an invalid checksum.
    ErrChecksum = gzip.ErrChecksum
    // ErrHeader is returned when reading GZIP data that has an invalid header.
    ErrHeader = gzip.ErrHeader
)

var le = binary.LittleEndian

// noEOF converts io.EOF to io.ErrUnexpectedEOF.
func noEOF(err error) error {
    if err == io.EOF {
        return io.ErrUnexpectedEOF
    }
    return err
}

// The gzip file stores a header giving metadata about the compressed file.
// That header is exposed as the fields of the Writer and Reader structs.
//
// Strings must be UTF-8 encoded and may only contain Unicode code points
// U+0001 through U+00FF, due to limitations of the GZIP file format.
type Header struct {
    Comment string    // comment
    Extra   []byte    // "extra data"
    ModTime time.Time // modification time
    Name    string    // file name
    OS      byte      // operating system type
}

// A Reader is an io.Reader that can be read to retrieve
// uncompressed data from a gzip-format compressed file.
//
// In general, a gzip file can be a concatenation of gzip files,
// each with its own header. Reads from the Reader
// return the concatenation of the uncompressed data of each.
// Only the first header is recorded in the Reader fields.
//
// Gzip files store a length and checksum of the uncompressed data.
// The Reader will return a ErrChecksum when Read
// reaches the end of the uncompressed data if it does not
// have the expected length or checksum. Clients should treat data
// returned by Read as tentative until they receive the io.EOF
// marking the end of the data.
type Reader struct {
    Header       // valid after NewReader or Reader.Reset
    r            flate.Reader
    br           *bufio.Reader
    decompressor io.ReadCloser
    digest       uint32 // CRC-32, IEEE polynomial (section 8)
    size         uint32 // Uncompressed size (section 2.3.1)
    buf          [512]byte
    err          error
    multistream  bool
}

// NewReader creates a new Reader reading the given reader.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
//
// It is the caller's responsibility to call Close on the Reader when done.
//
// The Reader.Header fields will be valid in the Reader returned.
func NewReader(r io.Reader) (*Reader, error) {
    z := new(Reader)
    if err := z.Reset(r); err != nil {
        return nil, err
    }
    return z, nil
}

// Reset discards the Reader z's state and makes it equivalent to the
// result of its original state from NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) error {
    *z = Reader{
        decompressor: z.decompressor,
        multistream:  true,
    }
    if rr, ok := r.(flate.Reader); ok {
        z.r = rr
    } else {
        // Reuse if we can.
        if z.br != nil {
            z.br.Reset(r)
        } else {
            z.br = bufio.NewReader(r)
        }
        z.r = z.br
    }
    z.Header, z.err = z.readHeader()
    return z.err
}

// Multistream controls whether the reader supports multistream files.
//
// If enabled (the default), the Reader expects the input to be a sequence
// of individually gzipped data streams, each with its own header and
// trailer, ending at EOF. The effect is that the concatenation of a sequence
// of gzipped files is treated as equivalent to the gzip of the concatenation
// of the sequence. This is standard behavior for gzip readers.
//
// Calling Multistream(false) disables this behavior; disabling the behavior
// can be useful when reading file formats that distinguish individual gzip
// data streams or mix gzip data streams with other data streams.
// In this mode, when the Reader reaches the end of the data stream,
// Read returns io.EOF. If the underlying reader implements io.ByteReader,
// it will be left positioned just after the gzip stream.
// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
// If there is no next stream, z.Reset(r) will return io.EOF.
func (z *Reader) Multistream(ok bool) {
    z.multistream = ok
}

// readString reads a NUL-terminated string from z.r.
// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
// will output a string encoded using UTF-8.
// This method always updates z.digest with the data read.
func (z *Reader) readString() (string, error) {
    var err error
    needConv := false
    for i := 0; ; i++ {
        if i >= len(z.buf) {
            return "", ErrHeader
        }
        z.buf[i], err = z.r.ReadByte()
        if err != nil {
            return "", err
        }
        if z.buf[i] > 0x7f {
            needConv = true
        }
        if z.buf[i] == 0 {
            // Digest covers the NUL terminator.
            z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1])

            // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1).
            if needConv {
                s := make([]rune, 0, i)
                for _, v := range z.buf[:i] {
                    s = append(s, rune(v))
                }
                return string(s), nil
            }
            return string(z.buf[:i]), nil
        }
    }
}

// readHeader reads the GZIP header according to section 2.3.1.
// This method does not set z.err.
func (z *Reader) readHeader() (hdr Header, err error) {
    if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
        // RFC 1952, section 2.2, says the following:
        // A gzip file consists of a series of "members" (compressed data sets).
        //
        // Other than this, the specification does not clarify whether a
        // "series" is defined as "one or more" or "zero or more". To err on the
        // side of caution, Go interprets this to mean "zero or more".
        // Thus, it is okay to return io.EOF here.
        return hdr, err
    }
    if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
        return hdr, ErrHeader
    }
    flg := z.buf[3]
    hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
    // z.buf[8] is XFL and is currently ignored.
    hdr.OS = z.buf[9]
    z.digest = crc32.ChecksumIEEE(z.buf[:10])

    if flg&flagExtra != 0 {
        if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
            return hdr, noEOF(err)
        }
        z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
        data := make([]byte, le.Uint16(z.buf[:2]))
        if _, err = io.ReadFull(z.r, data); err != nil {
            return hdr, noEOF(err)
        }
        z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
        hdr.Extra = data
    }

    var s string
    if flg&flagName != 0 {
        if s, err = z.readString(); err != nil {
            return hdr, err
        }
        hdr.Name = s
    }

    if flg&flagComment != 0 {
        if s, err = z.readString(); err != nil {
            return hdr, err
        }
        hdr.Comment = s
    }

    if flg&flagHdrCrc != 0 {
        if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
            return hdr, noEOF(err)
        }
        digest := le.Uint16(z.buf[:2])
        if digest != uint16(z.digest) {
            return hdr, ErrHeader
        }
    }

    z.digest = 0
    if z.decompressor == nil {
        z.decompressor = flate.NewReader(z.r)
    } else {
        z.decompressor.(flate.Resetter).Reset(z.r, nil)
    }
    return hdr, nil
}

// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
func (z *Reader) Read(p []byte) (n int, err error) {
    if z.err != nil {
        return 0, z.err
    }

    n, z.err = z.decompressor.Read(p)
    z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
    z.size += uint32(n)
    if z.err != io.EOF {
        // In the normal case we return here.
        return n, z.err
    }

    // Finished file; check checksum and size.
    if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
        z.err = noEOF(err)
        return n, z.err
    }
    digest := le.Uint32(z.buf[:4])
    size := le.Uint32(z.buf[4:8])
    if digest != z.digest || size != z.size {
        z.err = ErrChecksum
        return n, z.err
    }
    z.digest, z.size = 0, 0

    // File is ok; check if there is another.
    if !z.multistream {
        return n, io.EOF
    }
    z.err = nil // Remove io.EOF

    if _, z.err = z.readHeader(); z.err != nil {
        return n, z.err
    }

    // Read from next file, if necessary.
    if n > 0 {
        return n, nil
    }
    return z.Read(p)
}

// Support the io.WriteTo interface for io.Copy and friends.
func (z *Reader) WriteTo(w io.Writer) (int64, error) {
    total := int64(0)
    crcWriter := crc32.NewIEEE()
    for {
        if z.err != nil {
            if z.err == io.EOF {
                return total, nil
            }
            return total, z.err
        }

        // We write both to output and digest.
        mw := io.MultiWriter(w, crcWriter)
        n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
        total += n
        z.size += uint32(n)
        if err != nil {
            z.err = err
            return total, z.err
        }

        // Finished file; check checksum + size.
        if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
            if err == io.EOF {
                err = io.ErrUnexpectedEOF
            }
            z.err = err
            return total, err
        }
        z.digest = crcWriter.Sum32()
        digest := le.Uint32(z.buf[:4])
        size := le.Uint32(z.buf[4:8])
        if digest != z.digest || size != z.size {
            z.err = ErrChecksum
            return total, z.err
        }
        z.digest, z.size = 0, 0

        // File is ok; check if there is another.
        if !z.multistream {
            return total, nil
        }
        crcWriter.Reset()
        z.err = nil // Remove io.EOF

        if _, z.err = z.readHeader(); z.err != nil {
            if z.err == io.EOF {
                return total, nil
            }
            return total, z.err
        }
    }
}

// Close closes the Reader. It does not close the underlying io.Reader.
// In order for the GZIP checksum to be verified, the reader must be
// fully consumed until the io.EOF.
func (z *Reader) Close() error { return z.decompressor.Close() }
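The Reader deleted above mirrors the standard library's gzip reader: it layers CRC-32 and size verification over a flate stream and, by default, decodes a concatenation of gzip members. A minimal sketch of that multistream behaviour, written against the standard library's compress/gzip (which exposes the same Multistream/Reset API); the sample strings, helper function and package main wrapper are assumptions, not linuxkit code:

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
)

// gzipBytes compresses s into a single gzip member.
func gzipBytes(s string) []byte {
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    zw.Write([]byte(s))
    zw.Close()
    return buf.Bytes()
}

func main() {
    // Two gzip members concatenated back to back, as in a multistream file.
    stream := append(gzipBytes("first member\n"), gzipBytes("second member\n")...)
    br := bytes.NewReader(stream)

    zr, err := gzip.NewReader(br)
    if err != nil {
        panic(err)
    }
    for {
        zr.Multistream(false) // stop at the end of the current member
        out, err := io.ReadAll(zr)
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))

        // Reset positions the reader at the next member; io.EOF means done.
        if err := zr.Reset(br); err == io.EOF {
            break
        } else if err != nil {
            panic(err)
        }
    }
}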
src/cmd/linuxkit/vendor/github.com/klauspost/compress/gzip/gzip.go (generated, vendored; 269 lines deleted)
@@ -1,269 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gzip

import (
    "errors"
    "fmt"
    "hash/crc32"
    "io"

    "github.com/klauspost/compress/flate"
)

// These constants are copied from the flate package, so that code that imports
// "compress/gzip" does not also have to import "compress/flate".
const (
    NoCompression       = flate.NoCompression
    BestSpeed           = flate.BestSpeed
    BestCompression     = flate.BestCompression
    DefaultCompression  = flate.DefaultCompression
    ConstantCompression = flate.ConstantCompression
    HuffmanOnly         = flate.HuffmanOnly

    // StatelessCompression will do compression but without maintaining any state
    // between Write calls.
    // There will be no memory kept between Write calls,
    // but compression and speed will be suboptimal.
    // Because of this, the size of actual Write calls will affect output size.
    StatelessCompression = -3
)

// A Writer is an io.WriteCloser.
// Writes to a Writer are compressed and written to w.
type Writer struct {
    Header      // written at first call to Write, Flush, or Close
    w           io.Writer
    level       int
    err         error
    compressor  *flate.Writer
    digest      uint32 // CRC-32, IEEE polynomial (section 8)
    size        uint32 // Uncompressed size (section 2.3.1)
    wroteHeader bool
    closed      bool
    buf         [10]byte
}

// NewWriter returns a new Writer.
// Writes to the returned writer are compressed and written to w.
//
// It is the caller's responsibility to call Close on the WriteCloser when done.
// Writes may be buffered and not flushed until Close.
//
// Callers that wish to set the fields in Writer.Header must do so before
// the first call to Write, Flush, or Close.
func NewWriter(w io.Writer) *Writer {
    z, _ := NewWriterLevel(w, DefaultCompression)
    return z
}

// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
//
// The compression level can be DefaultCompression, NoCompression, or any
// integer value between BestSpeed and BestCompression inclusive. The error
// returned will be nil if the level is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
    if level < StatelessCompression || level > BestCompression {
        return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
    }
    z := new(Writer)
    z.init(w, level)
    return z, nil
}

func (z *Writer) init(w io.Writer, level int) {
    compressor := z.compressor
    if level != StatelessCompression {
        if compressor != nil {
            compressor.Reset(w)
        }
    }

    *z = Writer{
        Header: Header{
            OS: 255, // unknown
        },
        w:          w,
        level:      level,
        compressor: compressor,
    }
}

// Reset discards the Writer z's state and makes it equivalent to the
// result of its original state from NewWriter or NewWriterLevel, but
// writing to w instead. This permits reusing a Writer rather than
// allocating a new one.
func (z *Writer) Reset(w io.Writer) {
    z.init(w, z.level)
}

// writeBytes writes a length-prefixed byte slice to z.w.
func (z *Writer) writeBytes(b []byte) error {
    if len(b) > 0xffff {
        return errors.New("gzip.Write: Extra data is too large")
    }
    le.PutUint16(z.buf[:2], uint16(len(b)))
    _, err := z.w.Write(z.buf[:2])
    if err != nil {
        return err
    }
    _, err = z.w.Write(b)
    return err
}

// writeString writes a UTF-8 string s in GZIP's format to z.w.
// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
func (z *Writer) writeString(s string) (err error) {
    // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
    needconv := false
    for _, v := range s {
        if v == 0 || v > 0xff {
            return errors.New("gzip.Write: non-Latin-1 header string")
        }
        if v > 0x7f {
            needconv = true
        }
    }
    if needconv {
        b := make([]byte, 0, len(s))
        for _, v := range s {
            b = append(b, byte(v))
        }
        _, err = z.w.Write(b)
    } else {
        _, err = io.WriteString(z.w, s)
    }
    if err != nil {
        return err
    }
    // GZIP strings are NUL-terminated.
    z.buf[0] = 0
    _, err = z.w.Write(z.buf[:1])
    return err
}

// Write writes a compressed form of p to the underlying io.Writer. The
// compressed bytes are not necessarily flushed until the Writer is closed.
func (z *Writer) Write(p []byte) (int, error) {
    if z.err != nil {
        return 0, z.err
    }
    var n int
    // Write the GZIP header lazily.
    if !z.wroteHeader {
        z.wroteHeader = true
        z.buf[0] = gzipID1
        z.buf[1] = gzipID2
        z.buf[2] = gzipDeflate
        z.buf[3] = 0
        if z.Extra != nil {
            z.buf[3] |= 0x04
        }
        if z.Name != "" {
            z.buf[3] |= 0x08
        }
        if z.Comment != "" {
            z.buf[3] |= 0x10
        }
        le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
        if z.level == BestCompression {
            z.buf[8] = 2
        } else if z.level == BestSpeed {
            z.buf[8] = 4
        } else {
            z.buf[8] = 0
        }
        z.buf[9] = z.OS
        n, z.err = z.w.Write(z.buf[:10])
        if z.err != nil {
            return n, z.err
        }
        if z.Extra != nil {
            z.err = z.writeBytes(z.Extra)
            if z.err != nil {
                return n, z.err
            }
        }
        if z.Name != "" {
            z.err = z.writeString(z.Name)
            if z.err != nil {
                return n, z.err
            }
        }
        if z.Comment != "" {
            z.err = z.writeString(z.Comment)
            if z.err != nil {
                return n, z.err
            }
        }

        if z.compressor == nil && z.level != StatelessCompression {
            z.compressor, _ = flate.NewWriter(z.w, z.level)
        }
    }
    z.size += uint32(len(p))
    z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
    if z.level == StatelessCompression {
        return len(p), flate.StatelessDeflate(z.w, p, false, nil)
    }
    n, z.err = z.compressor.Write(p)
    return n, z.err
}

// Flush flushes any pending compressed data to the underlying writer.
//
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet. Flush does
// not return until the data has been written. If the underlying
// writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (z *Writer) Flush() error {
    if z.err != nil {
        return z.err
    }
    if z.closed || z.level == StatelessCompression {
        return nil
    }
    if !z.wroteHeader {
        z.Write(nil)
        if z.err != nil {
            return z.err
        }
    }
    z.err = z.compressor.Flush()
    return z.err
}

// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
    if z.err != nil {
        return z.err
    }
    if z.closed {
        return nil
    }
    z.closed = true
    if !z.wroteHeader {
        z.Write(nil)
        if z.err != nil {
            return z.err
        }
    }
    if z.level == StatelessCompression {
        z.err = flate.StatelessDeflate(z.w, nil, true, nil)
    } else {
        z.err = z.compressor.Close()
    }
    if z.err != nil {
        return z.err
    }
    le.PutUint32(z.buf[:4], z.digest)
    le.PutUint32(z.buf[4:8], z.size)
    _, z.err = z.w.Write(z.buf[:8])
    return z.err
}
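The Writer deleted above writes the 10-byte gzip header lazily on the first Write, Flush, or Close, then appends the CRC-32 and uncompressed-size trailer on Close. A short usage sketch against the standard library's compress/gzip, which shares this Writer API (klauspost's copy only adds the ConstantCompression and StatelessCompression levels on top); the file name, comment, and data below are made up for illustration:

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "time"
)

func main() {
    var buf bytes.Buffer

    zw, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
    if err != nil {
        panic(err)
    }
    // Header fields must be set before the first Write/Flush/Close; they end
    // up in the lazily written header (name and comment are NUL-terminated
    // Latin-1 strings per RFC 1952).
    zw.Name = "hello.txt"
    zw.Comment = "written by a sketch"
    zw.ModTime = time.Now()

    if _, err := zw.Write([]byte("hello")); err != nil {
        panic(err)
    }
    // Close flushes the deflate stream and appends the CRC-32 and size trailer.
    if err := zw.Close(); err != nil {
        panic(err)
    }
    fmt.Printf("compressed %d bytes into %d bytes\n", len("hello"), buf.Len())
}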
src/cmd/linuxkit/vendor/modules.txt (vendored; 2 lines deleted)
@@ -326,11 +326,9 @@ github.com/hashicorp/go-version
 # github.com/jmespath/go-jmespath v0.4.0
 github.com/jmespath/go-jmespath
 # github.com/klauspost/compress v1.15.1
-## explicit
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
-github.com/klauspost/compress/gzip
 github.com/klauspost/compress/huff0
 github.com/klauspost/compress/internal/snapref
 github.com/klauspost/compress/zstd