Mirror of https://github.com/containers/skopeo.git, synced 2025-09-17 07:19:37 +00:00
copy: add --dest-compress-format and --dest-compress-level
Add the possibility to specify the format and the level to use when compressing blobs.

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
vendor/github.com/containers/image/copy/copy.go (generated, vendored): 76 changed lines
@@ -21,7 +21,6 @@ import (
 	"github.com/containers/image/signature"
 	"github.com/containers/image/transports"
 	"github.com/containers/image/types"
-	"github.com/klauspost/pgzip"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -43,6 +42,9 @@ type digestingReader struct {
 // downloads. Let's follow Firefox by limiting it to 6.
 var maxParallelDownloads = 6
 
+// compressionBufferSize is the buffer size used to compress a blob
+var compressionBufferSize = 1048576
+
 // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
 // or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
 // (neither is set if EOF is never reached).
@@ -86,14 +88,16 @@ func (d *digestingReader) Read(p []byte) (int, error) {
 // copier allows us to keep track of diffID values for blobs, and other
 // data shared across one or more images in a possible manifest list.
 type copier struct {
-	dest             types.ImageDestination
-	rawSource        types.ImageSource
-	reportWriter     io.Writer
-	progressOutput   io.Writer
-	progressInterval time.Duration
-	progress         chan types.ProgressProperties
-	blobInfoCache    types.BlobInfoCache
-	copyInParallel   bool
+	dest              types.ImageDestination
+	rawSource         types.ImageSource
+	reportWriter      io.Writer
+	progressOutput    io.Writer
+	progressInterval  time.Duration
+	progress          chan types.ProgressProperties
+	blobInfoCache     types.BlobInfoCache
+	copyInParallel    bool
+	compressionFormat compression.Algorithm
+	compressionLevel  *int
 }
 
 // imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@@ -166,6 +170,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 		progressOutput = ioutil.Discard
 	}
 	copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
+
 	c := &copier{
 		dest:      dest,
 		rawSource: rawSource,
@@ -177,7 +182,18 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 		// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
 		// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
 		// we might want to add a separate CommonCtx — or would that be too confusing?
-		blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
+		blobInfoCache:    blobinfocache.DefaultCache(options.DestinationCtx),
+		compressionLevel: options.DestinationCtx.CompressionLevel,
 	}
+	// Default to using gzip compression unless specified otherwise.
+	if options.DestinationCtx.CompressionFormat == nil {
+		algo, err := compression.AlgorithmByName("gzip")
+		if err != nil {
+			return nil, err
+		}
+		c.compressionFormat = algo
+	} else {
+		c.compressionFormat = *options.DestinationCtx.CompressionFormat
+	}
 
 	unparsedToplevel := image.UnparsedInstance(rawSource, nil)
@@ -805,7 +821,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 
 	// === Detect compression of the input stream.
 	// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
-	decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform
+	compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform
 	if err != nil {
 		return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
 	}
@@ -819,6 +835,8 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		originalLayerReader = destStream
 	}
 
+	desiredCompressionFormat := c.compressionFormat
+
 	// === Deal with layer compression/decompression if necessary
 	var inputInfo types.BlobInfo
 	var compressionOperation types.LayerCompression
@@ -831,7 +849,27 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
 		// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
 		// we don’t care.
-		go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter
+		go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter
 		destStream = pipeReader
 		inputInfo.Digest = ""
 		inputInfo.Size = -1
+	} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() {
+		// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
+		// re-compressed using the desired format.
+		logrus.Debugf("Blob will be converted")
+
+		compressionOperation = types.PreserveOriginal
+		s, err := decompressor(destStream)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		defer s.Close()
+
+		pipeReader, pipeWriter := io.Pipe()
+		defer pipeReader.Close()
+
+		go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter
+
+		destStream = pipeReader
+		inputInfo.Digest = ""
+		inputInfo.Size = -1
@@ -847,6 +885,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		inputInfo.Digest = ""
 		inputInfo.Size = -1
 	} else {
+		// PreserveOriginal might also need to recompress the original blob if the desired compression format is different.
 		logrus.Debugf("Using original blob without modification")
 		compressionOperation = types.PreserveOriginal
 		inputInfo = srcInfo
@@ -907,14 +946,19 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 }
 
 // compressGoroutine reads all input from src and writes its compressed equivalent to dest.
-func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
+func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) {
 	err := errors.New("Internal error: unexpected panic in compressGoroutine")
 	defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
 		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
 	}()
 
-	zipper := pgzip.NewWriter(dest)
-	defer zipper.Close()
+	compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel)
+	if err != nil {
+		return
+	}
+	defer compressor.Close()
 
-	_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()
+	buf := make([]byte, compressionBufferSize)
+
+	_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
 }
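For orientation, the following is a minimal, self-contained sketch (not part of this commit) of the streaming recompression pattern the new branch above uses: detect the existing compression of a blob, undo it if present, and re-compress into the requested format via compression.CompressStream. The recompress helper, its parameters, and the main wrapper are illustrative assumptions; only the exported functions it calls come from the pkg/compression changes below.

package main

import (
	"io"
	"os"

	"github.com/containers/image/pkg/compression"
)

// recompress converts src to the compression format named by algoName and writes
// the result to dst. It mirrors the decompress-then-CompressStream pipeline used
// by copyBlobFromStream; the helper itself is illustrative, not library code.
func recompress(src io.Reader, dst io.Writer, algoName string, level *int) error {
	algo, err := compression.AlgorithmByName(algoName)
	if err != nil {
		return err
	}
	// Detect whether src is already compressed; rest must be used instead of src
	// afterwards, because detection consumed its first bytes.
	decompressor, rest, err := compression.DetectCompression(src)
	if err != nil {
		return err
	}
	plain := rest
	if decompressor != nil {
		rc, err := decompressor(rest)
		if err != nil {
			return err
		}
		defer rc.Close()
		plain = rc
	}
	// Re-compress into dst with the requested format and (optional) level.
	w, err := compression.CompressStream(dst, algo, level)
	if err != nil {
		return err
	}
	if _, err := io.Copy(w, plain); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}

func main() {
	level := 3
	if err := recompress(os.Stdin, os.Stdout, "zstd", &level); err != nil {
		os.Exit(1)
	}
}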
vendor/github.com/containers/image/ostree/ostree_src.go (generated, vendored): 16 changed lines
@@ -59,9 +59,15 @@ func (s *ostreeImageSource) Close() error {
 	return nil
 }
 
-func (s *ostreeImageSource) getLayerSize(blob string) (int64, error) {
+func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) {
+	var metadataKey string
+	if isCompressed {
+		metadataKey = "docker.uncompressed_size"
+	} else {
+		metadataKey = "docker.size"
+	}
 	b := fmt.Sprintf("ociimage/%s", blob)
-	found, data, err := readMetadata(s.repo, b, "docker.size")
+	found, data, err := readMetadata(s.repo, b, metadataKey)
 	if err != nil || !found {
 		return 0, err
 	}
@@ -275,8 +281,8 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 		}
 
 	}
-	compressedBlob, found := s.compressed[info.Digest]
-	if found {
+	compressedBlob, isCompressed := s.compressed[info.Digest]
+	if isCompressed {
 		blob = compressedBlob.Hex()
 	}
 	branch := fmt.Sprintf("ociimage/%s", blob)
@@ -289,7 +295,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 		s.repo = repo
 	}
 
-	layerSize, err := s.getLayerSize(blob)
+	layerSize, err := s.getBlobUncompressedSize(blob, isCompressed)
 	if err != nil {
 		return nil, 0, err
 	}
vendor/github.com/containers/image/pkg/compression/compression.go (generated, vendored): 86 changed lines
@@ -3,6 +3,7 @@ package compression
 import (
 	"bytes"
 	"compress/bzip2"
+	"fmt"
 	"io"
 	"io/ioutil"
 
@@ -35,32 +36,82 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
 	return ioutil.NopCloser(r), nil
 }
 
-// compressionAlgos is an internal implementation detail of DetectCompression
-var compressionAlgos = map[string]struct {
-	prefix       []byte
-	decompressor DecompressorFunc
-}{
-	"gzip":  {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor},                 // gzip (RFC 1952)
-	"bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor},                // bzip2 (decompress.c:BZ2_decompress)
-	"xz":    {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
+// compressorFunc writes the compressed stream to the given writer using the specified compression level.
+// The caller must call Close() on the stream (even if the input stream does not need closing!).
+type compressorFunc func(io.Writer, *int) (io.WriteCloser, error)
+
+// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
+func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	if level != nil {
+		return pgzip.NewWriterLevel(r, *level)
+	}
+	return pgzip.NewWriter(r), nil
 }
 
-// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
+// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
+func bzip2Compressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	return nil, fmt.Errorf("bzip2 compression not supported")
+}
+
+// xzCompressor is a CompressorFunc for the xz compression algorithm.
+func xzCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	return xz.NewWriter(r)
+}
+
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm struct {
+	name         string
+	prefix       []byte
+	decompressor DecompressorFunc
+	compressor   compressorFunc
+}
+
+// Name returns the name for the compression algorithm.
+func (c Algorithm) Name() string {
+	return c.name
+}
+
+// compressionAlgos is an internal implementation detail of DetectCompression
+var compressionAlgos = []Algorithm{
+	{"gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor},             // gzip (RFC 1952)
+	{"bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor},          // bzip2 (decompress.c:BZ2_decompress)
+	{"xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
+	{"zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor},       // zstd (http://www.zstd.net)
+}
+
+// AlgorithmByName returns the compressor by its name
+func AlgorithmByName(name string) (Algorithm, error) {
+	for _, c := range compressionAlgos {
+		if c.name == name {
+			return c, nil
+		}
+	}
+	return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name)
+}
+
+// CompressStream returns the compressor by its name
+func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
+	return algo.compressor(dest, level)
+}
+
+// DetectCompressionFormat returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
 // Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
-func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
+func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) {
 	buffer := [8]byte{}
 
 	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
 	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
 		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
 		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
-		return nil, nil, err
+		return Algorithm{}, nil, nil, err
 	}
 
+	var retAlgo Algorithm
 	var decompressor DecompressorFunc
-	for name, algo := range compressionAlgos {
+	for _, algo := range compressionAlgos {
 		if bytes.HasPrefix(buffer[:n], algo.prefix) {
-			logrus.Debugf("Detected compression format %s", name)
+			logrus.Debugf("Detected compression format %s", algo.name)
+			retAlgo = algo
 			decompressor = algo.decompressor
 			break
 		}
@@ -69,7 +120,14 @@ func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
 		logrus.Debugf("No compression detected")
 	}
 
-	return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+	return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
 }
 
+// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
+	_, d, r, e := DetectCompressionFormat(input)
+	return d, r, e
+}
+
 // AutoDecompress takes a stream and returns an uncompressed version of the
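A short sketch (assumed usage, not from this commit) of how a caller can combine the new DetectCompressionFormat and Algorithm.Name to decide whether a blob already carries the desired format, which is the same check copy.go performs above. The package and function names here are hypothetical.

package blobcheck

import (
	"io"

	"github.com/containers/image/pkg/compression"
)

// needsConversion reports whether blob is compressed with a format other than
// desired. The returned reader must be used instead of blob afterwards, because
// detection consumes the first bytes of the stream.
func needsConversion(blob io.Reader, desired compression.Algorithm) (bool, io.Reader, error) {
	format, decompressor, rest, err := compression.DetectCompressionFormat(blob)
	if err != nil {
		return false, nil, err
	}
	isCompressed := decompressor != nil
	// An uncompressed blob, or one already in the desired format, needs no conversion.
	return isCompressed && format.Name() != desired.Name(), rest, nil
}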
vendor/github.com/containers/image/pkg/compression/zstd.go (generated, vendored, new file): 59 added lines
@@ -0,0 +1,59 @@
+package compression
+
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+type wrapperZstdDecoder struct {
+	decoder *zstd.Decoder
+}
+
+func (w *wrapperZstdDecoder) Close() error {
+	w.decoder.Close()
+	return nil
+}
+
+func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) {
+	return w.decoder.DecodeAll(input, dst)
+}
+
+func (w *wrapperZstdDecoder) Read(p []byte) (int, error) {
+	return w.decoder.Read(p)
+}
+
+func (w *wrapperZstdDecoder) Reset(r io.Reader) error {
+	return w.decoder.Reset(r)
+}
+
+func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) {
+	return w.decoder.WriteTo(wr)
+}
+
+func zstdReader(buf io.Reader) (io.ReadCloser, error) {
+	decoder, err := zstd.NewReader(buf)
+	return &wrapperZstdDecoder{decoder: decoder}, err
+}
+
+func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
+	return zstd.NewWriter(dest)
+}
+
+func zstdWriterWithLevel(dest io.Writer, level int) (io.WriteCloser, error) {
+	el := zstd.EncoderLevelFromZstd(level)
+	return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
+}
+
+// zstdCompressor is a CompressorFunc for the zstd compression algorithm.
+func zstdCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
+	if level == nil {
+		return zstdWriter(r)
+	}
+	return zstdWriterWithLevel(r, *level)
+}
+
+// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm.
+func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) {
+	return zstdReader(r)
+}
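As a sanity check of the new algorithm entry, here is a hedged, test-style round-trip sketch (not part of the commit): data is compressed through the registered "zstd" Algorithm via CompressStream and read back with ZstdDecompressor. The roundTripZstd helper is hypothetical and is written as if it lived inside the compression package.

package compression

import (
	"bytes"
	"io/ioutil"
)

// roundTripZstd compresses data with the registered "zstd" Algorithm and then
// decompresses it again, returning the recovered bytes. Illustrative only.
func roundTripZstd(data []byte, level *int) ([]byte, error) {
	algo, err := AlgorithmByName("zstd")
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	w, err := CompressStream(&buf, algo, level)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(data); err != nil {
		w.Close()
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	r, err := ZstdDecompressor(&buf)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}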
vendor/github.com/containers/image/pkg/docker/config/config.go (generated, vendored): 32 changed lines
@@ -32,6 +32,8 @@ var (
 	dockerHomePath       = filepath.FromSlash(".docker/config.json")
 	dockerLegacyHomePath = ".dockercfg"
 
+	enableKeyring = false
+
 	// ErrNotLoggedIn is returned for users not logged into a registry
 	// that they are trying to logout of
 	ErrNotLoggedIn = errors.New("not logged in")
@@ -46,11 +48,11 @@ func SetAuthentication(sys *types.SystemContext, registry, username, password st
 		return false, setAuthToCredHelper(ch, registry, username, password)
 	}
 
-	// Set the credentials to kernel keyring if sys.AuthFile is not specified.
+	// Set the credentials to kernel keyring if enableKeyring is true.
 	// The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms.
 	// Hence, we want to fall-back to using the authfile in case the keyring failed.
-	// However, if the sys.AuthFilePath is set, we want adhere to the user specification and not use the keyring.
-	if sys.AuthFilePath == "" {
+	// However, if the enableKeyring is false, we want adhere to the user specification and not use the keyring.
+	if enableKeyring {
 		err := setAuthToKernelKeyring(registry, username, password)
 		if err == nil {
 			logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username)
@@ -74,10 +76,12 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
 		return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil
 	}
 
-	username, password, err := getAuthFromKernelKeyring(registry)
-	if err == nil {
-		logrus.Debug("returning credentials from kernel keyring")
-		return username, password, nil
+	if enableKeyring {
+		username, password, err := getAuthFromKernelKeyring(registry)
+		if err == nil {
+			logrus.Debug("returning credentials from kernel keyring")
+			return username, password, nil
+		}
 	}
 
 	dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath)
@@ -117,13 +121,15 @@ func RemoveAuthentication(sys *types.SystemContext, registry string) error {
 		return false, deleteAuthFromCredHelper(ch, registry)
 	}
 
-	// Next try kernel keyring
-	err := deleteAuthFromKernelKeyring(registry)
-	if err == nil {
-		logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry)
-		return false, nil
+	// Next if keyring is enabled try kernel keyring
+	if enableKeyring {
+		err := deleteAuthFromKernelKeyring(registry)
+		if err == nil {
+			logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry)
+			return false, nil
+		}
+		logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles")
 	}
-	logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles")
 
 	if _, ok := auths.AuthConfigs[registry]; ok {
 		delete(auths.AuthConfigs, registry)
vendor/github.com/containers/image/types/types.go (generated, vendored): 6 changed lines
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/pkg/compression"
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/specs-go/v1"
 )
@@ -511,6 +512,11 @@ type SystemContext struct {
 	// === dir.Transport overrides ===
 	// DirForceCompress compresses the image layers if set to true
 	DirForceCompress bool
+
+	// CompressionFormat is the format to use for the compression of the blobs
+	CompressionFormat *compression.Algorithm
+	// CompressionLevel specifies what compression level is used
+	CompressionLevel *int
 }
 
 // ProgressProperties is used to pass information from the copy code to a monitor which
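The two new SystemContext fields are what the copier reads in the copy.go hunk above (options.DestinationCtx.CompressionFormat and CompressionLevel). Below is a hedged sketch of how a caller might populate them to request zstd output; the helper name is hypothetical, and the copy.Options wiring is assumed from the hunks above rather than shown.

package main

import (
	"fmt"

	"github.com/containers/image/pkg/compression"
	"github.com/containers/image/types"
)

// zstdDestinationCtx returns a SystemContext that asks the copy code to
// re-compress destination blobs as zstd at the given level.
func zstdDestinationCtx(level int) (*types.SystemContext, error) {
	algo, err := compression.AlgorithmByName("zstd")
	if err != nil {
		return nil, err
	}
	return &types.SystemContext{
		CompressionFormat: &algo,
		CompressionLevel:  &level,
	}, nil
}

func main() {
	sys, err := zstdDestinationCtx(3)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Pass sys as copy.Options.DestinationCtx so the copier picks up the format and level.
	_ = sys
}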
vendor/github.com/containers/image/version/version.go (generated, vendored): 4 changed lines
@@ -8,10 +8,10 @@ const (
 	// VersionMinor is for functionality in a backwards-compatible manner
 	VersionMinor = 0
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 1
+	VersionPatch = 3
 
 	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = ""
+	VersionDev = "-dev"
 )
 
 // Version is the specification version that the package types support.