copy: add --dest-compress-format and --dest-compress-level

Add the ability to specify the compression format and level to use when
compressing blobs.

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
Giuseppe Scrivano 2019-06-11 22:40:01 +02:00
parent 976dd83a62
commit a36d81c55c
13 changed files with 289 additions and 57 deletions


@ -6,6 +6,7 @@ import (
"io"
"strings"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/urfave/cli"
@ -147,6 +148,7 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
if opts.noCreds {
ctx.DockerAuthConfig = &types.DockerAuthConfig{}
}
return ctx, nil
}
@ -156,6 +158,8 @@ type imageDestOptions struct {
osTreeTmpDir string // A directory to use for OSTree temporary files
dirForceCompression bool // Compress layers when saving to the dir: transport
ociAcceptUncompressedLayers bool // Whether to accept uncompressed layers in the oci: transport
compressionFormat string // Format to use for the compression
compressionLevel optionalInt // Level to use for the compression
}
// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
@ -179,6 +183,16 @@ func imageDestFlags(global *globalOptions, shared *sharedImageOptions, flagPrefi
Usage: "Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed)",
Destination: &opts.ociAcceptUncompressedLayers,
},
cli.StringFlag{
Name: flagPrefix + "compress-format",
Usage: "`FORMAT` to use for the compression",
Destination: &opts.compressionFormat,
},
cli.GenericFlag{
Name: flagPrefix + "compress-level",
Usage: "`LEVEL` to use for the compression",
Value: newOptionalIntValue(&opts.compressionLevel),
},
}...), &opts
}
@ -193,6 +207,16 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
ctx.OSTreeTmpDirPath = opts.osTreeTmpDir
ctx.DirForceCompress = opts.dirForceCompression
ctx.OCIAcceptUncompressedLayers = opts.ociAcceptUncompressedLayers
if opts.compressionFormat != "" {
cf, err := compression.AlgorithmByName(opts.compressionFormat)
if err != nil {
return nil, err
}
ctx.CompressionFormat = &cf
}
if opts.compressionLevel.present {
ctx.CompressionLevel = &opts.compressionLevel.value
}
return ctx, err
}
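
Note: the --dest-compress-level flag above relies on an optionalInt type and a newOptionalIntValue helper that live elsewhere in skopeo's command code and are not part of this diff. A minimal sketch of how such a cli.Generic value could be implemented, with field names inferred from the usage above:

package main

import "strconv"

// optionalInt records whether an integer flag was explicitly provided,
// so an unset flag can be told apart from an explicit 0.
type optionalInt struct {
	present bool
	value   int
}

// optionalIntValue adapts optionalInt to cli.Generic (i.e. flag.Value).
type optionalIntValue optionalInt

func newOptionalIntValue(p *optionalInt) *optionalIntValue {
	return (*optionalIntValue)(p)
}

// Set parses the flag argument and marks the value as present.
func (v *optionalIntValue) Set(s string) error {
	i, err := strconv.Atoi(s)
	if err != nil {
		return err
	}
	v.present = true
	v.value = i
	return nil
}

// String renders the current value, or "" when the flag was never set.
func (v *optionalIntValue) String() string {
	if !v.present {
		return ""
	}
	return strconv.Itoa(v.value)
}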


@ -58,6 +58,10 @@ If the authorization state is not found there, $HOME/.docker/config.json is chec
Existing signatures, if any, are preserved as well.
**--dest-compress-format** _format_

Specifies the compression format to use. Supported values are: `gzip` and `zstd`.

**--dest-compress-level** _level_

Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip they are in the range 1-9 (inclusive).
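
For example, to force zstd at level 10 when writing to an OCI layout (the same flags exercised by the test suite below): `skopeo copy --dest-compress --dest-compress-format=zstd --dest-compress-level=10 docker://busybox:latest oci:/tmp/busybox:latest`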
## EXAMPLES
To copy the layers of the docker.io busybox image to a local directory:

go.mod

@ -6,7 +6,7 @@ require (
github.com/VividCortex/ewma v1.1.1 // indirect
github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8 // indirect
github.com/containers/buildah v1.8.4
github.com/containers/image v3.0.1+incompatible
github.com/containers/image v1.5.2-0.20190821161828-0cc0e97405db
github.com/containers/storage v1.13.0
github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 // indirect
github.com/docker/docker v0.0.0-20180522102801-da99009bbb11

go.sum

@ -17,6 +17,10 @@ github.com/containers/image v1.5.2-0.20190717062552-2178abd5f9b1 h1:RGlzwWSoGBbc
github.com/containers/image v1.5.2-0.20190717062552-2178abd5f9b1/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
github.com/containers/image v1.5.2-0.20190725091050-48acc3dcbb76 h1:+9unAKrV92Jvifb06UK8H4xTKf7h7XQDOsn4EC9eqH4=
github.com/containers/image v1.5.2-0.20190725091050-48acc3dcbb76/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
github.com/containers/image v1.5.2-0.20190821113801-e003ccfc74de h1:6M4DvYlNvVUJppB9rWaYFw30fgzt09z2dCj4VlOl3e4=
github.com/containers/image v1.5.2-0.20190821113801-e003ccfc74de/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
github.com/containers/image v1.5.2-0.20190821161828-0cc0e97405db h1:ZtTWr+vb7qTLWjEW0CNP56ltYuj5t6y8AUkCxQAfH+Y=
github.com/containers/image v1.5.2-0.20190821161828-0cc0e97405db/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
github.com/containers/image v2.0.0+incompatible h1:FTr6Br7jlIKNCKMjSOMbAxKp2keQ0//jzJaYNTVhauk=
github.com/containers/image v2.0.0+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
github.com/containers/image v3.0.0+incompatible h1:pdUHY//H+3jYNnoTt+rqY8NsStX4ZBLKzPTlMC+XvnU=
@ -49,6 +53,12 @@ github.com/etcd-io/bbolt v1.3.2 h1:RLRQ0TKLX7DlBRXAJHvbmXL17Q3KNnTBtZ9B6Qo+/Y0=
github.com/etcd-io/bbolt v1.3.2/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/giuseppe/image v0.0.0-20190723195852-da7a70cbb5f1 h1:j+wcaSLeY2xPBzeSU37obWjSdWvYjf8soPscPvmOGpY=
github.com/giuseppe/image v0.0.0-20190723195852-da7a70cbb5f1/go.mod h1:t1Xf5SPi6pYsMJfQTMHfTpWgaQkRuy+0HMhAOJOj01E=
github.com/giuseppe/image v0.0.0-20190806095912-8e15c8a868fc h1:qvH3jFNG7pyTd58rs/gNmtfTv7E9q8Xo7/RKLZn+zag=
github.com/giuseppe/image v0.0.0-20190806095912-8e15c8a868fc/go.mod h1:t1Xf5SPi6pYsMJfQTMHfTpWgaQkRuy+0HMhAOJOj01E=
github.com/giuseppe/image v0.0.0-20190813140229-9b055a514f21 h1:nUR9MenOi9gs8LlU5BSQ4zCmdTLc1Js5gT7rdymI3ZE=
github.com/giuseppe/image v0.0.0-20190813140229-9b055a514f21/go.mod h1:t1Xf5SPi6pYsMJfQTMHfTpWgaQkRuy+0HMhAOJOj01E=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/gogo/protobuf v0.0.0-20170815085658-fcdc5011193f h1:r/AdTzqktq9nQpFlFePWcp+scVi+oFRajfjRJ3UnETg=


@ -44,6 +44,21 @@ function setup() {
diff -urN $dir1 $dir2
}
# Compression zstd
@test "copy: oci, round trip" {
local remote_image=docker://busybox:latest
local dir=$TESTDIR/dir
run_skopeo copy --dest-compress --dest-compress-format=zstd $remote_image oci:$dir:latest
# zstd magic number
local magic=$(printf "\x28\xb5\x2f\xfd")
# Check there is at least one file that has the zstd magic number as the first 4 bytes
(for i in $dir/blobs/sha256/*; do test "$(head -c 4 $i)" = $magic && exit 0; done; exit 1)
}
# Same image, extracted once with :tag and once without
@test "copy: oci w/ and w/o tags" {
local remote_image=docker://busybox:latest


@ -21,7 +21,6 @@ import (
"github.com/containers/image/signature"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -43,6 +42,9 @@ type digestingReader struct {
// downloads. Let's follow Firefox by limiting it to 6.
var maxParallelDownloads = 6
// compressionBufferSize is the buffer size used to compress a blob
var compressionBufferSize = 1048576
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
// (neither is set if EOF is never reached).
@ -94,6 +96,8 @@ type copier struct {
progress chan types.ProgressProperties
blobInfoCache types.BlobInfoCache
copyInParallel bool
compressionFormat compression.Algorithm
compressionLevel *int
}
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@ -166,6 +170,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
progressOutput = ioutil.Discard
}
copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
c := &copier{
dest: dest,
rawSource: rawSource,
@ -178,6 +183,17 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
// we might want to add a separate CommonCtx — or would that be too confusing?
blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
compressionLevel: options.DestinationCtx.CompressionLevel,
}
// Default to using gzip compression unless specified otherwise.
if options.DestinationCtx.CompressionFormat == nil {
algo, err := compression.AlgorithmByName("gzip")
if err != nil {
return nil, err
}
c.compressionFormat = algo
} else {
c.compressionFormat = *options.DestinationCtx.CompressionFormat
}
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
@ -805,7 +821,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// === Detect compression of the input stream.
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform
compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
}
@ -819,6 +835,8 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
originalLayerReader = destStream
}
desiredCompressionFormat := c.compressionFormat
// === Deal with layer compression/decompression if necessary
var inputInfo types.BlobInfo
var compressionOperation types.LayerCompression
@ -831,7 +849,27 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
// we don't care.
go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter
go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter
destStream = pipeReader
inputInfo.Digest = ""
inputInfo.Size = -1
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() {
// When the blob is compressed but the desired format is different, it first needs to be decompressed and then
// re-compressed using the desired format.
logrus.Debugf("Blob will be converted")
compressionOperation = types.PreserveOriginal
s, err := decompressor(destStream)
if err != nil {
return types.BlobInfo{}, err
}
defer s.Close()
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter
destStream = pipeReader
inputInfo.Digest = ""
inputInfo.Size = -1
@ -847,6 +885,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Digest = ""
inputInfo.Size = -1
} else {
// PreserveOriginal might also need to recompress the original blob if the desired compression format is different.
logrus.Debugf("Using original blob without modification")
compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
@ -907,14 +946,19 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
}()
zipper := pgzip.NewWriter(dest)
defer zipper.Close()
_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()
compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel)
if err != nil {
return
}
defer compressor.Close()
buf := make([]byte, compressionBufferSize)
_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
}
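
For readers unfamiliar with the pipe pattern used in both branches above, here is a self-contained sketch of decompress-then-recompress streaming, with the standard library's gzip standing in for the configurable compression.Algorithm (an illustration, not the library code):

package main

import (
	"compress/gzip"
	"io"
	"log"
	"os"
	"strings"
)

// recompress re-compresses an already-decompressed stream through an
// io.Pipe, so the consumer sees the new format without the whole blob
// being buffered in memory.
func recompress(decompressed io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		zw := gzip.NewWriter(pw)
		_, err := io.Copy(zw, decompressed)
		if cerr := zw.Close(); err == nil {
			err = cerr
		}
		pw.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
	}()
	return pr
}

func main() {
	out := recompress(strings.NewReader("hello"))
	defer out.Close()
	zr, err := gzip.NewReader(out) // round-trip to verify the stream
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		log.Fatal(err)
	}
}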


@ -59,9 +59,15 @@ func (s *ostreeImageSource) Close() error {
return nil
}
func (s *ostreeImageSource) getLayerSize(blob string) (int64, error) {
func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) {
var metadataKey string
if isCompressed {
metadataKey = "docker.uncompressed_size"
} else {
metadataKey = "docker.size"
}
b := fmt.Sprintf("ociimage/%s", blob)
found, data, err := readMetadata(s.repo, b, "docker.size")
found, data, err := readMetadata(s.repo, b, metadataKey)
if err != nil || !found {
return 0, err
}
@ -275,8 +281,8 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
}
}
compressedBlob, found := s.compressed[info.Digest]
if found {
compressedBlob, isCompressed := s.compressed[info.Digest]
if isCompressed {
blob = compressedBlob.Hex()
}
branch := fmt.Sprintf("ociimage/%s", blob)
@ -289,7 +295,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
s.repo = repo
}
layerSize, err := s.getLayerSize(blob)
layerSize, err := s.getBlobUncompressedSize(blob, isCompressed)
if err != nil {
return nil, 0, err
}


@ -3,6 +3,7 @@ package compression
import (
"bytes"
"compress/bzip2"
"fmt"
"io"
"io/ioutil"
@ -35,32 +36,82 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
return ioutil.NopCloser(r), nil
}
// compressionAlgos is an internal implementation detail of DetectCompression
var compressionAlgos = map[string]struct {
prefix []byte
decompressor DecompressorFunc
}{
"gzip": {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor}, // gzip (RFC 1952)
"bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor}, // bzip2 (decompress.c:BZ2_decompress)
"xz": {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
// compressorFunc returns a WriteCloser that compresses data written to it into the given writer, using the specified compression level (nil selects the default).
// The caller must call Close() on the stream (even if the input stream does not need closing!).
type compressorFunc func(io.Writer, *int) (io.WriteCloser, error)
// gzipCompressor is a compressorFunc for the gzip compression algorithm.
func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
if level != nil {
return pgzip.NewWriterLevel(r, *level)
}
return pgzip.NewWriter(r), nil
}
// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
// bzip2Compressor is a compressorFunc for the bzip2 compression algorithm.
func bzip2Compressor(r io.Writer, level *int) (io.WriteCloser, error) {
return nil, fmt.Errorf("bzip2 compression not supported")
}
// xzCompressor is a compressorFunc for the xz compression algorithm.
func xzCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
return xz.NewWriter(r)
}
// Algorithm is a compression algorithm that can be used for CompressStream.
type Algorithm struct {
name string
prefix []byte
decompressor DecompressorFunc
compressor compressorFunc
}
// Name returns the name for the compression algorithm.
func (c Algorithm) Name() string {
return c.name
}
// compressionAlgos is an internal implementation detail of DetectCompression
var compressionAlgos = []Algorithm{
{"gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor}, // gzip (RFC 1952)
{"bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor}, // bzip2 (decompress.c:BZ2_decompress)
{"xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
{"zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor}, // zstd (http://www.zstd.net)
}
// AlgorithmByName returns the compression algorithm with the given name, or an error if no algorithm is registered under that name.
func AlgorithmByName(name string) (Algorithm, error) {
for _, c := range compressionAlgos {
if c.name == name {
return c, nil
}
}
return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name)
}
// CompressStream returns an io.WriteCloser that compresses data written to it into dest, using the specified algorithm at the specified level (nil selects the default).
func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
return algo.compressor(dest, level)
}
// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an empty Algorithm and nil otherwise.
// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) {
buffer := [8]byte{}
n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
return nil, nil, err
return Algorithm{}, nil, nil, err
}
var retAlgo Algorithm
var decompressor DecompressorFunc
for name, algo := range compressionAlgos {
for _, algo := range compressionAlgos {
if bytes.HasPrefix(buffer[:n], algo.prefix) {
logrus.Debugf("Detected compression format %s", name)
logrus.Debugf("Detected compression format %s", algo.name)
retAlgo = algo
decompressor = algo.decompressor
break
}
@ -69,7 +120,14 @@ func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
logrus.Debugf("No compression detected")
}
return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
}
// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
_, d, r, e := DetectCompressionFormat(input)
return d, r, e
}
// AutoDecompress takes a stream and returns an uncompressed version of the
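
Taken together, the new API can be exercised with a small round trip; a usage sketch (error handling abbreviated):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/containers/image/pkg/compression"
)

func main() {
	algo, err := compression.AlgorithmByName("zstd")
	if err != nil {
		panic(err)
	}
	// Compress a buffer at level 3.
	var buf bytes.Buffer
	level := 3
	w, err := compression.CompressStream(&buf, algo, &level)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello world"))
	w.Close() // flushes the zstd frame

	// Detect the format and decompress it again.
	detected, decompressor, r, err := compression.DetectCompressionFormat(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(detected.Name()) // "zstd"
	rc, err := decompressor(r)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	data, _ := ioutil.ReadAll(rc)
	fmt.Println(string(data)) // "hello world"
}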


@ -0,0 +1,59 @@
package compression
import (
"io"
"github.com/klauspost/compress/zstd"
)
type wrapperZstdDecoder struct {
decoder *zstd.Decoder
}
func (w *wrapperZstdDecoder) Close() error {
w.decoder.Close()
return nil
}
func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) {
return w.decoder.DecodeAll(input, dst)
}
func (w *wrapperZstdDecoder) Read(p []byte) (int, error) {
return w.decoder.Read(p)
}
func (w *wrapperZstdDecoder) Reset(r io.Reader) error {
return w.decoder.Reset(r)
}
func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) {
return w.decoder.WriteTo(wr)
}
func zstdReader(buf io.Reader) (io.ReadCloser, error) {
decoder, err := zstd.NewReader(buf)
return &wrapperZstdDecoder{decoder: decoder}, err
}
func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
return zstd.NewWriter(dest)
}
func zstdWriterWithLevel(dest io.Writer, level int) (io.WriteCloser, error) {
el := zstd.EncoderLevelFromZstd(level)
return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
}
// zstdCompressor is a compressorFunc for the zstd compression algorithm.
func zstdCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
if level == nil {
return zstdWriter(r)
}
return zstdWriterWithLevel(r, *level)
}
// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm.
func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) {
return zstdReader(r)
}
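
As a sanity check, klauspost/compress can be driven directly the same way zstdWriterWithLevel does, and the output starts with the zstd magic number that both compressionAlgos and the integration test above look for:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	// Map a zstd CLI-style level (1-20) to an encoder level, as in zstdWriterWithLevel.
	w, err := zstd.NewWriter(&buf, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(3)))
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello"))
	w.Close()

	magic := []byte{0x28, 0xb5, 0x2f, 0xfd} // same prefix checked in copy.bats
	fmt.Println(bytes.HasPrefix(buf.Bytes(), magic)) // prints: true
}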


@ -32,6 +32,8 @@ var (
dockerHomePath = filepath.FromSlash(".docker/config.json")
dockerLegacyHomePath = ".dockercfg"
enableKeyring = false
// ErrNotLoggedIn is returned for users not logged into a registry
// that they are trying to log out of
ErrNotLoggedIn = errors.New("not logged in")
@ -46,11 +48,11 @@ func SetAuthentication(sys *types.SystemContext, registry, username, password st
return false, setAuthToCredHelper(ch, registry, username, password)
}
// Set the credentials to kernel keyring if sys.AuthFile is not specified.
// Set the credentials in the kernel keyring if enableKeyring is true.
// The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms.
// Hence, we want to fall-back to using the authfile in case the keyring failed.
// However, if the sys.AuthFilePath is set, we want adhere to the user specification and not use the keyring.
if sys.AuthFilePath == "" {
// However, if enableKeyring is false, we want to adhere to the user specification and not use the keyring.
if enableKeyring {
err := setAuthToKernelKeyring(registry, username, password)
if err == nil {
logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username)
@ -74,11 +76,13 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil
}
if enableKeyring {
username, password, err := getAuthFromKernelKeyring(registry)
if err == nil {
logrus.Debug("returning credentials from kernel keyring")
return username, password, nil
}
}
dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath)
var paths []string
@ -117,13 +121,15 @@ func RemoveAuthentication(sys *types.SystemContext, registry string) error {
return false, deleteAuthFromCredHelper(ch, registry)
}
// Next try kernel keyring
// Next, if the keyring is enabled, try the kernel keyring
if enableKeyring {
err := deleteAuthFromKernelKeyring(registry)
if err == nil {
logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry)
return false, nil
}
logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles")
}
if _, ok := auths.AuthConfigs[registry]; ok {
delete(auths.AuthConfigs, registry)


@ -6,6 +6,7 @@ import (
"time"
"github.com/containers/image/docker/reference"
"github.com/containers/image/pkg/compression"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go/v1"
)
@ -511,6 +512,11 @@ type SystemContext struct {
// === dir.Transport overrides ===
// DirForceCompress compresses the image layers if set to true
DirForceCompress bool
// CompressionFormat is the format to use for the compression of the blobs
CompressionFormat *compression.Algorithm
// CompressionLevel specifies what compression level is used
CompressionLevel *int
}
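
A sketch of how a library consumer could populate these fields to request zstd at level 10, mirroring what skopeo's --dest-compress-format/--dest-compress-level flags do above:

package example

import (
	"github.com/containers/image/pkg/compression"
	"github.com/containers/image/types"
)

func destinationContext() (*types.SystemContext, error) {
	algo, err := compression.AlgorithmByName("zstd")
	if err != nil {
		return nil, err
	}
	level := 10
	return &types.SystemContext{
		CompressionFormat: &algo,  // consulted by the copy code when compressing blobs
		CompressionLevel:  &level, // nil would mean the algorithm's default level
	}, nil
}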
// ProgressProperties is used to pass information from the copy code to a monitor which


@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 0
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 1
VersionPatch = 3
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
VersionDev = "-dev"
)
// Version is the specification version that the package types support.

vendor/modules.txt

@ -26,7 +26,7 @@ github.com/VividCortex/ewma
github.com/containerd/continuity/pathdriver
# github.com/containers/buildah v1.8.4
github.com/containers/buildah/pkg/unshare
# github.com/containers/image v3.0.1+incompatible
# github.com/containers/image v1.5.2-0.20190821161828-0cc0e97405db
github.com/containers/image/copy
github.com/containers/image/directory
github.com/containers/image/docker
@ -34,12 +34,12 @@ github.com/containers/image/docker/reference
github.com/containers/image/image
github.com/containers/image/manifest
github.com/containers/image/pkg/blobinfocache
github.com/containers/image/pkg/compression
github.com/containers/image/signature
github.com/containers/image/storage
github.com/containers/image/transports
github.com/containers/image/transports/alltransports
github.com/containers/image/types
github.com/containers/image/pkg/compression
github.com/containers/image/directory/explicitfilepath
github.com/containers/image/docker/policyconfiguration
github.com/containers/image/pkg/blobinfocache/none
@ -164,11 +164,11 @@ github.com/gorilla/mux
# github.com/imdario/mergo v0.0.0-20141206190957-6633656539c1
github.com/imdario/mergo
# github.com/klauspost/compress v1.7.2
github.com/klauspost/compress/flate
github.com/klauspost/compress/zstd
github.com/klauspost/compress/huff0
github.com/klauspost/compress/snappy
github.com/klauspost/compress/zstd/internal/xxhash
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
# github.com/klauspost/cpuid v1.2.1
github.com/klauspost/cpuid