Compare commits

..

7 Commits

Author SHA1 Message Date
Daniel J Walsh
fcc57b1fa7 Merge pull request #993 from vrothberg/1.1.1
v1.1.1
2020-07-29 07:56:19 -04:00
Valentin Rothberg
13c69d48fa bump to v1.1.2-dev
Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
2020-07-29 11:06:46 +02:00
Valentin Rothberg
67abbb3cef v1.1.1
* Run htpasswd from our build-container instead of registry:2
* vendor golang.org/x/text@v0.3.3

Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
2020-07-29 11:06:18 +02:00
Miloslav Trmač
747abd054f Merge pull request #984 from mtrmac/htpasswd-1.1
Run htpasswd from our build-container instead of registry:2
2020-07-16 20:20:36 +02:00
Miloslav Trmač
8a664856cd Run htpasswd from our build-container instead of registry:2
registry:2 no longer contains htpasswd.

Also don't use log_and_run ... >> $file
because that will cause the command to be logged to $file.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2020-07-16 19:09:51 +02:00
Daniel J Walsh
3f81ee46c3 Merge pull request #981 from vrothberg/1.1-update-x/text
[1.1] vendor golang.org/x/text@v0.3.3
2020-07-16 13:05:55 -04:00
Valentin Rothberg
14ba92b2f6 vendor golang.org/x/text@v0.3.3
Fixes: CVE-2020-14040
Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
2020-07-16 11:23:08 +02:00
302 changed files with 9340 additions and 9856 deletions

.gitignore

@@ -1,7 +1,6 @@
*.1
/layers-*
/skopeo
result
# ignore JetBrains IDEs (GoLand) config folder
.idea
.idea


@@ -15,12 +15,13 @@ notifications:
email: false
install:
# Ideally, the (brew update) should not be necessary and Travis would have fairly
# frequently updated OS images; that's not been the case historically.
# In particular, explicitly unlink python@2, which has been removed from Homebrew
# since the last OS image build (as of July 2020), but the Travis OS still
# contains it, and it prevents updating of Python 3.
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update && brew unlink python@2 && brew install gpgme ; fi
# NOTE: The (brew update) should not be necessary, and slows things down;
# we include it as a workaround for https://github.com/Homebrew/brew/issues/3299
# ideally Travis should bake the (brew update) into its images
# (https://github.com/travis-ci/travis-ci/issues/8552 ), but that's only going
# to happen around November 2017 per https://blog.travis-ci.com/2017-10-16-a-new-default-os-x-image-is-coming .
# Remove the (brew update) at that time.
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update && brew install gpgme ; fi
script:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then hack/travis_osx.sh ; fi


@@ -134,7 +134,7 @@ When new PRs for [containers/image](https://github.com/containers/image) break `
- create out a new branch in your `skopeo` checkout and switch to it
- update `vendor.conf`. Find out the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
- run `make vendor`
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for changed `containers/image` API)
- make any other necessary changes in the skopeo repo (e.g. add other dependencies now requied by `containers/image`, or update skopeo for changed `containers/image` API)
- optionally add new integration tests to the skopeo repo
- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
- iterate until tests pass and the PR is reviewed


@@ -1,9 +1,13 @@
FROM golang:1.14-buster
FROM ubuntu:19.10
RUN apt-get update && \
apt-get install -y \
libdevmapper-dev \
libgpgme11-dev
RUN apt-get update && apt-get install -y \
golang \
libbtrfs-dev \
git-core \
libdevmapper-dev \
libgpgme11-dev \
go-md2man \
libglib2.0-dev
ENV GOPATH=/
WORKDIR /src/github.com/containers/skopeo


@@ -25,9 +25,6 @@ BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
GO ?= go
GOBIN := $(shell $(GO) env GOBIN)
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
endif
@@ -46,10 +43,8 @@ ifeq ($(DEBUG), 1)
override GOGCFLAGS += -N -l
endif
ifeq ($(GOOS), linux)
ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64 riscv64))
GO_DYN_FLAGS="-buildmode=pie"
endif
ifeq ($(shell $(GO) env GOOS), linux)
GO_DYN_FLAGS="-buildmode=pie"
endif
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
@@ -67,9 +62,6 @@ CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"
GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
EXTRA_LDFLAGS ?=
LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'
MANPAGES_MD = $(wildcard docs/*.md)
MANPAGES ?= $(MANPAGES_MD:%.md=%)
@@ -93,8 +85,7 @@ help:
@echo
@echo " * 'install' - Install binaries and documents to system locations"
@echo " * 'binary' - Build skopeo with a container"
@echo " * 'static' - Build statically linked binary"
@echo " * 'bin/skopeo' - Build skopeo locally"
@echo " * 'binary-local' - Build skopeo locally"
@echo " * 'test-unit' - Execute unit tests"
@echo " * 'test-integration' - Execute integration tests"
@echo " * 'validate' - Verify whether there is no conflict and all Go source files have been formatted, linted and vetted"
@@ -107,25 +98,19 @@ help:
binary: cmd/skopeo
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
skopeobuildimage make bin/skopeo $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
# Update nix/nixpkgs.json to its latest stable commit
.PHONY: nixpkgs
nixpkgs:
@nix run -f channel:nixos-20.03 nix-prefetch-git -c nix-prefetch-git \
--no-deepClone https://github.com/nixos/nixpkgs > nix/nixpkgs.json
# Build statically linked binary
.PHONY: static
static:
@nix build -f nix/
mkdir -p ./bin
cp -rfp ./result/bin/* ./bin/
binary-static: cmd/skopeo
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
# Build w/o using containers
.PHONY: bin/skopeo
bin/skopeo:
$(GPGME_ENV) $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} ${LDFLAGS} -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o $@ ./cmd/skopeo
binary-local:
$(GPGME_ENV) $(GO) build $(MOD_VENDOR) ${GO_DYN_FLAGS} -ldflags "-X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
binary-local-static:
$(GPGME_ENV) $(GO) build $(MOD_VENDOR) -ldflags "-extldflags \"-static\" -X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
build-container:
${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .
@@ -141,7 +126,7 @@ docs-in-container:
skopeobuildimage make docs $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
clean:
rm -rf bin docs/*.1
rm -f skopeo docs/*.1
install: install-binary install-docs install-completions
install -d -m 755 ${SIGSTOREDIR}
@@ -150,9 +135,9 @@ install: install-binary install-docs install-completions
install -d -m 755 ${REGISTRIESDDIR}
install -m 644 default.yaml ${REGISTRIESDDIR}/default.yaml
install-binary: bin/skopeo
install-binary: ./skopeo
install -d -m 755 ${INSTALLDIR}
install -m 755 bin/skopeo ${INSTALLDIR}/skopeo
install -m 755 skopeo ${INSTALLDIR}/skopeo
install-docs: docs
install -d -m 755 ${MANINSTALLDIR}/man1


@@ -6,7 +6,6 @@ import (
"io"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
@@ -23,7 +22,6 @@ type copyOptions struct {
global *globalOptions
srcImage *imageOptions
destImage *imageDestOptions
retryOpts *retry.RetryOptions
additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
@@ -39,11 +37,9 @@ func copyCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
srcFlags, srcOpts := imageFlags(global, sharedOpts, "src-", "screds")
destFlags, destOpts := imageDestFlags(global, sharedOpts, "dest-", "dcreds")
retryFlags, retryOpts := retryFlags()
opts := copyOptions{global: global,
srcImage: srcOpts,
destImage: destOpts,
retryOpts: retryOpts,
}
cmd := &cobra.Command{
Use: "copy [command options] SOURCE-IMAGE DESTINATION-IMAGE",
@@ -56,14 +52,13 @@ Supported transports:
See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
RunE: commandAction(opts.run),
Example: `skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest`,
Example: `skopeo copy --sign-by dev@example.com container-storage:example/busybox:streaming docker://example/busybox:gold`,
}
adjustUsage(cmd)
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&srcFlags)
flags.AddFlagSet(&destFlags)
flags.AddFlagSet(&retryFlags)
flags.StringSliceVar(&opts.additionalTags, "additional-tag", []string{}, "additional tags (supports docker-archive)")
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress output information when copying images")
flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
@@ -183,19 +178,17 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
decConfig = cc.DecryptConfig
}
return retry.RetryIfNecessary(ctx, func() error {
_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: stdout,
SourceCtx: sourceCtx,
DestinationCtx: destinationCtx,
ForceManifestMIMEType: manifestType,
ImageListSelection: imageListSelection,
OciDecryptConfig: decConfig,
OciEncryptLayers: encLayers,
OciEncryptConfig: encConfig,
})
return err
}, opts.retryOpts)
_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: stdout,
SourceCtx: sourceCtx,
DestinationCtx: destinationCtx,
ForceManifestMIMEType: manifestType,
ImageListSelection: imageListSelection,
OciDecryptConfig: decConfig,
OciEncryptLayers: encLayers,
OciEncryptConfig: encConfig,
})
return err
}
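This hunk is where the two sides differ on retrying: one side wraps copy.Image in the containers/common retry helper, the other calls it directly. A minimal, self-contained sketch of that wrapper pattern, assuming only the retry.RetryIfNecessary and retry.RetryOptions API visible in this diff (pullManifest is a made-up stand-in for a registry call):

```go
package main

import (
	"context"
	"log"

	"github.com/containers/common/pkg/retry"
)

// pullManifest is a made-up stand-in for any registry call that can fail transiently.
func pullManifest(ctx context.Context) error {
	// ... talk to the registry here ...
	return nil
}

func main() {
	ctx := context.Background()

	// MaxRetry is the value the --retry-times flag feeds in; 0 means no retries.
	opts := &retry.RetryOptions{MaxRetry: 3}

	// RetryIfNecessary runs the closure and, on retriable errors, runs it again
	// up to MaxRetry more times before giving up.
	if err := retry.RetryIfNecessary(ctx, func() error {
		return pullManifest(ctx)
	}, opts); err != nil {
		log.Fatalf("pull failed despite retries: %v", err)
	}
}
```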


@@ -6,26 +6,22 @@ import (
"io"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/spf13/cobra"
)
type deleteOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
global *globalOptions
image *imageOptions
}
func deleteCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := deleteOptions{
global: global,
image: imageOpts,
retryOpts: retryOpts,
global: global,
image: imageOpts,
}
cmd := &cobra.Command{
Use: "delete [command options] IMAGE-NAME",
@@ -42,7 +38,6 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
@@ -68,8 +63,5 @@ func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
return retry.RetryIfNecessary(ctx, func() error {
return ref.DeleteImage(ctx, sys)
}, opts.retryOpts)
return ref.DeleteImage(ctx, sys)
}


@@ -5,36 +5,46 @@ import (
"fmt"
"io"
"strings"
"time"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/skopeo/cmd/skopeo/inspect"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// inspectOutput is the output format of (skopeo inspect), primarily so that we can format it with a simple json.MarshalIndent.
type inspectOutput struct {
Name string `json:",omitempty"`
Tag string `json:",omitempty"`
Digest digest.Digest
RepoTags []string
Created *time.Time
DockerVersion string
Labels map[string]string
Architecture string
Os string
Layers []string
Env []string
}
type inspectOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
raw bool // Output the raw manifest instead of parsing information about the image
config bool // Output the raw config blob instead of parsing information about the image
global *globalOptions
image *imageOptions
raw bool // Output the raw manifest instead of parsing information about the image
config bool // Output the raw config blob instead of parsing information about the image
}
func inspectCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := inspectOptions{
global: global,
image: imageOpts,
retryOpts: retryOpts,
global: global,
image: imageOpts,
}
cmd := &cobra.Command{
Use: "inspect [command options] IMAGE-NAME",
@@ -54,16 +64,10 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
flags.BoolVar(&opts.config, "config", false, "output configuration")
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error) {
var (
rawManifest []byte
src types.ImageSource
imgInspect *types.ImageInspectInfo
)
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
@@ -81,11 +85,9 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
return err
}
if err := retry.RetryIfNecessary(ctx, func() error {
src, err = parseImageSource(ctx, opts.image, imageName)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error parsing image name %q", imageName)
src, err := parseImageSource(ctx, opts.image, imageName)
if err != nil {
return fmt.Errorf("Error parsing image name %q: %v", imageName, err)
}
defer func() {
@@ -94,11 +96,9 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
}()
if err := retry.RetryIfNecessary(ctx, func() error {
rawManifest, _, err = src.GetManifest(ctx, nil)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error retrieving manifest for image")
rawManifest, _, err := src.GetManifest(ctx, nil)
if err != nil {
return fmt.Errorf("Error retrieving manifest for image: %v", err)
}
if opts.raw && !opts.config {
@@ -115,12 +115,9 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
if opts.config && opts.raw {
var configBlob []byte
if err := retry.RetryIfNecessary(ctx, func() error {
configBlob, err = img.ConfigBlob(ctx)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error reading configuration blob")
configBlob, err := img.ConfigBlob(ctx)
if err != nil {
return fmt.Errorf("Error reading configuration blob: %v", err)
}
_, err = stdout.Write(configBlob)
if err != nil {
@@ -128,12 +125,9 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
}
return nil
} else if opts.config {
var config *v1.Image
if err := retry.RetryIfNecessary(ctx, func() error {
config, err = img.OCIConfig(ctx)
return err
}, opts.retryOpts); err != nil {
return errors.Wrapf(err, "Error reading OCI-formatted configuration data")
config, err := img.OCIConfig(ctx)
if err != nil {
return fmt.Errorf("Error reading OCI-formatted configuration data: %v", err)
}
err = json.NewEncoder(stdout).Encode(config)
if err != nil {
@@ -142,18 +136,15 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
return nil
}
if err := retry.RetryIfNecessary(ctx, func() error {
imgInspect, err = img.Inspect(ctx)
return err
}, opts.retryOpts); err != nil {
imgInspect, err := img.Inspect(ctx)
if err != nil {
return err
}
outputData := inspect.Output{
outputData := inspectOutput{
Name: "", // Set below if DockerReference() is known
Tag: imgInspect.Tag,
// Digest is set below.
RepoTags: []string{}, // Possibly overridden for docker.Transport.
RepoTags: []string{}, // Possibly overriden for docker.Transport.
Created: imgInspect.Created,
DockerVersion: imgInspect.DockerVersion,
Labels: imgInspect.Labels,


@@ -1,23 +0,0 @@
package inspect
import (
"time"
digest "github.com/opencontainers/go-digest"
)
// Output is the output format of (skopeo inspect),
// primarily so that we can format it with a simple json.MarshalIndent.
type Output struct {
Name string `json:",omitempty"`
Tag string `json:",omitempty"`
Digest digest.Digest
RepoTags []string
Created *time.Time
DockerVersion string
Labels map[string]string
Architecture string
Os string
Layers []string
Env []string
}
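The file above exists on only one side of the comparison and holds just this Output struct; the other side inlines the equivalent inspectOutput into inspect.go. The comment's point about json.MarshalIndent can be illustrated with a trimmed-down sketch (the field set is abbreviated, and the values and four-space indent are illustrative guesses):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// inspectOutput is a trimmed-down copy of the output struct above.
type inspectOutput struct {
	Name         string `json:",omitempty"` // dropped from the JSON when empty
	Tag          string `json:",omitempty"`
	Architecture string
	Os           string
	Layers       []string
}

func main() {
	out := inspectOutput{
		Tag:          "latest",
		Architecture: "amd64",
		Os:           "linux",
		Layers:       []string{"sha256:..."},
	}

	// A single MarshalIndent call is all the output formatting the struct needs.
	data, err := json.MarshalIndent(out, "", "    ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // Name is absent thanks to omitempty
}
```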


@@ -7,7 +7,6 @@ import (
"os"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/pkg/blobinfocache"
@@ -18,19 +17,16 @@ import (
)
type layersOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
global *globalOptions
image *imageOptions
}
func layersCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := layersOptions{
global: global,
image: imageOpts,
retryOpts: retryOpts,
global: global,
image: imageOpts,
}
cmd := &cobra.Command{
Hidden: true,
@@ -42,7 +38,6 @@ func layersCmd(global *globalOptions) *cobra.Command {
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
@@ -65,20 +60,12 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
return err
}
cache := blobinfocache.DefaultCache(sys)
var (
rawSource types.ImageSource
src types.ImageCloser
)
if err = retry.RetryIfNecessary(ctx, func() error {
rawSource, err = parseImageSource(ctx, opts.image, imageName)
return err
}, opts.retryOpts); err != nil {
rawSource, err := parseImageSource(ctx, opts.image, imageName)
if err != nil {
return err
}
if err = retry.RetryIfNecessary(ctx, func() error {
src, err = image.FromSource(ctx, sys, rawSource)
return err
}, opts.retryOpts); err != nil {
src, err := image.FromSource(ctx, sys, rawSource)
if err != nil {
if closeErr := rawSource.Close(); closeErr != nil {
return errors.Wrapf(err, " (close error: %v)", closeErr)
}
@@ -142,14 +129,8 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
}()
for _, bd := range blobDigests {
var (
r io.ReadCloser
blobSize int64
)
if err = retry.RetryIfNecessary(ctx, func() error {
r, blobSize, err = rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
return err
}, opts.retryOpts); err != nil {
r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
if err != nil {
return err
}
if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
@@ -160,11 +141,8 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
}
}
var manifest []byte
if err = retry.RetryIfNecessary(ctx, func() error {
manifest, _, err = src.Manifest(ctx)
return err
}, opts.retryOpts); err != nil {
manifest, _, err := src.Manifest(ctx)
if err != nil {
return err
}
if err := dest.PutManifest(ctx, manifest, nil); err != nil {


@@ -7,7 +7,6 @@ import (
"io"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports/alltransports"
@@ -23,20 +22,17 @@ type tagListOutput struct {
}
type tagsOptions struct {
global *globalOptions
image *imageOptions
retryOpts *retry.RetryOptions
global *globalOptions
image *imageOptions
}
func tagsCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := dockerImageFlags(global, sharedOpts, "", "")
retryFlags, retryOpts := retryFlags()
opts := tagsOptions{
global: global,
image: imageOpts,
retryOpts: retryOpts,
global: global,
image: imageOpts,
}
cmd := &cobra.Command{
Use: "list-tags [command options] REPOSITORY-NAME",
@@ -55,7 +51,6 @@ See skopeo-list-tags(1) section "REPOSITORY NAMES" for the expected format
flags := cmd.Flags()
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&imageFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
@@ -123,12 +118,8 @@ func (opts *tagsOptions) run(args []string, stdout io.Writer) (retErr error) {
return err
}
var repositoryName string
var tagListing []string
if err = retry.RetryIfNecessary(ctx, func() error {
repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
return err
}, opts.retryOpts); err != nil {
repositoryName, tagListing, err := listDockerTags(ctx, sys, imgRef)
if err != nil {
return err
}


@@ -11,7 +11,6 @@ import (
"regexp"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/docker"
@@ -26,15 +25,14 @@ import (
// syncOptions contains information retrieved from the skopeo sync command line.
type syncOptions struct {
global *globalOptions // Global (not command dependent) skopeo options
global *globalOptions // Global (not command dependant) skopeo options
srcImage *imageOptions // Source image options
destImage *imageDestOptions // Destination image options
retryOpts *retry.RetryOptions
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
source string // Source repository name
destination string // Destination registry name
scoped bool // When true, namespace copied images at destination using the source repository name
}
// repoDescriptor contains information of a single repository used as a sync source.
@@ -67,13 +65,11 @@ func syncCmd(global *globalOptions) *cobra.Command {
sharedFlags, sharedOpts := sharedImageFlags()
srcFlags, srcOpts := dockerImageFlags(global, sharedOpts, "src-", "screds")
destFlags, destOpts := dockerImageFlags(global, sharedOpts, "dest-", "dcreds")
retryFlags, retryOpts := retryFlags()
opts := syncOptions{
global: global,
srcImage: srcOpts,
destImage: &imageDestOptions{imageOptions: destOpts},
retryOpts: retryOpts,
}
cmd := &cobra.Command{
@@ -99,7 +95,6 @@ See skopeo-sync(1) for details.
flags.AddFlagSet(&sharedFlags)
flags.AddFlagSet(&srcFlags)
flags.AddFlagSet(&destFlags)
flags.AddFlagSet(&retryFlags)
return cmd
}
@@ -514,15 +509,9 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
return err
}
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
sourceArg := args[0]
var srcRepoList []repoDescriptor
if err = retry.RetryIfNecessary(ctx, func() error {
srcRepoList, err = imagesToCopy(sourceArg, opts.source, sourceCtx)
return err
}, opts.retryOpts); err != nil {
srcRepoList, err := imagesToCopy(sourceArg, opts.source, sourceCtx)
if err != nil {
return err
}
@@ -532,6 +521,9 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
return err
}
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
imagesNumber := 0
options := copy.Options{
RemoveSignatures: opts.removeSignatures,
@@ -571,10 +563,8 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
"to": transports.ImageName(destRef),
}).Infof("Copying image tag %d/%d", counter+1, len(srcRepo.TaggedImages))
if err = retry.RetryIfNecessary(ctx, func() error {
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
return err
}, opts.retryOpts); err != nil {
_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
if err != nil {
return errors.Wrapf(err, "Error copying tag %q", transports.ImageName(ref))
}
imagesNumber++


@@ -6,7 +6,6 @@ import (
"os"
"strings"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
@@ -57,7 +56,6 @@ type dockerImageOptions struct {
shared *sharedImageOptions // May be shared across several imageOptions instances.
authFilePath optionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
credsOption optionalString // username[:password] for accessing a registry
registryToken optionalString // token to be used directly as a Bearer token when accessing the registry
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
noCreds bool // Access the registry anonymously
@@ -93,7 +91,6 @@ func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, flagPre
f := fs.VarPF(newOptionalStringValue(&flags.credsOption), credsOptionAlias, "", "Use `USERNAME[:PASSWORD]` for accessing the registry")
f.Hidden = true
}
fs.Var(newOptionalStringValue(&flags.registryToken), flagPrefix+"registry-token", "Provide a Bearer token for accessing the registry")
fs.StringVar(&flags.dockerCertPath, flagPrefix+"cert-dir", "", "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry or daemon")
optionalBoolFlag(&fs, &flags.tlsVerify, flagPrefix+"tls-verify", "require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)")
fs.BoolVar(&flags.noCreds, flagPrefix+"no-creds", false, "Access the registry anonymously")
@@ -111,17 +108,6 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, c
return fs, opts
}
type retryOptions struct {
maxRetry int // The number of times to possibly retry
}
func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
opts := retry.RetryOptions{}
fs := pflag.FlagSet{}
fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
return fs, &opts
}
// newSystemContext returns a *types.SystemContext corresponding to opts.
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
@@ -152,9 +138,6 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
return nil, err
}
}
if opts.registryToken.present {
ctx.DockerBearerRegistryToken = opts.registryToken.value
}
if opts.noCreds {
ctx.DockerAuthConfig = &types.DockerAuthConfig{}
}
@@ -162,7 +145,7 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
return ctx, nil
}
// imageDestOptions is a superset of imageOptions specialized for image destinations.
// imageDestOptions is a superset of imageOptions specialized for iamge destinations.
type imageDestOptions struct {
*imageOptions
dirForceCompression bool // Compress layers when saving to the dir: transport
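The retryFlags helper that appears on only one side of this file builds a small pflag.FlagSet and hands it to each command to merge via AddFlagSet. A self-contained sketch of that wiring, assuming only the pflag calls visible in the hunk (the command name and parsed arguments are invented):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// retryOptions mirrors the option struct shown in the hunk above.
type retryOptions struct {
	maxRetry int
}

// retryFlags builds a standalone flag set so several subcommands can share
// the same --retry-times flag, in the style of the helper in this diff.
func retryFlags() (pflag.FlagSet, *retryOptions) {
	opts := retryOptions{}
	fs := pflag.FlagSet{}
	fs.IntVar(&opts.maxRetry, "retry-times", 0, "the number of times to possibly retry")
	return fs, &opts
}

func main() {
	// Stand-in for the cobra command's flag set that AddFlagSet is called on.
	cmdFlags := pflag.NewFlagSet("copy", pflag.ContinueOnError)

	fs, opts := retryFlags()
	cmdFlags.AddFlagSet(&fs)

	if err := cmdFlags.Parse([]string{"--retry-times", "3"}); err != nil {
		panic(err)
	}
	fmt.Println(opts.maxRetry) // prints 3
}
```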


@@ -54,7 +54,6 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
"--dest-daemon-host", "daemon-host.example.com",
"--dest-tls-verify=false",
"--dest-creds", "creds-user:creds-password",
"--dest-registry-token", "faketoken",
})
res, err = opts.newSystemContext()
require.NoError(t, err)
@@ -68,7 +67,6 @@ func TestImageOptionsNewSystemContext(t *testing.T) {
DockerCertPath: "/srv/cert-dir",
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
DockerBearerRegistryToken: "faketoken",
DockerDaemonCertPath: "/srv/cert-dir",
DockerDaemonHost: "daemon-host.example.com",
DockerDaemonInsecureSkipTLSVerify: true,
@@ -166,7 +164,6 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
"--dest-daemon-host", "daemon-host.example.com",
"--dest-tls-verify=false",
"--dest-creds", "creds-user:creds-password",
"--dest-registry-token", "faketoken",
})
res, err = opts.newSystemContext()
require.NoError(t, err)
@@ -180,7 +177,6 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
DockerCertPath: "/srv/cert-dir",
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
DockerBearerRegistryToken: "faketoken",
DockerDaemonCertPath: "/srv/cert-dir",
DockerDaemonHost: "daemon-host.example.com",
DockerDaemonInsecureSkipTLSVerify: true,


@@ -49,8 +49,6 @@ _skopeo_copy() {
--dest-tls-verify
--src-daemon-host
--dest-daemon-host
--src-registry-token
--dest-registry-token
"
local boolean_options="
@@ -75,8 +73,6 @@ _skopeo_inspect() {
--authfile
--creds
--cert-dir
--retry-times
--registry-token
"
local boolean_options="
--config
@@ -123,7 +119,6 @@ _skopeo_delete() {
--authfile
--creds
--cert-dir
--registry-token
"
local boolean_options="
--tls-verify
@@ -140,14 +135,11 @@ _skopeo_delete() {
_skopeo_layers() {
local options_with_args="
--authfile
--creds
--cert-dir
--registry-token
"
local boolean_options="
--tls-verify
--no-creds
"
_complete_ "$options_with_args" "$boolean_options"
}
@@ -157,7 +149,6 @@ _skopeo_list_repository_tags() {
--authfile
--creds
--cert-dir
--registry-token
"
local boolean_options="


@@ -18,7 +18,7 @@ using the latest Fedora and then Skopeo is installed into them:
## Sample Usage
Although not required, it is suggested that [Podman](https://github.com/containers/podman) be used with these container images.
Although not required, it is suggested that [Podman](https://github.com/containers/libpod) be used with these container images.
```
# Get Help on Skopeo


@@ -13,7 +13,7 @@ FROM registry.fedoraproject.org/fedora:32
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --exclude container-selinux; yum clean all; rm -rf /var/cache /var/log/dnf* /var/log/yum.*;
# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf


@@ -14,7 +14,7 @@ FROM registry.fedoraproject.org/fedora:32
# up space. Also reinstall shadow-utils as without
# doing so, the setuid/setgid bits on newuidmap
# and newgidmap are lost in the Fedora images.
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --enablerepo updates-testing --exclude container-selinux; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*
RUN useradd skopeo; yum -y update; yum -y reinstall shadow-utils; yum -y install skopeo fuse-overlayfs --enablerepo updates-testing --exclude container-selinux; yum clean all; rm -rf /var/cache /var/log/dnf* /var/log/yum.*;
# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf


@@ -29,12 +29,11 @@ mkdir /root/skopeo; \
git clone https://github.com/containers/skopeo /root/skopeo/src/github.com/containers/skopeo; \
export GOPATH=/root/skopeo; \
cd /root/skopeo/src/github.com/containers/skopeo; \
make bin/skopeo;\
make binary-local;\
make install;\
rm -rf /root/skopeo/*; \
yum -y remove git golang go-md2man make; \
yum -y clean all; yum -y clean all; rm -rf /var/cache/dnf/* /var/log/dnf* /var/log/yum*
yum clean all; rm -rf /var/cache /var/log/dnf* /var/log/yum.*;
# Adjust storage.conf to enable Fuse storage.
RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' /etc/containers/storage.conf


@@ -15,9 +15,6 @@ Uses the system's trust policy to validate images, rejects images not trusted by
_destination-image_ use the "image name" format described above
_source-image_ and _destination-image_ are interpreted completely independently; e.g. the destination name does not
automatically inherit any parts of the source name.
## OPTIONS
**--all**
@@ -50,29 +47,29 @@ Path of the authentication file for the destination registry. Uses path given by
**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_
**--encryption-key** _protocol:keyfile_ specifies the encryption protocol, which can be JWE (RFC7516), PGP (RFC4880), and PKCS7 (RFC2315) and the key material required for image encryption. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file.
**--encryption-key** _Key_ a reference prefixed with the encryption protocol to use. The supported protocols are JWE, PGP and PKCS7. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file. This feature is still *experimental*.
**--decryption-key** _key[:passphrase]_ to be used for decryption of images. Key can point to keys and/or certificates. Decryption will be tried with all keys. If the key is protected by a passphrase, it is required to be passed in the argument and omitted otherwise.
**--decryption-key** _Key_ a reference required to perform decryption of container images. This should point to files which represent keys and/or certificates that can be used for decryption. Decryption will be tried with all keys. This feature is still *experimental*.
**--src-creds** _username[:password]_ for accessing the source registry.
**--src-creds** _username[:password]_ for accessing the source registry
**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source).
**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)
**--dest-oci-accept-uncompressed-layers** _bool-value_ Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed).
**--dest-oci-accept-uncompressed-layers** _bool-value_ Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed)
**--dest-creds** _username[:password]_ for accessing the destination registry.
**--dest-creds** _username[:password]_ for accessing the destination registry
**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon.
**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon
**--src-no-creds** _bool-value_ Access the registry anonymously.
**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true).
**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true)
**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon.
**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon
**--dest-no-creds** _bool-value_ Access the registry anonymously.
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true).
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true)
**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
@@ -84,17 +81,8 @@ Existing signatures, if any, are preserved as well.
**--dest-compress-level** _format_ Specifies the compression level to use. The value is specific to the compression algorithm used, e.g. for zstd the accepted values are in the range 1-20 (inclusive), while for gzip it is 1-9 (inclusive).
**--src-registry-token** _Bearer token_ for accessing the source registry.
**--dest-registry-token** _Bearer token_ for accessing the destination registry.
## EXAMPLES
To just copy an image from one registry to another:
```sh
$ skopeo copy docker://quay.io/skopeo/stable:latest docker://registry.example.com/skopeo:latest
```
To copy the layers of the docker.io busybox image to a local directory:
```sh
$ mkdir -p /var/lib/images/busybox


@@ -24,18 +24,16 @@ $ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distrib
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--creds** _username[:password]_ for accessing the registry.
**--creds** _username[:password]_ for accessing the registry
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry.
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
**--no-creds** _bool-value_ Access the registry anonymously.
Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
**--registry-token** _Bearer token_ for accessing the registry.
## EXAMPLES
Mark image example/pause for deletion from the registry.example.com registry:


@@ -25,18 +25,14 @@ Return low-level information about _image-name_ in a registry
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--creds** _username[:password]_ for accessing the registry.
**--creds** _username[:password]_ for accessing the registry
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry
**--retry-times** the number of times to retry, retry wait time will be exponentially increased based on the number of failed attempts.
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
**--no-creds** _bool-value_ Access the registry anonymously.
**--registry-token** _Bearer token_ for accessing the registry.
## EXAMPLES
To review information for the image fedora from the docker.io registry:


@@ -15,16 +15,14 @@ Return a list of tags from _repository-name_ in a registry.
Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `skopeo login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--creds** _username[:password]_ for accessing the registry.
**--creds** _username[:password]_ for accessing the registry
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry.
**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
**--no-creds** _bool-value_ Access the registry anonymously.
**--registry-token** _Bearer token_ for accessing the registry.
## REPOSITORY NAMES
Repository names are transport-specific references as each transport may have its own concept of a "repository" and "tags". Currently, only the Docker transport is supported.


@@ -4,7 +4,7 @@
skopeo\-login - Login to a container registry
## SYNOPSIS
**skopeo login** [*options*] *registry*
**skoepo login** [*options*] *registry*
## DESCRIPTION
**skopeo login** logs into a specified registry server with the correct username


@@ -71,10 +71,6 @@ Path of the authentication file for the destination registry. Uses path given by
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to a container destination registry or daemon (defaults to true).
**--src-registry-token** _Bearer token_ for accessing the source registry.
**--dest-registry-token** _Bearer token_ for accessing the destination registry.
## EXAMPLES
### Synchronizing to a local directory


@@ -44,11 +44,6 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
**oci:**_path_**:**_tag_
An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_.
**oci-archive:**_path_**:**_tag_
An image _tag_ in a tar archive compliant with "Open Container Image Layout Specification" at _path_.
See [containers-transports(5)](https://github.com/containers/image/blob/master/docs/containers-transports.5.md) for details.
## OPTIONS
**--command-timeout** _duration_ Timeout for the command execution.

go.mod

@@ -3,17 +3,17 @@ module github.com/containers/skopeo
go 1.12
require (
github.com/containers/common v0.22.0
github.com/containers/image/v5 v5.6.0
github.com/containers/ocicrypt v1.0.3
github.com/containers/storage v1.23.5
github.com/containers/common v0.14.0
github.com/containers/image/v5 v5.5.1
github.com/containers/ocicrypt v1.0.2
github.com/containers/storage v1.20.2
github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f
github.com/dsnet/compress v0.0.1 // indirect
github.com/go-check/check v0.0.0-20180628173108-788fd7840127
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d
github.com/opencontainers/runc v1.0.0-rc92 // indirect
github.com/opencontainers/runtime-spec v1.0.0 // indirect
github.com/pkg/errors v0.9.1
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/sirupsen/logrus v1.6.0

go.sum

@@ -7,6 +7,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -21,49 +23,43 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s=
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69 h1:rG1clvJbgsUcmb50J82YUJhUMopWNtZvyMZjb+4fqGw=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containers/common v0.22.0 h1:MjJIMka4pJddHsfZpQCF7jOmX6vXqMs0ojDeYmPKoSk=
github.com/containers/common v0.22.0/go.mod h1:qsLcLHM7ha5Nc+JDp5duBwfwEfrnlfjXL/K8HO96QHw=
github.com/containers/image/v5 v5.5.2 h1:fv7FArz0zUnjH0W0l8t90CqWFlFcQrPP6Pug+9dUtVI=
github.com/containers/image/v5 v5.5.2/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
github.com/containers/image/v5 v5.6.0 h1:r4AqIX4NO/X7OJkqX574zITV3fq0ZPn0pSlLsxWF6ww=
github.com/containers/image/v5 v5.6.0/go.mod h1:iUSWo3SOLqJo0CkZkKrHxqR6YWqrT98mkXFpE0MceE8=
github.com/containers/common v0.14.0 h1:hiZFDPf6ajKiDmojN5f5X3gboKPO73NLrYb0RXfrQiA=
github.com/containers/common v0.14.0/go.mod h1:9olhlE+WhYof1npnMJdyRMX14/yIUint6zyHzcyRVAg=
github.com/containers/image/v5 v5.4.4 h1:JSanNn3v/BMd3o0MEvO4R4OKNuoJUSzVGQAI1+0FMXE=
github.com/containers/image/v5 v5.4.4/go.mod h1:g7cxNXitiLi6pEr9/L9n/0wfazRuhDKXU15kV86N8h8=
github.com/containers/image/v5 v5.5.1 h1:h1FCOXH6Ux9/p/E4rndsQOC4yAdRU0msRTfLVeQ7FDQ=
github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M=
github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c=
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/storage v1.19.1 h1:YKIzOO12iaD5Ra0PKFS6emcygbHLmwmQOCQRU/19YAQ=
github.com/containers/storage v1.19.1/go.mod h1:KbXjSwKnx17ejOsjFcCXSf78mCgZkQSLPBNTMRc3XrQ=
github.com/containers/storage v1.20.2 h1:tw/uKRPDnmVrluIzer3dawTFG/bTJLP8IEUyHFhltYk=
github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
github.com/containers/storage v1.23.5 h1:He9I6y1vRVXYoQg4v2Q9HFAcX4dI3V5MCCrjeBcjkCY=
github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -88,6 +84,7 @@ github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5Jflh
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -98,7 +95,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
@@ -109,8 +105,8 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -121,6 +117,7 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -131,19 +128,18 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -154,15 +150,17 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc=
github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg=
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.8 h1:eLeJ3dr/Y9+XRfJT4l+8ZjmtB5RPJhucH2HeCV5+IZY=
github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.3 h1:Ce2to9wvs/cuJ2b86/CKQoTYr9VHfpanYosZ0UBJqdw=
github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -185,16 +183,12 @@ github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJd
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/sys/mountinfo v0.1.3 h1:KIrhRO14+AkwKvG/g2yIpNMOUVZ02xNhOw8KY1WsLOI=
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s=
github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -202,11 +196,11 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@@ -216,19 +210,19 @@ github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod
github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d h1:X9WSFjjZNqYRqO2MenUgqE2nj/oydcfIzXJ0R/SVnnA=
github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d/go.mod h1:A9btVpZLzttF4iFaKNychhPyrhfOjJ1OF5KrA8GcLj4=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc90 h1:4+xo8mtWixbHoEm451+WJNUrq12o2/tDsyK9Vgc/NcA=
github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8=
github.com/opencontainers/runc v1.0.0-rc92 h1:+IczUKCRzDzFDnw99O/PAqrcBBCoRp9xN3cB1SYSNS4=
github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6 h1:NhsM2gc769rVWDqJvapK37r+7+CBXI8xHhnfnt8uQsg=
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.0 h1:O6L965K88AilqnxeYPks/75HLpp4IG+FjeSCI3cVdRg=
github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q=
github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -258,15 +252,15 @@ github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -286,13 +280,18 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho=
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
@@ -302,25 +301,19 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=
github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/vbauerster/mpb/v5 v5.0.4 h1:w7l/tJfHmtIOKZkU+bhbDZOUxj1kln9jy4DUOp3Tl14=
github.com/vbauerster/mpb/v5 v5.0.4/go.mod h1:fvzasBUyuo35UyuA6sSOlVhpLoNQsp2nBdHw7OiSUU8=
github.com/vbauerster/mpb/v5 v5.2.2 h1:zIICVOm+XD+uV6crpSORaL6I0Q1WqOdvxZTp+r3L9cw=
github.com/vbauerster/mpb/v5 v5.2.2/go.mod h1:W5Fvgw4dm3/0NhqzV8j6EacfuTe5SvnzBRwiXxDR9ww=
github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ=
github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA=
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
@@ -328,10 +321,6 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -359,7 +348,9 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
@@ -381,24 +372,23 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -417,6 +407,7 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -447,6 +438,7 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -456,3 +448,4 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=

View File

@@ -8,7 +8,7 @@ bundle_test_integration() {
# subshell so that we can export PATH without breaking other things
(
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make install
bundle_test_integration
) 2>&1

View File

@@ -11,7 +11,7 @@ sed -i \
/etc/containers/storage.conf
# Build skopeo, install into /usr/bin
make bin/skopeo ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make install
# Run tests

View File

@@ -12,6 +12,6 @@ go version
GO111MODULE=off go get -u github.com/cpuguy83/go-md2man golang.org/x/lint/golint
cd ${_containers}/skopeo
make validate-local test-unit-local bin/skopeo
make validate-local test-unit-local binary-local
sudo make install
skopeo -v

View File

@@ -114,7 +114,7 @@ Make sure to clone this repository in your `GOPATH` - otherwise compilation fail
```bash
$ git clone https://github.com/containers/skopeo $GOPATH/src/github.com/containers/skopeo
$ cd $GOPATH/src/github.com/containers/skopeo && make bin/skopeo
$ cd $GOPATH/src/github.com/containers/skopeo && make binary-local
```
### Building in a container
@@ -130,6 +130,12 @@ Building in a container is simpler, but more restrictive:
$ make binary # Or (make all) to also build documentation, see below.
```
To build a pure-Go static binary (disables devicemapper, btrfs, and gpgme):
```bash
$ make binary-static DISABLE_CGO=1
```
### Building documentation
To build the manual you will need go-md2man.

View File

@@ -488,7 +488,7 @@ func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// "pull": docker: → dir:
assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
// "push": dir: → atomic:
@@ -509,7 +509,7 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// "pull": docker: → dir:
assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+dir1)
// "push": dir: → docker(v2s2):
@@ -626,7 +626,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
// Since the image is partially encrypted we should find layers that aren't encrypted
matchLayerBlobBinaryType(c, partiallyEncryptedImgDir+"/blobs/sha256", "application/x-gzip", 2)
// Decrypt the partially encrypted image
// Decrypt the partially encrypted image
assertSkopeoSucceeds(c, "", "copy", "--decryption-key", keysDir+"/private.key",
"oci:"+partiallyEncryptedImgDir+":encrypted", "oci:"+partiallyDecryptedImgDir+":decrypted")
@@ -720,7 +720,7 @@ func (s *CopySuite) TestCopyStreaming(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(dir2)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// streaming: docker: → atomic:
assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://estesp/busybox:amd64", "atomic:localhost:5000/myns/unsigned:streaming")
// Compare (copies of) the original and the copy:
@@ -1087,7 +1087,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
defer os.Remove(policy)
// We use X-R-S-S for this testing to avoid having to deal with the sigstores.
// A downside is that OpenShift records signatures per image, so the error messages below
// A downside is that OpenShift records signatures per image, so the error messages below
// list all signatures for other tags used for the same image as well.
// So, make sure to never create a signature that could be considered valid in a different part of the test (i.e. don't reuse tags).
@@ -1103,7 +1103,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
// Sign the image for the mirror
assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by", "personal@example.com", regPrefix+"primary:unsigned", regPrefix+"mirror:mirror-signed")
// Verify that a correctly signed image for the mirror is accessible using the mirror's reference
// Verify that a correctly signed image for the mirror is accessible using the mirror's reference
assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"mirror:mirror-signed", dirDest)
// … but verify that while it is accessible using the primary location redirecting to the mirror, …
assertSkopeoSucceeds(c, "" /* no --policy */, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:mirror-signed", dirDest)

View File

@@ -19,7 +19,7 @@ to start a container, then within the container:
SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'
An example of what can be done within the container:
cd ..; make bin/skopeo install
cd ..; make binary-local install
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://busybox:latest atomic:localhost:5000/myns/personal:personal
oc get istag personal:personal -o json
curl -L -v 'http://localhost:5000/v2/'

View File

@@ -94,7 +94,7 @@ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
@@ -118,7 +118,7 @@ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
}
func (s *SyncSuite) TestScoped(c *check.C) {
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
@@ -138,7 +138,7 @@ func (s *SyncSuite) TestScoped(c *check.C) {
}
func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
@@ -173,7 +173,7 @@ func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "alpine"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
@@ -350,7 +350,7 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
image := "busybox"
tag := "latest"
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// copy docker => docker
assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image+":"+tag, localRegURL+image+":"+tag)
@@ -403,7 +403,7 @@ func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "busybox:latest"
imageRef, err := docker.ParseReference(fmt.Sprintf("//%s", image))
c.Assert(err, check.IsNil)
@@ -436,7 +436,7 @@ func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
image := "busybox:latest"
dir1 := path.Join(tmpDir, "dir1")

View File

@@ -1,57 +0,0 @@
{ system ? builtins.currentSystem }:
let
pkgs = (import ./nixpkgs.nix {
config = {
packageOverrides = pkg: {
gpgme = (static pkg.gpgme);
libassuan = (static pkg.libassuan);
libgpgerror = (static pkg.libgpgerror);
libseccomp = (static pkg.libseccomp);
glib = (static pkg.glib).overrideAttrs(x: {
outputs = [ "bin" "out" "dev" ];
mesonFlags = [
"-Ddefault_library=static"
"-Ddevbindir=${placeholder ''dev''}/bin"
"-Dgtk_doc=false"
"-Dnls=disabled"
];
});
};
};
});
static = pkg: pkg.overrideAttrs(x: {
doCheck = false;
configureFlags = (x.configureFlags or []) ++ [
"--without-shared"
"--disable-shared"
];
dontDisableStatic = true;
enableSharedExecutables = false;
enableStatic = true;
});
self = with pkgs; buildGoModule rec {
name = "skopeo";
src = ./..;
vendorSha256 = null;
doCheck = false;
enableParallelBuilding = true;
outputs = [ "out" ];
nativeBuildInputs = [ bash git go-md2man installShellFiles makeWrapper pkg-config which ];
buildInputs = [ glibc glibc.static gpgme libassuan libgpgerror libseccomp ];
prePatch = ''
export CFLAGS='-static'
export LDFLAGS='-s -w -static-libgcc -static'
export EXTRA_LDFLAGS='-s -w -linkmode external -extldflags "-static -lm"'
export BUILDTAGS='static netgo exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
'';
buildPhase = ''
patchShebangs .
make bin/skopeo
'';
installPhase = ''
install -Dm755 bin/skopeo $out/bin/skopeo
'';
};
in self

View File

@@ -1,7 +0,0 @@
{
"url": "https://github.com/nixos/nixpkgs",
"rev": "d5a689edda8219a1e20fd3871174b994cf0a94a3",
"date": "2020-09-13T01:58:20+02:00",
"sha256": "0m6nmi1fx0glfbg52kqdjgidxylk4p5xnx9v35wlsfi1j2xhkia4",
"fetchSubmodules": false
}

View File

@@ -1,8 +0,0 @@
let
json = builtins.fromJSON (builtins.readFile ./nixpkgs.json);
nixpkgs = import (builtins.fetchTarball {
name = "nixos-unstable";
url = "${json.url}/archive/${json.rev}.tar.gz";
inherit (json) sha256;
});
in nixpkgs

90
vendor/github.com/containerd/containerd/log/context.go generated vendored Normal file
View File

@@ -0,0 +1,90 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"context"
"sync/atomic"
"github.com/sirupsen/logrus"
)
var (
// G is an alias for GetLogger.
//
// We may want to define this locally to a package to get package tagged log
// messages.
G = GetLogger
// L is an alias for the standard logger.
L = logrus.NewEntry(logrus.StandardLogger())
)
type (
loggerKey struct{}
)
// TraceLevel is the log level for tracing. Trace level is lower than debug level,
// and is usually used to trace detailed behavior of the program.
const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1))
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
// ensure the formatted time is always the same number of characters.
const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// ParseLevel takes a string level and returns the Logrus log level constant.
// It supports trace level.
func ParseLevel(lvl string) (logrus.Level, error) {
if lvl == "trace" {
return TraceLevel, nil
}
return logrus.ParseLevel(lvl)
}
// WithLogger returns a new context with the provided logger. Use in
// combination with logger.WithField(s) for great effect.
func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
return context.WithValue(ctx, loggerKey{}, logger)
}
// GetLogger retrieves the current logger from the context. If no logger is
// available, the default logger is returned.
func GetLogger(ctx context.Context) *logrus.Entry {
logger := ctx.Value(loggerKey{})
if logger == nil {
return L
}
return logger.(*logrus.Entry)
}
// Trace logs a message at level Trace with the log entry passed-in.
func Trace(e *logrus.Entry, args ...interface{}) {
level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
if level >= TraceLevel {
e.Debug(args...)
}
}
// Tracef logs a message at level Trace with the log entry passed-in.
func Tracef(e *logrus.Entry, format string, args ...interface{}) {
level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
if level >= TraceLevel {
e.Debugf(format, args...)
}
}
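The vendored helpers above attach a logrus entry to a context and retrieve it later. As a rough sketch of how a consumer of this vendored package might use them (not part of this compare; the field name is invented for illustration):

```go
package main

import (
	"context"

	"github.com/containerd/containerd/log"
	"github.com/sirupsen/logrus"
)

func main() {
	// Attach an entry carrying an (invented) request ID to the context.
	ctx := log.WithLogger(context.Background(), logrus.WithField("request-id", "42"))

	// Further down the call chain, G(ctx) returns that entry;
	// with a bare context the package-level default L is used instead.
	log.G(ctx).Info("copying image")
	log.G(context.Background()).Info("falls back to the standard logger")
}
```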

View File

@@ -0,0 +1,229 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import specs "github.com/opencontainers/image-spec/specs-go/v1"
// MatchComparer is able to match and compare platforms to
// filter and sort platforms.
type MatchComparer interface {
Matcher
Less(specs.Platform, specs.Platform) bool
}
// Only returns a match comparer for a single platform
// using default resolution logic for the platform.
//
// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes)
// For ARMv7, will also match ARMv6 and ARMv5
// For ARMv6, will also match ARMv5
func Only(platform specs.Platform) MatchComparer {
platform = Normalize(platform)
if platform.Architecture == "arm" {
if platform.Variant == "v8" {
return orderedPlatformComparer{
matchers: []Matcher{
&matcher{
Platform: platform,
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v7",
},
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v6",
},
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v5",
},
},
},
}
}
if platform.Variant == "v7" {
return orderedPlatformComparer{
matchers: []Matcher{
&matcher{
Platform: platform,
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v6",
},
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v5",
},
},
},
}
}
if platform.Variant == "v6" {
return orderedPlatformComparer{
matchers: []Matcher{
&matcher{
Platform: platform,
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v5",
},
},
},
}
}
}
return singlePlatformComparer{
Matcher: &matcher{
Platform: platform,
},
}
}
// Ordered returns a platform MatchComparer which matches any of the platforms
// but orders them in the order they are provided.
func Ordered(platforms ...specs.Platform) MatchComparer {
matchers := make([]Matcher, len(platforms))
for i := range platforms {
matchers[i] = NewMatcher(platforms[i])
}
return orderedPlatformComparer{
matchers: matchers,
}
}
// Any returns a platform MatchComparer which matches any of the platforms
// with no preference for ordering.
func Any(platforms ...specs.Platform) MatchComparer {
matchers := make([]Matcher, len(platforms))
for i := range platforms {
matchers[i] = NewMatcher(platforms[i])
}
return anyPlatformComparer{
matchers: matchers,
}
}
// All is a platform MatchComparer which matches all platforms
// with preference for ordering.
var All MatchComparer = allPlatformComparer{}
type singlePlatformComparer struct {
Matcher
}
func (c singlePlatformComparer) Less(p1, p2 specs.Platform) bool {
return c.Match(p1) && !c.Match(p2)
}
type orderedPlatformComparer struct {
matchers []Matcher
}
func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
for _, m := range c.matchers {
if m.Match(platform) {
return true
}
}
return false
}
func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
for _, m := range c.matchers {
p1m := m.Match(p1)
p2m := m.Match(p2)
if p1m && !p2m {
return true
}
if p1m || p2m {
return false
}
}
return false
}
type anyPlatformComparer struct {
matchers []Matcher
}
func (c anyPlatformComparer) Match(platform specs.Platform) bool {
for _, m := range c.matchers {
if m.Match(platform) {
return true
}
}
return false
}
func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
var p1m, p2m bool
for _, m := range c.matchers {
if !p1m && m.Match(p1) {
p1m = true
}
if !p2m && m.Match(p2) {
p2m = true
}
if p1m && p2m {
return false
}
}
// If one matches and the other does not, sort the match first
return p1m && !p2m
}
type allPlatformComparer struct{}
func (allPlatformComparer) Match(specs.Platform) bool {
return true
}
func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
return false
}
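For orientation only (not part of this compare), a minimal sketch of how the `Only` comparer defined above might be used against a made-up list of candidate platforms; for `linux/arm/v7` it also matches the lower `v6`/`v5` variants as described:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Invented candidates, e.g. entries of a manifest list.
	candidates := []specs.Platform{
		{OS: "linux", Architecture: "arm", Variant: "v6"},
		{OS: "linux", Architecture: "arm64"},
		{OS: "windows", Architecture: "amd64"},
	}

	// Only(...) matches the requested platform and, for ARM, compatible lower variants.
	m := platforms.Only(platforms.MustParse("linux/arm/v7"))
	for _, p := range candidates {
		fmt.Printf("%-18s match=%v\n", platforms.Format(p), m.Match(p))
	}
}
```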

View File

@@ -0,0 +1,117 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import (
"bufio"
"os"
"runtime"
"strings"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/pkg/errors"
)
// cpuVariant records the ARM instruction set architecture variant, e.g. v7 or v8
var cpuVariant string
func init() {
if isArmArch(runtime.GOARCH) {
cpuVariant = getCPUVariant()
} else {
cpuVariant = ""
}
}
// For Linux, the kernel has already detected the ABI, ISA and Features.
// So we don't need to access the ARM registers to detect platform information
// by ourselves. We can just parse this information from /proc/cpuinfo
func getCPUInfo(pattern string) (info string, err error) {
if !isLinuxOS(runtime.GOOS) {
return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS)
}
cpuinfo, err := os.Open("/proc/cpuinfo")
if err != nil {
return "", err
}
defer cpuinfo.Close()
// Parse the cpuinfo output line by line. For an SMP SoC, parsing the
// first core is enough.
scanner := bufio.NewScanner(cpuinfo)
for scanner.Scan() {
newline := scanner.Text()
list := strings.Split(newline, ":")
if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
return strings.TrimSpace(list[1]), nil
}
}
// Check whether the scanner encountered errors
err = scanner.Err()
if err != nil {
return "", err
}
return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern)
}
func getCPUVariant() string {
if runtime.GOOS == "windows" {
// Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
// runtime.GOARCH to determine the variants
var variant string
switch runtime.GOARCH {
case "arm64":
variant = "v8"
case "arm":
variant = "v7"
default:
variant = "unknown"
}
return variant
}
variant, err := getCPUInfo("Cpu architecture")
if err != nil {
log.L.WithError(err).Error("failure getting variant")
return ""
}
switch variant {
case "8", "AArch64":
variant = "v8"
case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
variant = "v7"
case "6", "6TEJ":
variant = "v6"
case "5", "5T", "5TE", "5TEJ":
variant = "v5"
case "4", "4T":
variant = "v4"
case "3":
variant = "v3"
default:
variant = "unknown"
}
return variant
}
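`getCPUInfo` and `getCPUVariant` are unexported, so the following self-contained sketch only mimics their parsing on an invented `/proc/cpuinfo` excerpt; the sample text and the reduced mapping table are assumptions for illustration:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// Invented /proc/cpuinfo excerpt from a 32-bit ARM board.
const cpuinfo = `processor       : 0
model name      : ARMv7 Processor rev 4 (v7l)
CPU implementer : 0x41
CPU architecture: 7
`

func main() {
	// Scan line by line and pick the "CPU architecture" field,
	// mirroring getCPUInfo("Cpu architecture") above.
	var raw string
	scanner := bufio.NewScanner(strings.NewReader(cpuinfo))
	for scanner.Scan() {
		parts := strings.SplitN(scanner.Text(), ":", 2)
		if len(parts) == 2 && strings.EqualFold(strings.TrimSpace(parts[0]), "CPU architecture") {
			raw = strings.TrimSpace(parts[1])
		}
	}

	// Reduced version of the switch in getCPUVariant.
	variants := map[string]string{"8": "v8", "7": "v7", "6": "v6", "5": "v5"}
	fmt.Printf("raw=%q variant=%q\n", raw, variants[raw]) // raw="7" variant="v7"
}
```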

View File

@@ -0,0 +1,114 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import (
"runtime"
"strings"
)
// isLinuxOS returns true if the operating system is Linux.
//
// The OS value should be normalized before calling this function.
func isLinuxOS(os string) bool {
return os == "linux"
}
// These functions are generated from https://golang.org/src/go/build/syslist.go.
//
// We use switch statements because they are slightly faster than map lookups
// and use a little less memory.
// isKnownOS returns true if we know about the operating system.
//
// The OS value should be normalized before calling this function.
func isKnownOS(os string) bool {
switch os {
case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
return true
}
return false
}
// isArmArch returns true if the architecture is ARM.
//
// The arch value should be normalized before being passed to this function.
func isArmArch(arch string) bool {
switch arch {
case "arm", "arm64":
return true
}
return false
}
// isKnownArch returns true if we know about the architecture.
//
// The arch value should be normalized before being passed to this function.
func isKnownArch(arch string) bool {
switch arch {
case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
return true
}
return false
}
func normalizeOS(os string) string {
if os == "" {
return runtime.GOOS
}
os = strings.ToLower(os)
switch os {
case "macos":
os = "darwin"
}
return os
}
// normalizeArch normalizes the architecture.
func normalizeArch(arch, variant string) (string, string) {
arch, variant = strings.ToLower(arch), strings.ToLower(variant)
switch arch {
case "i386":
arch = "386"
variant = ""
case "x86_64", "x86-64":
arch = "amd64"
variant = ""
case "aarch64", "arm64":
arch = "arm64"
switch variant {
case "8", "v8":
variant = ""
}
case "armhf":
arch = "arm"
variant = "v7"
case "armel":
arch = "arm"
variant = "v6"
case "arm":
switch variant {
case "", "7":
variant = "v7"
case "5", "6", "8":
variant = "v" + variant
}
}
return arch, variant
}
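The unexported `normalizeOS`/`normalizeArch` helpers above are reachable through the exported `Normalize` function; a small sketch (sample inputs invented) showing the translations listed in the package documentation:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Normalize funnels through normalizeOS and normalizeArch above, e.g.
	// macOS/x86_64 -> darwin/amd64, aarch64 -> arm64, armhf -> arm/v7.
	for _, p := range []specs.Platform{
		{OS: "macOS", Architecture: "x86_64"},
		{OS: "linux", Architecture: "aarch64"},
		{OS: "linux", Architecture: "armhf"},
	} {
		fmt.Printf("%s/%s -> %s\n", p.OS, p.Architecture, platforms.Format(platforms.Normalize(p)))
	}
}
```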

View File

@@ -0,0 +1,38 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import (
"runtime"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// DefaultString returns the default string specifier for the platform.
func DefaultString() string {
return Format(DefaultSpec())
}
// DefaultSpec returns the current platform's default platform specification.
func DefaultSpec() specs.Platform {
return specs.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
// The Variant field will be empty if arch != ARM.
Variant: cpuVariant,
}
}
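A short sketch of the defaults above; the printed value depends on the host (e.g. `linux/amd64` on a typical x86-64 Linux machine, with a Variant such as `v7` appended on 32-bit ARM):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// DefaultString() is just Format(DefaultSpec()).
	fmt.Println(platforms.DefaultString())

	spec := platforms.DefaultSpec()
	fmt.Println(spec.OS, spec.Architecture, spec.Variant)
}
```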

View File

@@ -0,0 +1,24 @@
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
// Default returns the default matcher for the platform.
func Default() MatchComparer {
return Only(DefaultSpec())
}

View File

@@ -0,0 +1,31 @@
// +build windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import (
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// Default returns the default matcher for the platform.
func Default() MatchComparer {
return Ordered(DefaultSpec(), specs.Platform{
OS: "linux",
Architecture: "amd64",
})
}
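A hedged sketch of the fallback described above: on Windows, `Default()` is an ordered comparer that also accepts `linux/amd64` images, while on other hosts the same call simply reflects that host's own default platform:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	m := platforms.Default()
	// True on Windows (and on linux/amd64 hosts); false e.g. on darwin/arm64.
	fmt.Println(m.Match(specs.Platform{OS: "linux", Architecture: "amd64"}))
}
```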

View File

@@ -0,0 +1,279 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package platforms provides a toolkit for normalizing, matching and
// specifying container platforms.
//
// Centered around OCI platform specifications, we define a string-based
// specifier syntax that can be used for user input. With a specifier, users
// only need to specify the parts of the platform that are relevant to their
// context, providing an operating system or architecture or both.
//
// How do I use this package?
//
// The vast majority of use cases should simply use the match function with
// user input. The first step is to parse a specifier into a matcher:
//
// m, err := Parse("linux")
// if err != nil { ... }
//
// Once you have a matcher, use it to match against the platform declared by a
// component, typically from an image or runtime. Since extracting an image's
// platform is a little more involved, we'll use an example against the
// platform default:
//
// if ok := m.Match(Default()); !ok { /* doesn't match */ }
//
// This can be composed in loops for resolving runtimes or used as a filter for
// fetching and selecting images.
//
// More details of the specifier syntax and platform spec follow.
//
// Declaring Platform Support
//
// Components that have strict platform requirements should use the OCI
// platform specification to declare their support. Typically, these will be
// images and runtimes, which should declare specifically which platform they
// support. This looks roughly as follows:
//
// type Platform struct {
// Architecture string
// OS string
// Variant string
// }
//
// Most images and runtimes should at least set Architecture and OS, according
// to their GOARCH and GOOS values, respectively (follow the OCI image
// specification when in doubt). ARM should set variant under certain
// discussions, which are outlined below.
//
// Platform Specifiers
//
// While the OCI platform specifications provide a tool for components to
// specify structured information, user input typically doesn't need the full
// context and much can be inferred. To solve this problem, we introduced
// "specifiers". A specifier has the format
// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
// operating system or the architecture or both.
//
// An example of a common specifier is `linux/amd64`. If the host has a default
// runtime that matches this, the user can simply provide the component that
// matters. For example, if an image provides amd64 and arm64 support, the
// operating system, `linux`, can be inferred, so they only have to provide
// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
// where the architecture may be known but a runtime may support images from
// different operating systems.
//
// Normalization
//
// Because not all users are familiar with the way the Go runtime represents
// platforms, several normalizations have been provided to make this package
// easier to use.
//
// The following are performed for architectures:
//
// Value Normalized
// aarch64 arm64
// armhf arm
// armel arm/v6
// i386 386
// x86_64 amd64
// x86-64 amd64
//
// We also normalize the operating system `macos` to `darwin`.
//
// ARM Support
//
// To qualify ARM architecture, the Variant field is used to qualify the arm
// version. The most common arm version, v7, is represented without the variant
// unless it is explicitly provided. This is treated as equivalent to armhf. A
// previous architecture, armel, will be normalized to arm/v6.
//
// While these normalizations are provided, their support on arm platforms has
// not yet been fully implemented and tested.
package platforms
import (
"regexp"
"runtime"
"strconv"
"strings"
"github.com/containerd/containerd/errdefs"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
var (
specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
)
// Matcher matches platforms specifications, provided by an image or runtime.
type Matcher interface {
Match(platform specs.Platform) bool
}
// NewMatcher returns a simple matcher based on the provided platform
// specification. The returned matcher only looks for equality based on os,
// architecture and variant.
//
// One may implement their own matcher if this doesn't provide the required
// functionality.
//
// Applications should opt to use `Match` over directly parsing specifiers.
func NewMatcher(platform specs.Platform) Matcher {
return &matcher{
Platform: Normalize(platform),
}
}
type matcher struct {
specs.Platform
}
func (m *matcher) Match(platform specs.Platform) bool {
normalized := Normalize(platform)
return m.OS == normalized.OS &&
m.Architecture == normalized.Architecture &&
m.Variant == normalized.Variant
}
func (m *matcher) String() string {
return Format(m.Platform)
}
// Parse parses the platform specifier syntax into a platform declaration.
//
// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
// The minimum required information for a platform specifier is the operating
// system or architecture. If there is only a single string (no slashes), the
// value will be matched against the known set of operating systems, then fall
// back to the known set of architectures. The missing component will be
// inferred based on the local environment.
func Parse(specifier string) (specs.Platform, error) {
if strings.Contains(specifier, "*") {
// TODO(stevvooe): need to work out exact wildcard handling
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
}
parts := strings.Split(specifier, "/")
for _, part := range parts {
if !specifierRe.MatchString(part) {
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
}
}
var p specs.Platform
switch len(parts) {
case 1:
// in this case, we will test that the value might be an OS, then look
// it up. If it is not known, we'll treat it as an architecture. Since
// we have very little information about the platform here, we are
// going to be a little more strict if we don't know about the argument
// value.
p.OS = normalizeOS(parts[0])
if isKnownOS(p.OS) {
// picks a default architecture
p.Architecture = runtime.GOARCH
if p.Architecture == "arm" {
// TODO(stevvooe): Resolve arm variant, if not v6 (default)
return specs.Platform{}, errors.Wrapf(errdefs.ErrNotImplemented, "arm support not fully implemented")
}
return p, nil
}
p.Architecture, p.Variant = normalizeArch(parts[0], "")
if p.Architecture == "arm" && p.Variant == "v7" {
p.Variant = ""
}
if isKnownArch(p.Architecture) {
p.OS = runtime.GOOS
return p, nil
}
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
case 2:
// In this case, we treat it as a regular os/arch pair. We don't care
// about whether or not we know of the platform.
p.OS = normalizeOS(parts[0])
p.Architecture, p.Variant = normalizeArch(parts[1], "")
if p.Architecture == "arm" && p.Variant == "v7" {
p.Variant = ""
}
return p, nil
case 3:
// we have a fully specified variant, this is rare
p.OS = normalizeOS(parts[0])
p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
if p.Architecture == "arm64" && p.Variant == "" {
p.Variant = "v8"
}
return p, nil
}
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
}
// MustParse is like Parse but panics if the specifier cannot be parsed.
// Simplifies initialization of global variables.
func MustParse(specifier string) specs.Platform {
p, err := Parse(specifier)
if err != nil {
panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
}
return p
}
// Format returns a string specifier from the provided platform specification.
func Format(platform specs.Platform) string {
if platform.OS == "" {
return "unknown"
}
return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
}
func joinNotEmpty(s ...string) string {
var ss []string
for _, s := range s {
if s == "" {
continue
}
ss = append(ss, s)
}
return strings.Join(ss, "/")
}
// Normalize validates and translates the platform to the canonical value.
//
// For example, if "Aarch64" is encountered, we change it to "arm64" or if
// "x86_64" is encountered, it becomes "amd64".
func Normalize(platform specs.Platform) specs.Platform {
platform.OS = normalizeOS(platform.OS)
platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
// these fields are deprecated, remove them
platform.OSFeatures = nil
platform.OSVersion = ""
return platform
}
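For orientation, a minimal sketch of how the helpers above compose; the import path (the vendored github.com/containerd/containerd/platforms) and the sample specifier are illustrative assumptions, not part of this diff:

// Hedged usage sketch: parse a specifier, normalize it, and print the canonical form.
package main

import (
    "fmt"

    "github.com/containerd/containerd/platforms" // assumed vendored import path
)

func main() {
    // "x86_64" is normalized to "amd64"; with an os/arch pair nothing needs to be
    // inferred from the local environment.
    p, err := platforms.Parse("linux/x86_64")
    if err != nil {
        panic(err)
    }
    fmt.Println(platforms.Format(platforms.Normalize(p))) // prints "linux/amd64"
}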

View File

@@ -40,8 +40,8 @@ func CheckAuthFile(authfile string) error {
// data with the original parameter.
func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string) *types.SystemContext {
if sys != nil {
sysCopy := *sys
sys = &sysCopy
copy := *sys
sys = &copy
} else {
sys = &types.SystemContext{}
}
@@ -126,7 +126,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
if err = docker.CheckAuth(ctx, systemContext, username, password, server); err == nil {
// Write the new credentials to the authfile
if err := config.SetAuthentication(systemContext, server, username, password); err != nil {
if err = config.SetAuthentication(systemContext, server, username, password); err != nil {
return err
}
}
@@ -150,13 +150,17 @@ func getRegistryName(server string) string {
// gets the registry from the input. If the input is of the form
// quay.io/myuser/myimage, it will parse it and just return quay.io
split := strings.Split(server, "/")
if len(split) > 1 {
return split[0]
}
return split[0]
}
// getUserAndPass gets the username and password from STDIN if not given
// using the -u and -p flags. If the username prompt is left empty, the
// displayed userFromAuthFile will be used instead.
func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user, pass string, err error) {
func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (string, string, error) {
var err error
reader := bufio.NewReader(opts.Stdin)
username := opts.Username
if username == "" {

View File

@@ -1,95 +0,0 @@
package retry
import (
"context"
"io"
"math"
"net"
"net/url"
"syscall"
"time"
"github.com/docker/distribution/registry/api/errcode"
errcodev2 "github.com/docker/distribution/registry/api/v2"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// RetryOptions defines the options used when retrying an operation
type RetryOptions struct {
MaxRetry int // The number of times to possibly retry
Delay time.Duration // The delay to use between retries, if set
}
// RetryIfNecessary retries the operation with exponential backoff according to retryOptions
func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error {
err := operation()
for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ {
delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
if retryOptions.Delay != 0 {
delay = retryOptions.Delay
}
logrus.Infof("Warning: failed, retrying in %s ... (%d/%d)", delay, attempt+1, retryOptions.MaxRetry)
select {
case <-time.After(delay):
break
case <-ctx.Done():
return err
}
err = operation()
}
return err
}
func isRetryable(err error) bool {
err = errors.Cause(err)
if err == context.Canceled || err == context.DeadlineExceeded {
return false
}
type unwrapper interface {
Unwrap() error
}
switch e := err.(type) {
case errcode.Error:
switch e.Code {
case errcode.ErrorCodeUnauthorized, errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown:
return false
}
return true
case *net.OpError:
return isRetryable(e.Err)
case *url.Error: // This includes errors returned by the net/http client.
if e.Err == io.EOF { // Happens when a server accepts an HTTP connection and sends EOF
return true
}
return isRetryable(e.Err)
case syscall.Errno:
return e != syscall.ECONNREFUSED
case errcode.Errors:
// if this error is a group of errors, process them all in turn
for i := range e {
if !isRetryable(e[i]) {
return false
}
}
return true
case *multierror.Error:
// if this error is a group of errors, process them all in turn
for i := range e.Errors {
if !isRetryable(e.Errors[i]) {
return false
}
}
return true
case unwrapper:
err = e.Unwrap()
return isRetryable(err)
}
return false
}
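A minimal usage sketch for the retry helper above; the import path (github.com/containers/common/pkg/retry) and the simulated transient error are assumptions for illustration:

// Hedged usage sketch: retry a flaky operation until it succeeds or MaxRetry is exhausted.
package main

import (
    "context"
    "syscall"
    "time"

    "github.com/containers/common/pkg/retry" // assumed vendored import path
)

func main() {
    attempts := 0
    op := func() error {
        attempts++
        if attempts < 3 {
            return syscall.ECONNRESET // classified as retryable by isRetryable above
        }
        return nil
    }
    _ = retry.RetryIfNecessary(context.Background(), op, &retry.RetryOptions{
        MaxRetry: 5,
        Delay:    100 * time.Millisecond, // fixed delay; exponential backoff is used when unset
    })
}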

View File

@@ -377,7 +377,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
if len(sigs) != 0 {
c.Printf("Checking if image list destination supports signatures\n")
if err := c.dest.SupportsSignatures(ctx); err != nil {
return nil, "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
return nil, "", errors.Wrap(err, "Can not copy signatures")
}
}
canModifyManifestList := (len(sigs) == 0)
@@ -595,7 +595,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
if len(sigs) != 0 {
c.Printf("Checking if image destination supports signatures\n")
if err := c.dest.SupportsSignatures(ctx); err != nil {
return nil, "", "", errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
return nil, "", "", errors.Wrap(err, "Can not copy signatures")
}
}

View File

@@ -3,8 +3,9 @@ package archive
import (
"context"
"io"
"os"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/tarfile"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
@@ -12,38 +13,37 @@ import (
type archiveImageDestination struct {
*tarfile.Destination // Implements most of types.ImageDestination
ref archiveReference
archive *tarfile.Writer // Should only be closed if writer != nil
writer io.Closer // May be nil if the archive is shared
writer io.Closer
}
func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
if ref.sourceIndex != -1 {
return nil, errors.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
// ref.path can be either a pipe or a regular file
// in the case of a pipe, we require that we can open it for write
// in the case of a regular file, we don't want to overwrite any pre-existing file
// so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
// only in a different way. Either way, it's up to the user to not have two writers to the same path.)
fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return nil, errors.Wrapf(err, "error opening file %q", ref.path)
}
var archive *tarfile.Writer
var writer io.Closer
if ref.archiveWriter != nil {
archive = ref.archiveWriter
writer = nil
} else {
fh, err := openArchiveForWriting(ref.path)
if err != nil {
return nil, err
}
archive = tarfile.NewWriter(fh)
writer = fh
fhStat, err := fh.Stat()
if err != nil {
return nil, errors.Wrapf(err, "error statting file %q", ref.path)
}
tarDest := tarfile.NewDestination(sys, archive, ref.ref)
if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
return nil, errors.New("docker-archive doesn't support modifying existing images")
}
tarDest := tarfile.NewDestinationWithContext(sys, fh, ref.destinationRef)
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
}
return &archiveImageDestination{
Destination: tarDest,
ref: ref,
archive: archive,
writer: writer,
writer: fh,
}, nil
}
@@ -60,10 +60,7 @@ func (d *archiveImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *archiveImageDestination) Close() error {
if d.writer != nil {
return d.writer.Close()
}
return nil
return d.writer.Close()
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
@@ -71,8 +68,5 @@ func (d *archiveImageDestination) Close() error {
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
if d.writer != nil {
return d.archive.Close()
}
return nil
return d.Destination.Commit(ctx)
}
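As a usage sketch, this is roughly how callers reach this destination: copy.Image writing to a docker-archive reference. The import paths, the permissive policy, and the example image names are assumptions for illustration, not part of this diff:

// Hedged usage sketch: copy an image from a registry into a docker-archive file.
package main

import (
    "context"

    "github.com/containers/image/v5/copy"
    "github.com/containers/image/v5/signature"
    "github.com/containers/image/v5/transports/alltransports"
)

func main() {
    src, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
    if err != nil {
        panic(err)
    }
    dest, err := alltransports.ParseImageName("docker-archive:/tmp/busybox.tar:busybox:latest")
    if err != nil {
        panic(err)
    }
    // Accept-anything policy, for illustration only.
    policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
    pc, err := signature.NewPolicyContext(policy)
    if err != nil {
        panic(err)
    }
    defer pc.Destroy()
    if _, err := copy.Image(context.Background(), pc, dest, src, nil); err != nil {
        panic(err)
    }
}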

View File

@@ -1,120 +0,0 @@
package archive
import (
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
// Reader manages a single Docker archive, allows listing its contents and accessing
// individual images with less overhead than creating image references individually
// (because the archive is, if necessary, copied or decompressed only once).
type Reader struct {
path string // The original, user-specified path; not the maintained temporary file, if any
archive *tarfile.Reader
}
// NewReader returns a Reader for path.
// The caller should call .Close() on the returned object.
func NewReader(sys *types.SystemContext, path string) (*Reader, error) {
archive, err := tarfile.NewReaderFromFile(sys, path)
if err != nil {
return nil, err
}
return &Reader{
path: path,
archive: archive,
}, nil
}
// Close deletes temporary files associated with the Reader, if any.
func (r *Reader) Close() error {
return r.archive.Close()
}
// NewReaderForReference creates a Reader from a Reader-independent imageReference, which must be from docker/archive.Transport,
// and a variant of imageReference that points at the same image within the reader.
// The caller should call .Close() on the returned Reader.
func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) {
standalone, ok := ref.(archiveReference)
if !ok {
return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
}
if standalone.archiveReader != nil {
return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
}
reader, err := NewReader(sys, standalone.path)
if err != nil {
return nil, nil, err
}
succeeded := false
defer func() {
if !succeeded {
reader.Close()
}
}()
readerRef, err := newReference(standalone.path, standalone.ref, standalone.sourceIndex, reader.archive, nil)
if err != nil {
return nil, nil, err
}
succeeded = true
return reader, readerRef, nil
}
// List returns the set of references for images in the Reader,
// grouped by the image the references point to.
// The references are valid only until the Reader is closed.
func (r *Reader) List() ([][]types.ImageReference, error) {
res := [][]types.ImageReference{}
for imageIndex, image := range r.archive.Manifest {
refs := []types.ImageReference{}
for _, tag := range image.RepoTags {
parsedTag, err := reference.ParseNormalizedNamed(tag)
if err != nil {
return nil, errors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex)
}
nt, ok := parsedTag.(reference.NamedTagged)
if !ok {
return nil, errors.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
}
ref, err := newReference(r.path, nt, -1, r.archive, nil)
if err != nil {
return nil, errors.Wrapf(err, "Error creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
}
refs = append(refs, ref)
}
if len(refs) == 0 {
ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
if err != nil {
return nil, errors.Wrapf(err, "Error creating a reference for manifest item @%d", imageIndex)
}
refs = append(refs, ref)
}
res = append(res, refs)
}
return res, nil
}
// ManifestTagsForReference returns the set of tags “matching” ref in reader, as strings
// (i.e. exposing the short names before normalization).
// The function reports an error if ref does not identify a single image.
// If ref contains a NamedTagged reference, only a single tag “matching” ref is returned;
// If ref contains a source index, or neither a NamedTagged nor a source index, all tags
// matching the image are returned.
// Almost all users should use List() or ImageReference.DockerReference() instead.
func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) {
archiveRef, ok := ref.(archiveReference)
if !ok {
return nil, errors.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
}
manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex)
if err != nil {
return nil, err
}
if tagIndex != -1 {
return []string{manifestItem.RepoTags[tagIndex]}, nil
}
return manifestItem.RepoTags, nil
}
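A brief usage sketch for the Reader API above (present on one side of this compare); the import paths and the archive path are illustrative assumptions:

// Hedged usage sketch: list every reference contained in a docker-archive tarball.
package main

import (
    "fmt"

    "github.com/containers/image/v5/docker/archive"
    "github.com/containers/image/v5/transports"
)

func main() {
    reader, err := archive.NewReader(nil, "/tmp/images.tar")
    if err != nil {
        panic(err)
    }
    defer reader.Close()

    refLists, err := reader.List() // one slice of references per image in the archive
    if err != nil {
        panic(err)
    }
    for _, refs := range refLists {
        for _, ref := range refs {
            fmt.Println(transports.ImageName(ref))
        }
    }
}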

View File

@@ -3,8 +3,9 @@ package archive
import (
"context"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/tarfile"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
)
type archiveImageSource struct {
@@ -15,20 +16,13 @@ type archiveImageSource struct {
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) {
var archive *tarfile.Reader
var closeArchive bool
if ref.archiveReader != nil {
archive = ref.archiveReader
closeArchive = false
} else {
a, err := tarfile.NewReaderFromFile(sys, ref.path)
if err != nil {
return nil, err
}
archive = a
closeArchive = true
if ref.destinationRef != nil {
logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
}
src, err := tarfile.NewSourceFromFileWithContext(sys, ref.path)
if err != nil {
return nil, err
}
src := tarfile.NewSource(archive, closeArchive, ref.ref, ref.sourceIndex)
return &archiveImageSource{
Source: src,
ref: ref,

View File

@@ -3,10 +3,8 @@ package archive
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
ctrImage "github.com/containers/image/v5/image"
"github.com/containers/image/v5/transports"
@@ -44,16 +42,9 @@ func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
// archiveReference is an ImageReference for Docker images.
type archiveReference struct {
path string
// May be nil to read the only image in an archive, or to create an untagged image.
ref reference.NamedTagged
// If not -1, a zero-based index of the image in the manifest. Valid only for sources.
// Must not be set if ref is set.
sourceIndex int
// If not nil, must have been created from path (but archiveReader.path may point at a temporary
// file, not necessarily path precisely).
archiveReader *tarfile.Reader
// If not nil, must have been created for path
archiveWriter *tarfile.Writer
// only used for destinations,
// archiveReference.destinationRef is optional and can be nil for destinations as well.
destinationRef reference.NamedTagged
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
@@ -64,69 +55,37 @@ func ParseReference(refString string) (types.ImageReference, error) {
parts := strings.SplitN(refString, ":", 2)
path := parts[0]
var nt reference.NamedTagged
sourceIndex := -1
var destinationRef reference.NamedTagged
// A :tag was specified, which is only necessary for destinations.
if len(parts) == 2 {
// A :tag or :@index was specified.
if len(parts[1]) > 0 && parts[1][0] == '@' {
i, err := strconv.Atoi(parts[1][1:])
if err != nil {
return nil, errors.Wrapf(err, "Invalid source index %s", parts[1])
}
if i < 0 {
return nil, errors.Errorf("Invalid source index @%d: must not be negative", i)
}
sourceIndex = i
} else {
ref, err := reference.ParseNormalizedNamed(parts[1])
if err != nil {
return nil, errors.Wrapf(err, "docker-archive parsing reference")
}
ref = reference.TagNameOnly(ref)
refTagged, isTagged := ref.(reference.NamedTagged)
if !isTagged { // If ref contains a digest, TagNameOnly does not change it
return nil, errors.Errorf("reference does not include a tag: %s", ref.String())
}
nt = refTagged
ref, err := reference.ParseNormalizedNamed(parts[1])
if err != nil {
return nil, errors.Wrapf(err, "docker-archive parsing reference")
}
ref = reference.TagNameOnly(ref)
refTagged, isTagged := ref.(reference.NamedTagged)
if !isTagged {
// Really shouldn't be hit...
return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString)
}
destinationRef = refTagged
}
return newReference(path, nt, sourceIndex, nil, nil)
return NewReference(path, destinationRef)
}
// NewReference returns a Docker archive reference for a path and an optional reference.
func NewReference(path string, ref reference.NamedTagged) (types.ImageReference, error) {
return newReference(path, ref, -1, nil, nil)
}
// NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index.
func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) {
return newReference(path, nil, sourceIndex, nil, nil)
}
// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
func newReference(path string, ref reference.NamedTagged, sourceIndex int,
archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
// NewReference returns a Docker archive reference for a path and an optional destination reference.
func NewReference(path string, destinationRef reference.NamedTagged) (types.ImageReference, error) {
if strings.Contains(path, ":") {
return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
}
if ref != nil && sourceIndex != -1 {
return nil, errors.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
}
if _, isDigest := ref.(reference.Canonical); isDigest {
return nil, errors.Errorf("docker-archive doesn't support digest references: %s", ref.String())
}
if sourceIndex != -1 && sourceIndex < 0 {
return nil, errors.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
if _, isDigest := destinationRef.(reference.Canonical); isDigest {
return nil, errors.Errorf("docker-archive doesn't support digest references: %s", destinationRef.String())
}
return archiveReference{
path: path,
ref: ref,
sourceIndex: sourceIndex,
archiveReader: archiveReader,
archiveWriter: archiveWriter,
path: path,
destinationRef: destinationRef,
}, nil
}
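For orientation, the two specifier forms ParseReference accepts per the parsing logic above; the `:@index` form exists only on the side of this compare that carries sourceIndex. The import path and example paths are illustrative assumptions:

// Hedged usage sketch of docker-archive reference parsing.
package main

import "github.com/containers/image/v5/docker/archive" // assumed import path

func main() {
    // Tagged form: a path, then an ordinary named-tagged reference.
    if _, err := archive.ParseReference("/tmp/images.tar:busybox:latest"); err != nil {
        panic(err)
    }
    // Index form: "@N" selects the N-th (zero-based) entry of the archive's manifest.json.
    if _, err := archive.ParseReference("/tmp/images.tar:@0"); err != nil {
        panic(err)
    }
}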
@@ -140,21 +99,17 @@ func (ref archiveReference) Transport() types.ImageTransport {
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref archiveReference) StringWithinTransport() string {
switch {
case ref.ref != nil:
return fmt.Sprintf("%s:%s", ref.path, ref.ref.String())
case ref.sourceIndex != -1:
return fmt.Sprintf("%s:@%d", ref.path, ref.sourceIndex)
default:
if ref.destinationRef == nil {
return ref.path
}
return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String())
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref archiveReference) DockerReference() reference.Named {
return ref.ref
return ref.destinationRef
}
// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.

View File

@@ -1,82 +0,0 @@
package archive
import (
"io"
"os"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
// Writer manages a single in-progress Docker archive and allows adding images to it.
type Writer struct {
path string // The original, user-specified path; not the maintained temporary file, if any
archive *tarfile.Writer
writer io.Closer
}
// NewWriter returns a Writer for path.
// The caller should call .Close() on the returned object.
func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
fh, err := openArchiveForWriting(path)
if err != nil {
return nil, err
}
archive := tarfile.NewWriter(fh)
return &Writer{
path: path,
archive: archive,
writer: fh,
}, nil
}
// Close writes all outstanding data about images to the archive, and
// releases state associated with the Writer, if any.
// No more images can be added after this is called.
func (w *Writer) Close() error {
err := w.archive.Close()
if err2 := w.writer.Close(); err2 != nil && err == nil {
err = err2
}
return err
}
// NewReference returns an ImageReference that allows adding an image to Writer,
// with an optional reference.
func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
return newReference(w.path, destinationRef, -1, nil, w.archive)
}
// openArchiveForWriting opens path for writing a tar archive,
// making a few sanity checks.
func openArchiveForWriting(path string) (*os.File, error) {
// path can be either a pipe or a regular file
// in the case of a pipe, we require that we can open it for write
// in the case of a regular file, we don't want to overwrite any pre-existing file
// so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
// only in a different way. Either way, it's up to the user to not have two writers to the same path.)
fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return nil, errors.Wrapf(err, "error opening file %q", path)
}
succeeded := false
defer func() {
if !succeeded {
fh.Close()
}
}()
fhStat, err := fh.Stat()
if err != nil {
return nil, errors.Wrapf(err, "error statting file %q", path)
}
if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
return nil, errors.New("docker-archive doesn't support modifying existing images")
}
succeeded = true
return fh, nil
}
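A brief usage sketch for the Writer above; the import paths and the output path are illustrative assumptions:

// Hedged usage sketch: create a docker-archive and obtain a destination reference for one image.
package main

import (
    "github.com/containers/image/v5/docker/archive"
    "github.com/containers/image/v5/docker/reference"
)

func main() {
    w, err := archive.NewWriter(nil, "/tmp/out.tar")
    if err != nil {
        panic(err)
    }
    defer w.Close() // finalizes manifest.json and the repositories metadata

    named, err := reference.ParseNormalizedNamed("busybox:latest")
    if err != nil {
        panic(err)
    }
    tagged, ok := named.(reference.NamedTagged)
    if !ok {
        panic("expected a tagged reference")
    }

    destRef, err := w.NewReference(tagged) // usable as a destination for copy.Image
    if err != nil {
        panic(err)
    }
    _ = destRef
}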

View File

@@ -4,8 +4,8 @@ import (
"context"
"io"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/docker/tarfile"
"github.com/containers/image/v5/types"
"github.com/docker/docker/client"
"github.com/pkg/errors"
@@ -16,7 +16,6 @@ type daemonImageDestination struct {
ref daemonReference
mustMatchRuntimeOS bool
*tarfile.Destination // Implements most of types.ImageDestination
archive *tarfile.Writer
// For talking to imageLoadGoroutine
goroutineCancel context.CancelFunc
statusChannel <-chan error
@@ -46,7 +45,6 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
}
reader, writer := io.Pipe()
archive := tarfile.NewWriter(writer)
// Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
statusChannel := make(chan error, 1)
@@ -56,8 +54,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
return &daemonImageDestination{
ref: ref,
mustMatchRuntimeOS: mustMatchRuntimeOS,
Destination: tarfile.NewDestination(sys, archive, namedTaggedRef),
archive: archive,
Destination: tarfile.NewDestinationWithContext(sys, writer, namedTaggedRef),
goroutineCancel: goroutineCancel,
statusChannel: statusChannel,
writer: writer,
@@ -133,7 +130,7 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
logrus.Debugf("docker-daemon: Closing tar stream")
if err := d.archive.Close(); err != nil {
if err := d.Destination.Commit(ctx); err != nil {
return err
}
if err := d.writer.Close(); err != nil {

View File

@@ -3,7 +3,7 @@ package daemon
import (
"context"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/tarfile"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
@@ -35,11 +35,10 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
}
defer inputStream.Close()
archive, err := tarfile.NewReaderFromStream(sys, inputStream)
src, err := tarfile.NewSourceFromStreamWithSystemContext(sys, inputStream)
if err != nil {
return nil, err
}
src := tarfile.NewSource(archive, true, nil, -1)
return &daemonImageSource{
ref: ref,
Source: src,

View File

@@ -331,6 +331,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
// Results holds the results returned by the /v1/search endpoint
Results []SearchResult `json:"results"`
}
v2Res := &V2Results{}
v1Res := &V1Results{}
// Get credentials from authfile for the underlying hostname
@@ -387,55 +388,31 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
}
logrus.Debugf("trying to talk to v2 search endpoint")
searchRes := []SearchResult{}
path := "/v2/_catalog"
for len(searchRes) < limit {
resp, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp, ""))
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
v2Res := &V2Results{}
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
return nil, err
}
for _, repo := range v2Res.Repositories {
if len(searchRes) == limit {
break
} else {
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
return nil, err
}
if strings.Contains(repo, image) {
res := SearchResult{
Name: repo,
searchRes := []SearchResult{}
for _, repo := range v2Res.Repositories {
if strings.Contains(repo, image) {
res := SearchResult{
Name: repo,
}
searchRes = append(searchRes, res)
}
searchRes = append(searchRes, res)
}
}
link := resp.Header.Get("Link")
if link == "" {
break
}
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
if err != nil {
return searchRes, err
}
// can be relative or absolute, but we only want the path (and I
// guess we're in trouble if it forwards to a new place...)
path = linkURL.Path
if linkURL.RawQuery != "" {
path += "?"
path += linkURL.RawQuery
return searchRes, nil
}
}
return searchRes, nil
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
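A short usage sketch for SearchRegistry as declared in the hunk header above; the registry host and search term are placeholders, and the import path is an assumption:

// Hedged usage sketch: query a registry's search/catalog endpoint.
package main

import (
    "context"
    "fmt"

    "github.com/containers/image/v5/docker"
)

func main() {
    results, err := docker.SearchRegistry(context.Background(), nil, "quay.io", "skopeo", 10)
    if err != nil {
        panic(err)
    }
    for _, r := range results {
        fmt.Println(r.Name)
    }
}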
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.

View File

@@ -22,6 +22,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@@ -153,7 +154,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
}
uploadLocation, err := res.Location()
if err != nil {
@@ -174,7 +175,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
return nil, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer chunked")
return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer chunked")
}
uploadLocation, err := res.Location()
if err != nil {
@@ -200,7 +201,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "Error uploading layer to %s", uploadLocation)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation)
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
@@ -225,7 +226,7 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
@@ -276,7 +277,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
default:
logrus.Debugf("Error mounting, response %#v", *res)
return errors.Wrapf(registryHTTPResponseToError(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
}
}
@@ -413,7 +414,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
err = errors.Wrapf(registryHTTPResponseToError(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
if isManifestInvalidError(errors.Cause(err)) {
err = types.ManifestTypeRejectedError{Err: err}
}
@@ -640,7 +641,7 @@ sigExists:
logrus.Debugf("Error body %s", string(body))
}
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
return errors.Wrapf(registryHTTPResponseToError(res), "Error uploading signature to %s in %s", path, d.c.registry)
return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
}
}

View File

@@ -17,6 +17,7 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/docker/distribution/registry/client"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -192,7 +193,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
}
manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
@@ -234,9 +235,6 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
resp *http.Response
err error
)
if len(urls) == 0 {
return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
}
for _, url := range urls {
resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
if err == nil {

View File

@@ -44,17 +44,3 @@ func httpResponseToError(res *http.Response, context string) error {
return perrors.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
}
}
// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
// registry
func registryHTTPResponseToError(res *http.Response) error {
errResponse := client.HandleErrorResponse(res)
if e, ok := perrors.Cause(errResponse).(*client.UnexpectedHTTPResponseError); ok {
response := string(e.Response)
if len(response) > 50 {
response = response[:50] + "..."
}
errResponse = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response)
}
return errResponse
}

View File

@@ -1,217 +0,0 @@
package tarfile
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
"os"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
archive *Writer
repoTags []reference.NamedTagged
// Other state.
config []byte
sysCtx *types.SystemContext
}
// NewDestination returns a tarfile.Destination adding images to the specified Writer.
func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination {
repoTags := []reference.NamedTagged{}
if ref != nil {
repoTags = append(repoTags, ref)
}
return &Destination{
archive: archive,
repoTags: repoTags,
sysCtx: sys,
}
}
// AddRepoTags adds the specified tags to the destination's repoTags.
func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
d.repoTags = append(d.repoTags, tags...)
}
// SupportedManifestMIMETypes tells which manifest mime types the destination supports
// If an empty slice or nil is returned, then any mime type can be tried to upload
func (d *Destination) SupportedManifestMIMETypes() []string {
return []string{
manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
}
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *Destination) SupportsSignatures(ctx context.Context) error {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
return false
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
return false
}
// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
// The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
// this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn't
// much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
if err != nil {
return types.BlobInfo{}, err
}
defer os.Remove(streamCopy.Name())
defer streamCopy.Close()
digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash())
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(streamCopy, tee)
if err != nil {
return types.BlobInfo{}, err
}
_, err = streamCopy.Seek(0, io.SeekStart)
if err != nil {
return types.BlobInfo{}, err
}
inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
if inputInfo.Digest == "" {
inputInfo.Digest = digester.Digest()
}
stream = streamCopy
logrus.Debugf("... streaming done")
}
if err := d.archive.lock(); err != nil {
return types.BlobInfo{}, err
}
defer d.archive.unlock()
// Maybe the blob has been already sent
ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
return reusedInfo, nil
}
if isConfig {
buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
}
d.config = buf
if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
}
} else {
if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
return types.BlobInfo{}, err
}
}
d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if err := d.archive.lock(); err != nil {
return false, types.BlobInfo{}, err
}
defer d.archive.unlock()
return d.archive.tryReusingBlobLocked(info)
}
// PutManifest writes manifest to the destination.
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.New(`Manifest lists are not supported for docker tar files`)
}
// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
// so the caller trying a different manifest kind would be pointless.
var man manifest.Schema2
if err := json.Unmarshal(m, &man); err != nil {
return errors.Wrap(err, "Error parsing manifest")
}
if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
}
if err := d.archive.lock(); err != nil {
return err
}
defer d.archive.unlock()
if err := d.archive.writeLegacyMetadataLocked(man.LayersDescriptors, d.config, d.repoTags); err != nil {
return err
}
return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
}
// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.Errorf(`Manifest lists are not supported for docker tar files`)
}
if len(signatures) != 0 {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
return nil
}

View File

@@ -1,269 +0,0 @@
package tarfile
import (
"archive/tar"
"encoding/json"
"io"
"io/ioutil"
"os"
"path"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
)
// Reader is a (docker save)-formatted tar archive that allows random access to any component.
type Reader struct {
// None of the fields below are modified after the archive is created, until .Close();
// this allows concurrent readers of the same archive.
path string // "" if the archive has already been closed.
removeOnClose bool // Remove file on close if true
Manifest []ManifestItem // Guaranteed to exist after the archive is created.
}
// NewReaderFromFile returns a Reader for the specified path.
// The caller should call .Close() on the returned archive when done.
func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
file, err := os.Open(path)
if err != nil {
return nil, errors.Wrapf(err, "error opening file %q", path)
}
defer file.Close()
// If the file is already not compressed we can just return the file itself
// as a source. Otherwise we pass the stream to NewReaderFromStream.
stream, isCompressed, err := compression.AutoDecompress(file)
if err != nil {
return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
}
defer stream.Close()
if !isCompressed {
return newReader(path, false)
}
return NewReaderFromStream(sys, stream)
}
// NewReaderFromStream returns a Reader for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewReaderFromStream returns.
// The caller should call .Close() on the returned archive when done.
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
// Save inputStream to a temporary file
tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
if err != nil {
return nil, errors.Wrap(err, "error creating temporary file")
}
defer tarCopyFile.Close()
succeeded := false
defer func() {
if !succeeded {
os.Remove(tarCopyFile.Name())
}
}()
// In order to be compatible with docker-load, we need to support
// auto-decompression (it's also a nice quality-of-life thing to avoid
// giving users really confusing "invalid tar header" errors).
uncompressedStream, _, err := compression.AutoDecompress(inputStream)
if err != nil {
return nil, errors.Wrap(err, "Error auto-decompressing input")
}
defer uncompressedStream.Close()
// Copy the plain archive to the temporary file.
//
// TODO: This can take quite some time, and should ideally be cancellable
// using a context.Context.
if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
}
succeeded = true
return newReader(tarCopyFile.Name(), true)
}
// newReader creates a Reader for the specified path and removeOnClose flag.
// The caller should call .Close() on the returned archive when done.
func newReader(path string, removeOnClose bool) (*Reader, error) {
// This is a valid enough archive, except Manifest is not yet filled.
r := Reader{
path: path,
removeOnClose: removeOnClose,
}
succeeded := false
defer func() {
if !succeeded {
r.Close()
}
}()
// We initialize Manifest immediately when constructing the Reader instead
// of later on-demand because every caller will need the data, and because doing it now
// removes the need to synchronize the access/creation of the data if the archive is later
// used from multiple goroutines to access different images.
// FIXME? Do we need to deal with the legacy format?
bytes, err := r.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
if err != nil {
return nil, err
}
if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
return nil, errors.Wrap(err, "Error decoding tar manifest.json")
}
succeeded = true
return &r, nil
}
// Close removes resources associated with an initialized Reader, if any.
func (r *Reader) Close() error {
path := r.path
r.path = "" // Mark the archive as closed
if r.removeOnClose {
return os.Remove(path)
}
return nil
}
// ChooseManifestItem selects a manifest item from r.Manifest matching (ref, sourceIndex), one or
// both of which should be (nil, -1).
// On success, it returns the manifest item and an index of the matching tag, if a tag was used
// for matching; the index is -1 if a tag was not used.
func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
switch {
case ref != nil && sourceIndex != -1:
return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d",
ref.String(), sourceIndex)
case ref != nil:
refString := ref.String()
for i := range r.Manifest {
for tagIndex, tag := range r.Manifest[i].RepoTags {
parsedTag, err := reference.ParseNormalizedNamed(tag)
if err != nil {
return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
}
if parsedTag.String() == refString {
return &r.Manifest[i], tagIndex, nil
}
}
}
return nil, -1, errors.Errorf("Tag %#v not found", refString)
case sourceIndex != -1:
if sourceIndex >= len(r.Manifest) {
return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available",
sourceIndex, len(r.Manifest))
}
return &r.Manifest[sourceIndex], -1, nil
default:
if len(r.Manifest) != 1 {
return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
}
return &r.Manifest[0], -1, nil
}
}
// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
type tarReadCloser struct {
*tar.Reader
backingFile *os.File
}
func (t *tarReadCloser) Close() error {
return t.backingFile.Close()
}
// openTarComponent returns a ReadCloser for the specific file within the archive.
// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
// It is safe to call this method from multiple goroutines simultaneously.
// The caller should call .Close() on the returned stream.
func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
// This is only a sanity check; if anyone did concurrently close the Reader, this access is technically
// racy against the write in .Close().
if r.path == "" {
return nil, errors.New("Internal error: trying to read an already closed tarfile.Reader")
}
f, err := os.Open(r.path)
if err != nil {
return nil, err
}
succeeded := false
defer func() {
if !succeeded {
f.Close()
}
}()
tarReader, header, err := findTarComponent(f, componentPath)
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
// We follow only one symlink; so no loops are possible.
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
}
// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
// so we don't care.
tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
}
if !header.FileInfo().Mode().IsRegular() {
return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
}
succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
}
// findTarComponent returns a header and a reader matching componentPath within inputFile,
// or (nil, nil, nil) if not found.
func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) {
t := tar.NewReader(inputFile)
componentPath = path.Clean(componentPath)
for {
h, err := t.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, nil, err
}
if path.Clean(h.Name) == componentPath {
return t, h, nil
}
}
return nil, nil, nil
}
// readTarComponent returns full contents of componentPath.
// It is safe to call this method from multiple goroutines simultaneously.
func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
file, err := r.openTarComponent(path)
if err != nil {
return nil, errors.Wrapf(err, "Error loading tar component %s", path)
}
defer file.Close()
bytes, err := iolimits.ReadAtMost(file, limit)
if err != nil {
return nil, err
}
return bytes, nil
}

View File

@@ -1,381 +0,0 @@
package tarfile
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"sync"
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
type Writer struct {
mutex sync.Mutex
// ALL of the following members can only be accessed with the mutex held.
// Use Writer.lock() to obtain the mutex.
writer io.Writer
tar *tar.Writer // nil if the Writer has already been closed.
// Other state.
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
repositories map[string]map[string]string
legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent.
manifest []ManifestItem
manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above.
}
// NewWriter returns a Writer for the specified io.Writer.
// The caller must eventually call .Close() on the returned object to create a valid archive.
func NewWriter(dest io.Writer) *Writer {
return &Writer{
writer: dest,
tar: tar.NewWriter(dest),
blobs: make(map[digest.Digest]types.BlobInfo),
repositories: map[string]map[string]string{},
legacyLayers: map[string]struct{}{},
manifestByConfig: map[digest.Digest]int{},
}
}
// lock does some sanity checks and locks the Writer.
// If this function succeeds, the caller must call w.unlock.
// Do not use Writer.mutex directly.
func (w *Writer) lock() error {
w.mutex.Lock()
if w.tar == nil {
w.mutex.Unlock()
return errors.New("Internal error: trying to use an already closed tarfile.Writer")
}
return nil
}
// unlock releases the lock obtained by Writer.lock
// Do not use Writer.mutex directly.
func (w *Writer) unlock() {
w.mutex.Unlock()
}
// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// The caller must have locked the Writer.
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
}
if blob, ok := w.blobs[info.Digest]; ok {
return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
}
return false, types.BlobInfo{}, nil
}
// recordBlob records metadata of a recorded blob, which must contain at least a digest and size.
// The caller must have locked the Writer.
func (w *Writer) recordBlobLocked(info types.BlobInfo) {
w.blobs[info.Digest] = info
}
// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer
// The caller must have locked the Writer.
func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
if _, ok := w.legacyLayers[layerID]; !ok {
// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
// See also the comment in physicalLayerPath.
physicalLayerPath := w.physicalLayerPath(layerDigest)
if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
return errors.Wrap(err, "Error creating layer symbolic link")
}
b := []byte("1.0")
if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
return errors.Wrap(err, "Error writing VERSION file")
}
if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
return errors.Wrap(err, "Error writing config json file")
}
w.legacyLayers[layerID] = struct{}{}
}
return nil
}
// writeLegacyMetadataLocked writes legacy layer metadata and records tags for a single image.
func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2Descriptor, configBytes []byte, repoTags []reference.NamedTagged) error {
var chainID digest.Digest
lastLayerID := ""
for i, l := range layerDescriptors {
// The legacy format requires a config file per layer
layerConfig := make(map[string]interface{})
// The root layer doesn't have any parent
if lastLayerID != "" {
layerConfig["parent"] = lastLayerID
}
// The top layer configuration file is generated by using a subset of the image configuration
if i == len(layerDescriptors)-1 {
var config map[string]*json.RawMessage
err := json.Unmarshal(configBytes, &config)
if err != nil {
return errors.Wrap(err, "Error unmarshaling config")
}
for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
layerConfig[attr] = config[attr]
}
}
// This chainID value matches the computation in docker/docker/layer.CreateChainID …
if chainID == "" {
chainID = l.Digest
} else {
chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
}
// … but note that the image ID does not _exactly_ match docker/docker/image/v1.CreateID, primarily because
// we create the image configs differently in details. At least recent versions allocate new IDs on load,
// so this is fine as long as the IDs we use are unique / cannot loop.
//
// For intermediate images, we could just use the chainID as an image ID, but using a digest of ~the created
// config makes sure that everything uses the same “namespace”; a bit less efficient but clearer.
//
// Temporarily add the chainID to the config, only for the purpose of generating the image ID.
layerConfig["layer_id"] = chainID
b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
if err != nil {
return errors.Wrap(err, "Error marshaling layer config")
}
delete(layerConfig, "layer_id")
layerID := digest.Canonical.FromBytes(b).Hex()
layerConfig["id"] = layerID
configBytes, err := json.Marshal(layerConfig)
if err != nil {
return errors.Wrap(err, "Error marshaling layer config")
}
if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {
return err
}
lastLayerID = layerID
}
if lastLayerID != "" {
for _, repoTag := range repoTags {
if val, ok := w.repositories[repoTag.Name()]; ok {
val[repoTag.Tag()] = lastLayerID
} else {
w.repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): lastLayerID}
}
}
}
return nil
}
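// The following is an illustrative sketch, not part of the vendored code: it isolates the
// chain-ID scheme described above. Each legacy layer ID is derived by folding the layer
// digests together ("<previous chainID> <layer digest>"), matching
// docker/docker/layer.CreateChainID; the name exampleLegacyChainID is hypothetical.
func exampleLegacyChainID(layerDigests []digest.Digest) digest.Digest {
	var chainID digest.Digest
	for _, d := range layerDigests {
		if chainID == "" {
			chainID = d // the first (root) layer's chain ID is its own digest
		} else {
			chainID = digest.Canonical.FromString(chainID.String() + " " + d.String())
		}
	}
	return chainID
}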
// checkManifestItemsMatch checks that a and b describe the same image,
// and returns an error if that's not the case (which should never happen).
func checkManifestItemsMatch(a, b *ManifestItem) error {
if a.Config != b.Config {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
}
if len(a.Layers) != len(b.Layers) {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
}
for i := range a.Layers {
if a.Layers[i] != b.Layers[i] {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i])
}
}
// Ignore RepoTags; those will be built later.
// Ignore Parent and LayerSources, which we don't set to anything meaningful.
return nil
}
// ensureManifestItemLocked ensures that there is a manifest item pointing to (layerDescriptors, configDigest) with repoTags
// The caller must have locked the Writer.
func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
layerPaths := []string{}
for _, l := range layerDescriptors {
layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
}
var item *ManifestItem
newItem := ManifestItem{
Config: w.configPath(configDigest),
RepoTags: []string{},
Layers: layerPaths,
Parent: "", // We don't have this information
LayerSources: nil,
}
if i, ok := w.manifestByConfig[configDigest]; ok {
item = &w.manifest[i]
if err := checkManifestItemsMatch(item, &newItem); err != nil {
return err
}
} else {
i := len(w.manifest)
w.manifestByConfig[configDigest] = i
w.manifest = append(w.manifest, newItem)
item = &w.manifest[i]
}
knownRepoTags := map[string]struct{}{}
for _, repoTag := range item.RepoTags {
knownRepoTags[repoTag] = struct{}{}
}
for _, tag := range repoTags {
// For github.com/docker/docker consumers, this works just as well as
// refString := ref.String()
// because when reading the RepoTags strings, github.com/docker/docker/reference
// normalizes both of them to the same value.
//
// Doing it this way to include the normalized-out `docker.io[/library]` does make
// a difference for github.com/projectatomic/docker consumers, with the
// “Add --add-registry and --block-registry options to docker daemon” patch.
// These consumers treat reference strings which include a hostname and reference
// strings without a hostname differently.
//
// Using the host name here is more explicit about the intent, and it has the same
// effect as (docker pull) in projectatomic/docker, which tags the result using
// a hostname-qualified reference.
// See https://github.com/containers/image/issues/72 for a more detailed
// analysis and explanation.
refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
if _, ok := knownRepoTags[refString]; !ok {
item.RepoTags = append(item.RepoTags, refString)
knownRepoTags[refString] = struct{}{}
}
}
return nil
}
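// Illustrative sketch (not part of the vendored code) of the RepoTags formatting chosen above:
// a normalized reference keeps its hostname-qualified name, so nothing is re-abbreviated to the
// "familiar" form when it is written into manifest.json. The name exampleRepoTagString and the
// "busybox:latest" input are assumptions for illustration only.
func exampleRepoTagString() (string, error) {
	named, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		return "", err
	}
	tagged, ok := named.(reference.NamedTagged)
	if !ok {
		return "", errors.New("reference is not tagged")
	}
	// Same formatting as ensureManifestItemLocked: yields "docker.io/library/busybox:latest".
	return fmt.Sprintf("%s:%s", tagged.Name(), tagged.Tag()), nil
}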
// Close writes all outstanding data about images to the archive, and finishes writing data
// to the underlying io.Writer.
// No more images can be added after this is called.
func (w *Writer) Close() error {
if err := w.lock(); err != nil {
return err
}
defer w.unlock()
b, err := json.Marshal(&w.manifest)
if err != nil {
return err
}
if err := w.sendBytesLocked(manifestFileName, b); err != nil {
return err
}
b, err = json.Marshal(w.repositories)
if err != nil {
return errors.Wrap(err, "Error marshaling repositories")
}
if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
return errors.Wrap(err, "Error writing config json file")
}
if err := w.tar.Close(); err != nil {
return err
}
w.tar = nil // Mark the Writer as closed.
return nil
}
// configPath returns a path we choose for storing a config with the specified digest.
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) configPath(configDigest digest.Digest) string {
return configDigest.Hex() + ".json"
}
// physicalLayerPath returns a path we choose for storing a layer with the specified digest
// (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
// writeLegacyMetadataLocked constructs layer IDs differently from inputInfo.Digest values (as described
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
// in the root of the tarball.
return layerDigest.Hex() + ".tar"
}
type tarFI struct {
path string
size int64
isSymlink bool
}
func (t *tarFI) Name() string {
return t.path
}
func (t *tarFI) Size() int64 {
return t.size
}
func (t *tarFI) Mode() os.FileMode {
if t.isSymlink {
return os.ModeSymlink
}
return 0444
}
func (t *tarFI) ModTime() time.Time {
return time.Unix(0, 0)
}
func (t *tarFI) IsDir() bool {
return false
}
func (t *tarFI) Sys() interface{} {
return nil
}
// sendSymlinkLocked sends a symlink into the tar stream.
// The caller must have locked the Writer.
func (w *Writer) sendSymlinkLocked(path string, target string) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
if err != nil {
return err
}
logrus.Debugf("Sending as tar link %s -> %s", path, target)
return w.tar.WriteHeader(hdr)
}
// sendBytesLocked sends a path into the tar stream.
// The caller must have locked the Writer.
func (w *Writer) sendBytesLocked(path string, b []byte) error {
return w.sendFileLocked(path, int64(len(b)), bytes.NewReader(b))
}
// sendFileLocked sends a file into the tar stream.
// The caller must have locked the Writer.
func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
if err != nil {
return err
}
logrus.Debugf("Sending as tar file %s", path)
if err := w.tar.WriteHeader(hdr); err != nil {
return err
}
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
size, err := io.Copy(w.tar, stream)
if err != nil {
return err
}
if size != expectedSize {
return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
}
return nil
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -27,9 +26,6 @@ var systemRegistriesDirPath = builtinRegistriesDirPath
// DO NOT change this, instead see systemRegistriesDirPath above.
const builtinRegistriesDirPath = "/etc/containers/registries.d"
// userRegistriesDirPath is the path to the per user registries.d.
var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")
// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
// NOTE: Keep this in sync with docs/registries.d.md!
type registryConfiguration struct {
@@ -79,17 +75,14 @@ func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReferenc
// registriesDirPath returns a path to registries.d
func registriesDirPath(sys *types.SystemContext) string {
if sys != nil && sys.RegistriesDirPath != "" {
return sys.RegistriesDirPath
if sys != nil {
if sys.RegistriesDirPath != "" {
return sys.RegistriesDirPath
}
if sys.RootForImplicitAbsolutePaths != "" {
return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
}
}
userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
if _, err := os.Stat(userRegistriesDirPath); err == nil {
return userRegistriesDirPath
}
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
}
return systemRegistriesDirPath
}

View File

@@ -0,0 +1,424 @@
package tarfile
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
writer io.Writer
tar *tar.Writer
repoTags []reference.NamedTagged
// Other state.
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
config []byte
sysCtx *types.SystemContext
}
// NewDestination returns a tarfile.Destination for the specified io.Writer.
// Deprecated: please use NewDestinationWithContext instead
func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
return NewDestinationWithContext(nil, dest, ref)
}
// NewDestinationWithContext returns a tarfile.Destination for the specified io.Writer.
func NewDestinationWithContext(sys *types.SystemContext, dest io.Writer, ref reference.NamedTagged) *Destination {
repoTags := []reference.NamedTagged{}
if ref != nil {
repoTags = append(repoTags, ref)
}
return &Destination{
writer: dest,
tar: tar.NewWriter(dest),
repoTags: repoTags,
blobs: make(map[digest.Digest]types.BlobInfo),
sysCtx: sys,
}
}
// AddRepoTags adds the specified tags to the destination's repoTags.
func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
d.repoTags = append(d.repoTags, tags...)
}
// SupportedManifestMIMETypes tells which manifest mime types the destination supports
// If an empty slice or nil is returned, then any mime type can be tried to upload
func (d *Destination) SupportedManifestMIMETypes() []string {
return []string{
manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
}
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *Destination) SupportsSignatures(ctx context.Context) error {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
return false
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
return false
}
// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
return false
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
if err != nil {
return types.BlobInfo{}, err
}
defer os.Remove(streamCopy.Name())
defer streamCopy.Close()
digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash())
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(streamCopy, tee)
if err != nil {
return types.BlobInfo{}, err
}
_, err = streamCopy.Seek(0, io.SeekStart)
if err != nil {
return types.BlobInfo{}, err
}
inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
if inputInfo.Digest == "" {
inputInfo.Digest = digester.Digest()
}
stream = streamCopy
logrus.Debugf("... streaming done")
}
// Maybe the blob has been already sent
ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
return reusedInfo, nil
}
if isConfig {
buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
}
d.config = buf
if err := d.sendFile(inputInfo.Digest.Hex()+".json", inputInfo.Size, bytes.NewReader(buf)); err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
}
} else {
// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
// writeLegacyLayerMetadata constructs layer IDs differently from inputInfo.Digest values (as described
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
// in the root of the tarball.
if err := d.sendFile(inputInfo.Digest.Hex()+".tar", inputInfo.Size, stream); err != nil {
return types.BlobInfo{}, err
}
}
d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
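// A hedged sketch, not part of the vendored code: when the caller already holds the whole blob
// in memory, it can pre-compute the digest and size so PutBlob does not have to spool the
// stream to a temporary file first. examplePutConfig and its parameters are illustrative.
func examplePutConfig(ctx context.Context, d *Destination, configJSON []byte, cache types.BlobInfoCache) (types.BlobInfo, error) {
	info := types.BlobInfo{
		Digest: digest.FromBytes(configJSON),
		Size:   int64(len(configJSON)),
	}
	return d.PutBlob(ctx, bytes.NewReader(configJSON), info, cache, true) // isConfig=true
}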
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
}
if blob, ok := d.blobs[info.Digest]; ok {
return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
}
return false, types.BlobInfo{}, nil
}
func (d *Destination) createRepositoriesFile(rootLayerID string) error {
repositories := map[string]map[string]string{}
for _, repoTag := range d.repoTags {
if val, ok := repositories[repoTag.Name()]; ok {
val[repoTag.Tag()] = rootLayerID
} else {
repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): rootLayerID}
}
}
b, err := json.Marshal(repositories)
if err != nil {
return errors.Wrap(err, "Error marshaling repositories")
}
if err := d.sendBytes(legacyRepositoriesFileName, b); err != nil {
return errors.Wrap(err, "Error writing config json file")
}
return nil
}
// PutManifest writes manifest to the destination.
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.New(`Manifest lists are not supported for docker tar files`)
}
// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
// so the caller trying a different manifest kind would be pointless.
var man manifest.Schema2
if err := json.Unmarshal(m, &man); err != nil {
return errors.Wrap(err, "Error parsing manifest")
}
if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
}
layerPaths, lastLayerID, err := d.writeLegacyLayerMetadata(man.LayersDescriptors)
if err != nil {
return err
}
if len(man.LayersDescriptors) > 0 {
if err := d.createRepositoriesFile(lastLayerID); err != nil {
return err
}
}
repoTags := []string{}
for _, tag := range d.repoTags {
// For github.com/docker/docker consumers, this works just as well as
// refString := ref.String()
// because when reading the RepoTags strings, github.com/docker/docker/reference
// normalizes both of them to the same value.
//
// Doing it this way to include the normalized-out `docker.io[/library]` does make
// a difference for github.com/projectatomic/docker consumers, with the
// “Add --add-registry and --block-registry options to docker daemon” patch.
// These consumers treat reference strings which include a hostname and reference
// strings without a hostname differently.
//
// Using the host name here is more explicit about the intent, and it has the same
// effect as (docker pull) in projectatomic/docker, which tags the result using
// a hostname-qualified reference.
// See https://github.com/containers/image/issues/72 for a more detailed
// analysis and explanation.
refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
repoTags = append(repoTags, refString)
}
items := []ManifestItem{{
Config: man.ConfigDescriptor.Digest.Hex() + ".json",
RepoTags: repoTags,
Layers: layerPaths,
Parent: "",
LayerSources: nil,
}}
itemsBytes, err := json.Marshal(&items)
if err != nil {
return err
}
// FIXME? Do we also need to support the legacy format?
return d.sendBytes(manifestFileName, itemsBytes)
}
// writeLegacyLayerMetadata writes legacy VERSION and configuration files for all layers
func (d *Destination) writeLegacyLayerMetadata(layerDescriptors []manifest.Schema2Descriptor) (layerPaths []string, lastLayerID string, err error) {
var chainID digest.Digest
lastLayerID = ""
for i, l := range layerDescriptors {
// This chainID value matches the computation in docker/docker/layer.CreateChainID …
if chainID == "" {
chainID = l.Digest
} else {
chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
}
// … but note that this image ID does not match docker/docker/image/v1.CreateID. At least recent
// versions allocate new IDs on load, so this is fine as long as the IDs we use are unique / cannot loop.
//
// Overall, the goal of computing a digest dependent on the full history is to avoid reusing an image ID
// (and possibly creating a loop in the "parent" links) if a layer with the same DiffID appears two or more
// times in layersDescriptors. The ChainID values are sufficient for this, the v1.CreateID computation
// which also mixes in the full image configuration seems unnecessary, at least as long as we are storing
// only a single image per tarball, i.e. all DiffID prefixes are unique (can't differ only with
// configuration).
layerID := chainID.Hex()
physicalLayerPath := l.Digest.Hex() + ".tar"
// The layer itself has been stored into physicalLayerPath in PutManifest.
// So, use that path for layerPaths used in the non-legacy manifest
layerPaths = append(layerPaths, physicalLayerPath)
// ... and create a symlink for the legacy format;
if err := d.sendSymlink(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
return nil, "", errors.Wrap(err, "Error creating layer symbolic link")
}
b := []byte("1.0")
if err := d.sendBytes(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
return nil, "", errors.Wrap(err, "Error writing VERSION file")
}
// The legacy format requires a config file per layer
layerConfig := make(map[string]interface{})
layerConfig["id"] = layerID
// The root layer doesn't have any parent
if lastLayerID != "" {
layerConfig["parent"] = lastLayerID
}
// The top layer configuration file is generated by using a subset of the image configuration
if i == len(layerDescriptors)-1 {
var config map[string]*json.RawMessage
err := json.Unmarshal(d.config, &config)
if err != nil {
return nil, "", errors.Wrap(err, "Error unmarshaling config")
}
for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
layerConfig[attr] = config[attr]
}
}
b, err := json.Marshal(layerConfig)
if err != nil {
return nil, "", errors.Wrap(err, "Error marshaling layer config")
}
if err := d.sendBytes(filepath.Join(layerID, legacyConfigFileName), b); err != nil {
return nil, "", errors.Wrap(err, "Error writing config json file")
}
lastLayerID = layerID
}
return layerPaths, lastLayerID, nil
}
type tarFI struct {
path string
size int64
isSymlink bool
}
func (t *tarFI) Name() string {
return t.path
}
func (t *tarFI) Size() int64 {
return t.size
}
func (t *tarFI) Mode() os.FileMode {
if t.isSymlink {
return os.ModeSymlink
}
return 0444
}
func (t *tarFI) ModTime() time.Time {
return time.Unix(0, 0)
}
func (t *tarFI) IsDir() bool {
return false
}
func (t *tarFI) Sys() interface{} {
return nil
}
// sendSymlink sends a symlink into the tar stream.
func (d *Destination) sendSymlink(path string, target string) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
if err != nil {
return err
}
logrus.Debugf("Sending as tar link %s -> %s", path, target)
return d.tar.WriteHeader(hdr)
}
// sendBytes sends a path into the tar stream.
func (d *Destination) sendBytes(path string, b []byte) error {
return d.sendFile(path, int64(len(b)), bytes.NewReader(b))
}
// sendFile sends a file into the tar stream.
func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
if err != nil {
return err
}
logrus.Debugf("Sending as tar file %s", path)
if err := d.tar.WriteHeader(hdr); err != nil {
return err
}
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
size, err := io.Copy(d.tar, stream)
if err != nil {
return err
}
if size != expectedSize {
return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
}
return nil
}
// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.Errorf(`Manifest lists are not supported for docker tar files`)
}
if len(signatures) != 0 {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
return nil
}
// Commit finishes writing data to the underlying io.Writer.
// It is the caller's responsibility to close it, if necessary.
func (d *Destination) Commit(ctx context.Context) error {
return d.tar.Close()
}
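// A hedged sketch, not part of the vendored code, of the tail of the expected call sequence:
// after every layer blob and the config blob have been sent via PutBlob, the caller writes the
// schema2 manifest and then Commits to flush the tar stream. exampleFinish and manifestJSON
// are illustrative names.
func exampleFinish(ctx context.Context, d *Destination, manifestJSON []byte) error {
	if err := d.PutManifest(ctx, manifestJSON, nil); err != nil {
		return err
	}
	return d.Commit(ctx)
}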

View File

@@ -0,0 +1,3 @@
// Package tarfile is an internal implementation detail of some transports.
// Do not use outside of the github.com/containers/image repo!
package tarfile

View File

@@ -11,8 +11,8 @@ import (
"path"
"sync"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
@@ -22,11 +22,8 @@ import (
// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
archive *Reader
closeArchive bool // .Close() the archive when the source is closed.
// If ref is nil and sourceIndex is -1, indicates the only image in the archive.
ref reference.NamedTagged // May be nil
sourceIndex int // May be -1
tarPath string
removeTarPathOnClose bool // Remove temp file on close if true
// The following data is only available after ensureCachedDataIsPresent() succeeds
tarManifest *ManifestItem // nil if not available yet.
configBytes []byte
@@ -44,16 +41,180 @@ type layerInfo struct {
size int64
}
// NewSource returns a tarfile.Source for an image in the specified archive matching ref
// and sourceIndex (or the only image if they are (nil, -1)).
// The archive will be closed if closeArchive is true.
func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source {
return &Source{
archive: archive,
closeArchive: closeArchive,
ref: ref,
sourceIndex: sourceIndex,
// TODO: We could add support for multiple images in a single archive, so
// that people could use docker-archive:opensuse.tar:opensuse:leap as
// the source of an image.
// To do for both the NewSourceFromFile and NewSourceFromStream functions
// NewSourceFromFile returns a tarfile.Source for the specified path.
// Deprecated: Please use NewSourceFromFileWithContext, which allows you to configure the temp directory
// for big files through SystemContext.BigFilesTemporaryDir
func NewSourceFromFile(path string) (*Source, error) {
return NewSourceFromFileWithContext(nil, path)
}
// NewSourceFromFileWithContext returns a tarfile.Source for the specified path.
func NewSourceFromFileWithContext(sys *types.SystemContext, path string) (*Source, error) {
file, err := os.Open(path)
if err != nil {
return nil, errors.Wrapf(err, "error opening file %q", path)
}
defer file.Close()
// If the file is already not compressed we can just return the file itself
// as a source. Otherwise we pass the stream to NewSourceFromStream.
stream, isCompressed, err := compression.AutoDecompress(file)
if err != nil {
return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
}
defer stream.Close()
if !isCompressed {
return &Source{
tarPath: path,
}, nil
}
return NewSourceFromStreamWithSystemContext(sys, stream)
}
// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromStream returns.
// Deprecated: Please use NewSourceFromStreamWithSystemContext, which allows you to configure the
// temp directory for big files through SystemContext.BigFilesTemporaryDir
func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
return NewSourceFromStreamWithSystemContext(nil, inputStream)
}
// NewSourceFromStreamWithSystemContext returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromStreamWithSystemContext returns.
func NewSourceFromStreamWithSystemContext(sys *types.SystemContext, inputStream io.Reader) (*Source, error) {
// FIXME: use SystemContext here.
// Save inputStream to a temporary file
tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
if err != nil {
return nil, errors.Wrap(err, "error creating temporary file")
}
defer tarCopyFile.Close()
succeeded := false
defer func() {
if !succeeded {
os.Remove(tarCopyFile.Name())
}
}()
// In order to be compatible with docker-load, we need to support
// auto-decompression (it's also a nice quality-of-life thing to avoid
// giving users really confusing "invalid tar header" errors).
uncompressedStream, _, err := compression.AutoDecompress(inputStream)
if err != nil {
return nil, errors.Wrap(err, "Error auto-decompressing input")
}
defer uncompressedStream.Close()
// Copy the plain archive to the temporary file.
//
// TODO: This can take quite some time, and should ideally be cancellable
// using a context.Context.
if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
}
succeeded = true
return &Source{
tarPath: tarCopyFile.Name(),
removeTarPathOnClose: true,
}, nil
}
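// A hedged usage sketch, not part of the vendored code: open a (possibly compressed)
// docker-save style tarball via the constructor above and read its manifest.json entries.
// exampleOpenArchive is an illustrative name.
func exampleOpenArchive(sys *types.SystemContext, path string) ([]ManifestItem, error) {
	src, err := NewSourceFromFileWithContext(sys, path)
	if err != nil {
		return nil, err
	}
	defer src.Close()
	return src.LoadTarManifest()
}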
// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
type tarReadCloser struct {
*tar.Reader
backingFile *os.File
}
func (t *tarReadCloser) Close() error {
return t.backingFile.Close()
}
// openTarComponent returns a ReadCloser for the specific file within the archive.
// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
// The caller should call .Close() on the returned stream.
func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
f, err := os.Open(s.tarPath)
if err != nil {
return nil, err
}
succeeded := false
defer func() {
if !succeeded {
f.Close()
}
}()
tarReader, header, err := findTarComponent(f, componentPath)
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
// We follow only one symlink; so no loops are possible.
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
}
// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
// so we don't care.
tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
if err != nil {
return nil, err
}
if header == nil {
return nil, os.ErrNotExist
}
}
if !header.FileInfo().Mode().IsRegular() {
return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
}
succeeded = true
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
}
// findTarComponent returns a header and a reader matching path within inputFile,
// or (nil, nil, nil) if not found.
func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
t := tar.NewReader(inputFile)
for {
h, err := t.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, nil, err
}
if h.Name == path {
return t, h, nil
}
}
return nil, nil, nil
}
// readTarComponent returns full contents of componentPath.
func (s *Source) readTarComponent(path string, limit int) ([]byte, error) {
file, err := s.openTarComponent(path)
if err != nil {
return nil, errors.Wrapf(err, "Error loading tar component %s", path)
}
defer file.Close()
bytes, err := iolimits.ReadAtMost(file, limit)
if err != nil {
return nil, err
}
return bytes, nil
}
// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
@@ -68,31 +229,37 @@ func (s *Source) ensureCachedDataIsPresent() error {
// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent.
// Call ensureCachedDataIsPresent instead.
func (s *Source) ensureCachedDataIsPresentPrivate() error {
tarManifest, _, err := s.archive.ChooseManifestItem(s.ref, s.sourceIndex)
// Read and parse manifest.json
tarManifest, err := s.loadTarManifest()
if err != nil {
return err
}
// Check to make sure length is 1
if len(tarManifest) != 1 {
return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
}
// Read and parse config.
configBytes, err := s.archive.readTarComponent(tarManifest.Config, iolimits.MaxConfigBodySize)
configBytes, err := s.readTarComponent(tarManifest[0].Config, iolimits.MaxConfigBodySize)
if err != nil {
return err
}
var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
}
if parsedConfig.RootFS == nil {
return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest[0].Config)
}
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
if err != nil {
return err
}
// Success; commit.
s.tarManifest = tarManifest
s.tarManifest = &tarManifest[0]
s.configBytes = configBytes
s.configDigest = digest.FromBytes(configBytes)
s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
@@ -100,17 +267,31 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
return nil
}
// loadTarManifest loads and decodes the manifest.json.
func (s *Source) loadTarManifest() ([]ManifestItem, error) {
// FIXME? Do we need to deal with the legacy format?
bytes, err := s.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
if err != nil {
return nil, err
}
var items []ManifestItem
if err := json.Unmarshal(bytes, &items); err != nil {
return nil, errors.Wrap(err, "Error decoding tar manifest.json")
}
return items, nil
}
// Close removes resources associated with an initialized Source, if any.
func (s *Source) Close() error {
if s.closeArchive {
return s.archive.Close()
if s.removeTarPathOnClose {
return os.Remove(s.tarPath)
}
return nil
}
// TarManifest returns contents of manifest.json
func (s *Source) TarManifest() []ManifestItem {
return s.archive.Manifest
// LoadTarManifest loads and decodes the manifest.json
func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
return s.loadTarManifest()
}
func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
@@ -127,7 +308,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
// which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
continue
}
layerPath := path.Clean(tarManifest.Layers[i])
layerPath := tarManifest.Layers[i]
if _, ok := unknownLayerSizes[layerPath]; ok {
return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
}
@@ -140,7 +321,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
}
// Scan the tar file to collect layer sizes.
file, err := os.Open(s.archive.path)
file, err := os.Open(s.tarPath)
if err != nil {
return nil, err
}
@@ -154,9 +335,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
if err != nil {
return nil, err
}
layerPath := path.Clean(h.Name)
// FIXME: Cache this data across images in Reader.
if li, ok := unknownLayerSizes[layerPath]; ok {
if li, ok := unknownLayerSizes[h.Name]; ok {
// Since GetBlob will decompress layers that are compressed we need
// to do the decompression here as well, otherwise we will
// incorrectly report the size. Pretty critical, since tools like
@@ -164,7 +343,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
// the slower method of checking if it's compressed.
uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
if err != nil {
return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", layerPath)
return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name)
}
defer uncompressedStream.Close()
@@ -172,11 +351,11 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
if isCompressed {
uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
if err != nil {
return nil, errors.Wrapf(err, "Error reading %s to find its size", layerPath)
return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name)
}
}
li.size = uncompressedSize
delete(unknownLayerSizes, layerPath)
delete(unknownLayerSizes, h.Name)
}
}
if len(unknownLayerSizes) != 0 {
@@ -267,7 +446,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
}
if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
underlyingStream, err := s.archive.openTarComponent(li.path)
underlyingStream, err := s.openTarComponent(li.path)
if err != nil {
return nil, 0, err
}

View File

@@ -17,7 +17,7 @@ const (
)
// ManifestItem is an element of the array stored in the top-level manifest.json file.
type ManifestItem struct { // NOTE: This is visible as docker/tarfile.ManifestItem, and a part of the stable API.
type ManifestItem struct {
Config string
RepoTags []string
Layers []string

View File

@@ -183,12 +183,7 @@ func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (temp
src := ref.resolvedFile
dst := tempDirRef.tempDirectory
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
arch, err := os.Open(src)
if err != nil {
return tempDirOCIRef{}, err
}
defer arch.Close()
if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
if err := archive.UntarPath(src, dst); err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
}

View File

@@ -141,10 +141,6 @@ func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *dige
}
func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
if len(urls) == 0 {
return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
}
errWrap := errors.New("failed fetching external blob from all urls")
for _, url := range urls {

View File

@@ -11,9 +11,9 @@ import (
"strings"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
helperclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker-credential-helpers/credentials"
"github.com/docker/docker/pkg/homedir"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -345,7 +345,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
return errors.Wrapf(err, "error marshaling JSON %q", path)
}
if err = ioutil.WriteFile(path, newData, 0600); err != nil {
if err = ioutil.WriteFile(path, newData, 0755); err != nil {
return errors.Wrapf(err, "error writing to file %q", path)
}
}

View File

@@ -338,86 +338,55 @@ func (config *V2RegistriesConf) postProcess() error {
}
// ConfigPath returns the path to the system-wide registry configuration file.
// Deprecated: This API implies configuration is read from files, and that there is only one.
// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
func ConfigPath(ctx *types.SystemContext) string {
return newConfigWrapper(ctx).configPath
if ctx != nil && ctx.SystemRegistriesConfPath != "" {
return ctx.SystemRegistriesConfPath
}
userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
if _, err := os.Stat(userRegistriesFilePath); err == nil {
return userRegistriesFilePath
}
if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
}
return systemRegistriesConfPath
}
// ConfigDirPath returns the path to the directory for drop-in
// ConfigDirPath returns the path to the system-wide directory for drop-in
// registry configuration files.
// Deprecated: This API implies configuration is read from directories, and that there is only one.
// Please use ConfigurationSourceDescription to obtain a string usable for error messages.
func ConfigDirPath(ctx *types.SystemContext) string {
configWrapper := newConfigWrapper(ctx)
if configWrapper.userConfigDirPath != "" {
return configWrapper.userConfigDirPath
if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
return ctx.SystemRegistriesConfDirPath
}
return configWrapper.configDirPath
userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
if _, err := os.Stat(userRegistriesDirPath); err == nil {
return userRegistriesDirPath
}
if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
}
return systemRegistriesConfDirPath
}
// configWrapper is used to store the paths from ConfigPath and ConfigDirPath
// and acts as a key to the internal cache.
type configWrapper struct {
// path to the registries.conf file
configPath string
// path to system-wide registries.conf.d directory, or "" if not used
configPath string
configDirPath string
// path to the user-specified registries.conf.d directory, or "" if not used
userConfigDirPath string
}
// newConfigWrapper returns a configWrapper for the specified SystemContext.
func newConfigWrapper(ctx *types.SystemContext) configWrapper {
var wrapper configWrapper
userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
// decide configPath using per-user path or system file
if ctx != nil && ctx.SystemRegistriesConfPath != "" {
wrapper.configPath = ctx.SystemRegistriesConfPath
} else if _, err := os.Stat(userRegistriesFilePath); err == nil {
// per-user registries.conf exists, not reading system dir
// return config dirs from ctx or per-user one
wrapper.configPath = userRegistriesFilePath
if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
} else {
wrapper.userConfigDirPath = userRegistriesDirPath
}
return wrapper
} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
wrapper.configPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
} else {
wrapper.configPath = systemRegistriesConfPath
return configWrapper{
configPath: ConfigPath(ctx),
configDirPath: ConfigDirPath(ctx),
}
// potentially use both system and per-user dirs if not using per-user config file
if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
// dir explicitly chosen: use only that one
wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
wrapper.configDirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
wrapper.userConfigDirPath = userRegistriesDirPath
} else {
wrapper.configDirPath = systemRegistriesConfDirPath
wrapper.userConfigDirPath = userRegistriesDirPath
}
return wrapper
}
// ConfigurationSourceDescription returns a string containing the paths of registries.conf and registries.conf.d
func ConfigurationSourceDescription(ctx *types.SystemContext) string {
wrapper := newConfigWrapper(ctx)
configSources := []string{wrapper.configPath}
if wrapper.configDirPath != "" {
configSources = append(configSources, wrapper.configDirPath)
}
if wrapper.userConfigDirPath != "" {
configSources = append(configSources, wrapper.userConfigDirPath)
}
return strings.Join(configSources, ", ")
}
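// A hedged sketch, not part of the vendored code: how a caller might report where registry
// configuration would be read from, e.g. in an error message. The SystemContext override
// values shown here are illustrative.
func exampleConfigSources() string {
	sys := &types.SystemContext{
		SystemRegistriesConfPath:    "/etc/containers/registries.conf",
		SystemRegistriesConfDirPath: "/etc/containers/registries.conf.d",
	}
	return ConfigurationSourceDescription(sys)
}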
// configMutex is used to synchronize concurrent accesses to configCache.
@@ -453,49 +422,39 @@ func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
// dropInConfigs returns a slice of drop-in-configs from the registries.conf.d
// directory.
func dropInConfigs(wrapper configWrapper) ([]string, error) {
var (
configs []string
dirPaths []string
)
if wrapper.configDirPath != "" {
dirPaths = append(dirPaths, wrapper.configDirPath)
}
if wrapper.userConfigDirPath != "" {
dirPaths = append(dirPaths, wrapper.userConfigDirPath)
}
for _, dirPath := range dirPaths {
err := filepath.Walk(dirPath,
// WalkFunc to read additional configs
func(path string, info os.FileInfo, err error) error {
switch {
case err != nil:
// return error (could be a permission problem)
return err
case info == nil:
// this should only happen when err != nil but let's be sure
return nil
case info.IsDir():
if path != dirPath {
// make sure to not recurse into sub-directories
return filepath.SkipDir
}
// ignore directories
return nil
default:
// only add *.conf files
if strings.HasSuffix(path, ".conf") {
configs = append(configs, path)
}
return nil
}
},
)
var configs []string
if err != nil && !os.IsNotExist(err) {
// Ignore IsNotExist errors: most systems won't have a registries.conf.d
// directory.
return nil, errors.Wrapf(err, "error reading registries.conf.d")
}
err := filepath.Walk(wrapper.configDirPath,
// WalkFunc to read additional configs
func(path string, info os.FileInfo, err error) error {
switch {
case err != nil:
// return error (could be a permission problem)
return err
case info == nil:
// this should only happen when err != nil but let's be sure
return nil
case info.IsDir():
if path != wrapper.configDirPath {
// make sure to not recurse into sub-directories
return filepath.SkipDir
}
// ignore directories
return nil
default:
// only add *.conf files
if strings.HasSuffix(path, ".conf") {
configs = append(configs, path)
}
return nil
}
},
)
if err != nil && !os.IsNotExist(err) {
// Ignore IsNotExist errors: most systems won't have a registries.conf.d
// directory.
return nil, errors.Wrapf(err, "error reading registries.conf.d")
}
return configs, nil

View File

@@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 6
VersionMinor = 5
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""

View File

@@ -1,22 +0,0 @@
dist: xenial
language: go
os:
- linux
go:
- "1.13.x"
matrix:
include:
- os: linux
go_import_path: github.com/containers/ocicrypt
install:
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.19.1
script:
- make
- make check
- make test

View File

@@ -1,3 +0,0 @@
## Security and Disclosure Information Policy for the OCIcrypt Library Project
The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.

View File

@@ -3,13 +3,15 @@ module github.com/containers/ocicrypt
go 1.12
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/containerd/containerd v1.2.10
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.1
github.com/pkg/errors v0.8.1
github.com/sirupsen/logrus v1.4.2 // indirect
github.com/stretchr/testify v1.3.0 // indirect
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 // indirect
google.golang.org/grpc v1.24.0 // indirect
gopkg.in/square/go-jose.v2 v2.3.1
gotest.tools v2.2.0+incompatible // indirect
)

View File

@@ -1,7 +1,23 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/containerd v1.2.10 h1:liQDhXqIn7y6cJ/7qBgOaZsiTZJc56/wkkhDBiDBRDw=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
@@ -10,16 +26,22 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -27,5 +49,15 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/p
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@@ -170,7 +170,7 @@ func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool,
var args []string
if gc.gpgHomeDir != "" {
args = []string{"--homedir", gc.gpgHomeDir}
args = append([]string{"--homedir", gc.gpgHomeDir})
}
args = append(args, option, fmt.Sprintf("0x%x", keyid))
@@ -229,7 +229,7 @@ func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool,
var args []string
if gc.gpgHomeDir != "" {
args = []string{"--homedir", gc.gpgHomeDir}
args = append([]string{"--homedir", gc.gpgHomeDir})
}
args = append(args, option, fmt.Sprintf("0x%x", keyid))

View File

@@ -1,16 +1,19 @@
package helpers
import (
"context"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/containerd/containerd/platforms"
"github.com/containers/ocicrypt"
encconfig "github.com/containers/ocicrypt/config"
encutils "github.com/containers/ocicrypt/utils"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
@@ -63,23 +66,6 @@ func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, er
return gpgRecipients, pubkeys, x509s, nil
}
// processx509Certs processes x509 certificate files
func processx509Certs(keys []string) ([][]byte, error) {
var x509s [][]byte
for _, key := range keys {
tmp, err := ioutil.ReadFile(key)
if err != nil {
return nil, errors.Wrap(err, "Unable to read file")
}
if !encutils.IsCertificate(tmp) {
continue
}
x509s = append(x509s, tmp)
}
return x509s, nil
}
// processPwdString process a password that may be in any of the following formats:
// - file=<passwordfile>
// - pass=<password>
@@ -155,14 +141,33 @@ func processPrivateKeyFiles(keyFilesAndPwds []string) ([][]byte, [][]byte, [][]b
gpgSecretKeyRingFiles = append(gpgSecretKeyRingFiles, tmp)
gpgSecretKeyPasswords = append(gpgSecretKeyPasswords, password)
} else {
// ignore if file is not recognized, so as not to error if additional
// metadata/cert files exists
continue
return nil, nil, nil, nil, fmt.Errorf("unidentified private key in file %s (password=%s)", keyfile, string(password))
}
}
return gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privkeys, privkeysPasswords, nil
}
func createGPGClient(context context.Context) (ocicrypt.GPGClient, error) {
return ocicrypt.NewGPGClient(context.Value("gpg-version").(string), context.Value("gpg-homedir").(string))
}
func getGPGPrivateKeys(context context.Context, gpgSecretKeyRingFiles [][]byte, descs []ocispec.Descriptor, mustFindKey bool) (gpgPrivKeys [][]byte, gpgPrivKeysPwds [][]byte, err error) {
gpgClient, err := createGPGClient(context)
if err != nil {
return nil, nil, err
}
var gpgVault ocicrypt.GPGVault
if len(gpgSecretKeyRingFiles) > 0 {
gpgVault = ocicrypt.NewGPGVault()
err = gpgVault.AddSecretKeyRingDataArray(gpgSecretKeyRingFiles)
if err != nil {
return nil, nil, err
}
}
return ocicrypt.GPGGetPrivateKey(descs, gpgClient, gpgVault, mustFindKey)
}
// CreateDecryptCryptoConfig creates the CryptoConfig object that contains the necessary
// information to perform decryption from command line options and possibly
// LayerInfos describing the image and helping us to query for the PGP decryption keys
@@ -175,13 +180,6 @@ func CreateDecryptCryptoConfig(keys []string, decRecipients []string) (encconfig
return encconfig.CryptoConfig{}, err
}
// x509 certs can also be passed in via keys
x509FromKeys, err := processx509Certs(keys)
if err != nil {
return encconfig.CryptoConfig{}, err
}
x509s = append(x509s, x509FromKeys...)
gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privKeys, privKeysPasswords, err := processPrivateKeyFiles(keys)
if err != nil {
return encconfig.CryptoConfig{}, err
@@ -238,6 +236,20 @@ func CreateDecryptCryptoConfig(keys []string, decRecipients []string) (encconfig
return encconfig.CombineCryptoConfigs(ccs), nil
}
// parsePlatformArray parses an array of specifiers and converts them into an array of specs.Platform
func parsePlatformArray(specifiers []string) ([]ocispec.Platform, error) {
var speclist []ocispec.Platform
for _, specifier := range specifiers {
spec, err := platforms.Parse(specifier)
if err != nil {
return []ocispec.Platform{}, err
}
speclist = append(speclist, spec)
}
return speclist, nil
}
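
The parsePlatformArray helper above leans on containerd's platform parser; a small usage sketch of platforms.Parse, where the "linux/arm/v7" specifier is an illustrative choice.

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// Parse normalizes an "os/arch[/variant]" specifier into an OCI platform
	// value with OS, Architecture and Variant filled in.
	p, err := platforms.Parse("linux/arm/v7")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.OS, p.Architecture, p.Variant) // linux arm v7
}
```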
// CreateCryptoConfig from the list of recipient strings and list of key paths of private keys
func CreateCryptoConfig(recipients []string, keys []string) (encconfig.CryptoConfig, error) {
var decryptCc *encconfig.CryptoConfig

View File

@@ -23,8 +23,8 @@ import (
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
"github.com/fullsailor/pkcs7"
"github.com/pkg/errors"
"go.mozilla.org/pkcs7"
)
type pkcs7KeyWrapper struct {

View File

@@ -19,12 +19,12 @@ env:
###
FEDORA_NAME: "fedora-32"
PRIOR_FEDORA_NAME: "fedora-31"
UBUNTU_NAME: "ubuntu-20"
PRIOR_UBUNTU_NAME: "ubuntu-19"
UBUNTU_NAME: "ubuntu-19"
PRIOR_UBUNTU_NAME: "ubuntu-18"
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
_BUILT_IMAGE_SUFFIX: "libpod-6508632441356288"
_BUILT_IMAGE_SUFFIX: "libpod-6224667180531712" # From the packer output of 'build_vm_images_script'
FEDORA_CACHE_IMAGE_NAME: "${FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "${PRIOR_FEDORA_NAME}-${_BUILT_IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "${UBUNTU_NAME}-${_BUILT_IMAGE_SUFFIX}"
@@ -62,10 +62,8 @@ testing_task:
- lint
# Not all $TEST_DRIVER combinations are valid for all OS types.
# N/B: As of the addition of this note, nested-variable resolution
# does not happen for boolean `only_if` expressions. Since $VM_IMAGE
# contains nested variables, we must filter based on that and not the
# actual distro/version value.
# Note: Nested-variable resolution happens at runtime, not eval. time.
# Use verbose logic for ease of reading/maintaining.
only_if: >-
( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "vfs" ) ||
( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "aufs" ) ||
@@ -106,7 +104,7 @@ lint_task:
env:
CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
container:
image: golang:1.15
image: golang:1.12
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
@@ -142,21 +140,9 @@ meta_task:
vendor_task:
container:
image: golang:1.15
image: golang:1.13
modules_cache:
fingerprint_script: cat go.sum
folder: $GOPATH/pkg/mod
build_script: make vendor
test_script: hack/tree_status.sh
# Represent overall pass/fail status from required dependent tasks
success_task:
depends_on:
- lint
- testing
- meta
- vendor
container:
image: golang:1.15
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
script: /bin/true

View File

@@ -1 +1 @@
1.23.5
1.20.2

View File

@@ -143,6 +143,10 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
}
dstPath := filepath.Join(dstDir, relPath)
if err != nil {
return err
}
stat, ok := f.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)

View File

@@ -51,10 +51,6 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
if c.checker.IsMounted(path) {
m.count++
}
} else if !c.checker.IsMounted(path) {
// if the unmount was performed outside of this process (e.g. conmon cleanup)
//the ref counter would lose track of it. Check if it is still mounted.
m.count = 0
}
infoOp(m)
count := m.count

View File

@@ -23,7 +23,6 @@ type directLVMConfig struct {
ThinpMetaPercent uint64
AutoExtendPercent uint64
AutoExtendThreshold uint64
MetaDataSize string
}
var (
@@ -122,19 +121,15 @@ func checkDevHasFS(dev string) error {
}
func verifyBlockDevice(dev string, force bool) error {
absPath, err := filepath.Abs(dev)
realPath, err := filepath.Abs(dev)
if err != nil {
return errors.Errorf("unable to get absolute path for %s: %s", dev, err)
}
realPath, err := filepath.EvalSymlinks(absPath)
if err != nil {
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
return errors.Errorf("failed to canonicalise path for %s: %s", dev, err)
}
if err := checkDevAvailable(absPath); err != nil {
logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath)
if err := checkDevAvailable(realPath); err != nil {
return errors.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device.", absPath, realPath)
}
if err := checkDevAvailable(realPath); err != nil {
return err
}
if err := checkDevInVG(realPath); err != nil {
return err
@@ -210,11 +205,8 @@ func setupDirectLVM(cfg directLVMConfig) error {
if cfg.ThinpMetaPercent == 0 {
cfg.ThinpMetaPercent = 1
}
if cfg.MetaDataSize == "" {
cfg.MetaDataSize = "128k"
}
out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput()
out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput()
if err != nil {
return errors.Wrap(err, string(out))
}
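
The verifyBlockDevice hunk above resolves the device path in two steps; a standalone sketch of that canonicalisation, with `canonicalize` as an assumed helper name.

```go
package sketch

import (
	"fmt"
	"path/filepath"
)

// canonicalize resolves a possibly symlinked device path in the same two steps
// as verifyBlockDevice above: absolute path first, then symlink evaluation.
func canonicalize(dev string) (string, error) {
	abs, err := filepath.Abs(dev)
	if err != nil {
		return "", fmt.Errorf("unable to get absolute path for %s: %w", dev, err)
	}
	resolved, err := filepath.EvalSymlinks(abs)
	if err != nil {
		return "", fmt.Errorf("failed to canonicalise path for %s: %w", abs, err)
	}
	return resolved, nil
}
```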

View File

@@ -101,7 +101,6 @@ type DeviceSet struct {
// Options
dataLoopbackSize int64
metaDataSize string
metaDataLoopbackSize int64
baseFsSize uint64
filesystem string
@@ -1749,7 +1748,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
// - Managed by container storage
// - The target of this device is at major <maj> and minor <min>
// - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(uint64(st.Dev)), minor(uint64(st.Dev)), st.Ino)
devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino)
logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
// Check for the existence of the thin-pool device
@@ -2709,8 +2708,6 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
devices.mountOptions = joinMountOptions(devices.mountOptions, val)
case "dm.metadatadev":
devices.metadataDevice = val
case "dm.metadata_size":
devices.metaDataSize = val
case "dm.datadev":
devices.dataDevice = val
case "dm.thinpooldev":
@@ -2746,8 +2743,6 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
return nil, err
}
case "dm.metaDataSize":
lvmSetupConfig.MetaDataSize = val
case "dm.min_free_space":
if !strings.HasSuffix(val, "%") {
return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix")

View File

@@ -274,28 +274,22 @@ func parseOptions(options []string) (*overlayOptions, error) {
if err != nil {
return nil, err
}
trimkey := strings.ToLower(key)
trimkey = strings.TrimPrefix(trimkey, "overlay.")
trimkey = strings.TrimPrefix(trimkey, "overlay2.")
trimkey = strings.TrimPrefix(trimkey, ".")
switch trimkey {
case "override_kernel_check":
key = strings.ToLower(key)
switch key {
case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check":
logrus.Warnf("overlay: override_kernel_check option was specified, but is no longer necessary")
case "mountopt":
case ".mountopt", "overlay.mountopt", "overlay2.mountopt":
o.mountOptions = val
case "size":
case ".size", "overlay.size", "overlay2.size":
logrus.Debugf("overlay: size=%s", val)
size, err := units.RAMInBytes(val)
if err != nil {
return nil, err
}
o.quota.Size = uint64(size)
case "imagestore", "additionalimagestore":
case ".imagestore", "overlay.imagestore", "overlay2.imagestore":
logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths
if val == "" {
continue
}
for _, store := range strings.Split(val, ",") {
store = filepath.Clean(store)
if !filepath.IsAbs(store) {
@@ -310,17 +304,17 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
o.imageStores = append(o.imageStores, store)
}
case "mount_program":
case ".mount_program", "overlay.mount_program", "overlay2.mount_program":
logrus.Debugf("overlay: mount_program=%s", val)
_, err := os.Stat(val)
if err != nil {
return nil, fmt.Errorf("overlay: can't stat program %s: %v", val, err)
}
o.mountProgram = val
case "skip_mount_home":
case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home":
logrus.Debugf("overlay: skip_mount_home=%s", val)
o.skipMountHome, err = strconv.ParseBool(val)
case "ignore_chown_errors":
case ".ignore_chown_errors", "overlay2.ignore_chown_errors", "overlay.ignore_chown_errors":
logrus.Debugf("overlay: ignore_chown_errors=%s", val)
o.ignoreChownErrors, err = strconv.ParseBool(val)
if err != nil {
@@ -898,6 +892,19 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
// If the lowers list is still empty, use an empty lower so that we can still force an
// SELinux context for the mount.
// if we are doing a readOnly mount, and there is only one lower
// We should just return the lower directory, no reason to mount.
if !readWrite && d.options.mountProgram == "" {
if len(absLowers) == 0 {
return path.Join(dir, "empty"), nil
}
if len(absLowers) == 1 {
return absLowers[0], nil
}
}
if len(absLowers) == 0 {
absLowers = append(absLowers, path.Join(dir, "empty"))
relLowers = append(relLowers, path.Join(id, "empty"))
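
The parseOptions hunks earlier in this file show two ways of matching option keys; a sketch of the prefix-trimming variant, with `normalizeKey` as an assumed helper name.

```go
package sketch

import "strings"

// normalizeKey trims the storage-driver prefixes the way the prefix-trimming
// variant of parseOptions above does, so "overlay2.mount_program",
// "overlay.mount_program" and ".mount_program" all reduce to "mount_program".
func normalizeKey(key string) string {
	k := strings.ToLower(key)
	k = strings.TrimPrefix(k, "overlay.")
	k = strings.TrimPrefix(k, "overlay2.")
	k = strings.TrimPrefix(k, ".")
	return k
}
```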

View File

@@ -1,5 +1,3 @@
go 1.15
module github.com/containers/storage
require (
@@ -7,24 +5,25 @@ require (
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
github.com/Microsoft/hcsshim v0.8.9
github.com/docker/go-units v0.4.0
github.com/hashicorp/go-multierror v1.1.0
github.com/klauspost/compress v1.11.0
github.com/klauspost/pgzip v1.2.5
github.com/hashicorp/go-multierror v1.0.0
github.com/klauspost/compress v1.10.7
github.com/klauspost/pgzip v1.2.4
github.com/mattn/go-shellwords v1.0.10
github.com/mistifyio/go-zfs v2.1.1+incompatible
github.com/moby/sys/mountinfo v0.1.3
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/runc v1.0.0-rc91
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2
github.com/opencontainers/selinux v1.6.0
github.com/opencontainers/runc v1.0.0-rc90
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700
github.com/opencontainers/selinux v1.5.2
github.com/pkg/errors v0.9.1
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
github.com/sirupsen/logrus v1.6.0
github.com/stretchr/testify v1.6.1
github.com/stretchr/testify v1.6.0
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/vbatts/tar-split v0.11.1
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9
gotest.tools v2.2.0+incompatible
)
go 1.13

View File

@@ -5,17 +5,11 @@ github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6tr
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/checkpoint-restore/go-criu/v4 v4.0.2 h1:jt+rnBIhFtPw0fhtpYGcUOilh4aO9Hj7r+YLEtf30uA=
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3 h1:qcqzLJa2xCo9sgdCzpT/SJSYxROTEstuhf7ZBHMirms=
github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v1.0.0 h1:fU3UuQapBs+zLJu82NhR11Rif1ny2zfMMAyPJzSN5tQ=
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
@@ -24,12 +18,6 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -37,8 +25,6 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -48,24 +34,22 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg=
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -78,23 +62,16 @@ github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvO
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/moby/sys/mountinfo v0.1.3 h1:KIrhRO14+AkwKvG/g2yIpNMOUVZ02xNhOw8KY1WsLOI=
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc91 h1:Tp8LWs5G8rFpzTsbRjAtQkPVexhCu0bnANE5IfIhJ6g=
github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8=
github.com/opencontainers/runc v1.0.0-rc90 h1:4+xo8mtWixbHoEm451+WJNUrq12o2/tDsyK9Vgc/NcA=
github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q=
github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -103,12 +80,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
@@ -118,24 +89,16 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho=
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -161,14 +124,10 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775 h1:TC0v2RSO1u2kn1ZugjrFXkRZAEaqMN/RW+OTZkBzmLE=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -178,8 +137,6 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=

View File

@@ -10,7 +10,6 @@ import (
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -466,19 +465,6 @@ func (r *imageStore) addMappedTopLayer(id, layer string) error {
return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}
func (r *imageStore) removeMappedTopLayer(id, layer string) error {
if image, ok := r.lookup(id); ok {
initialLen := len(image.MappedTopLayers)
image.MappedTopLayers = stringutils.RemoveFromSlice(image.MappedTopLayers, layer)
// No layer was removed. No need to save.
if initialLen == len(image.MappedTopLayers) {
return nil
}
return r.Save()
}
return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}
func (r *imageStore) Metadata(id string) (string, error) {
if image, ok := r.lookup(id); ok {
return image.Metadata, nil

View File

@@ -772,20 +772,7 @@ func (r *layerStore) Mounted(id string) (int, error) {
}
func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) {
// check whether options include ro option
hasReadOnlyOpt := func(opts []string) bool {
for _, item := range opts {
if item == "ro" {
return true
}
}
return false
}
// You are not allowed to mount layers from readonly stores if they
// are not mounted read/only.
if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
if !r.IsReadWrite() {
return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
r.mountsLockfile.Lock()
@@ -1013,7 +1000,6 @@ func (r *layerStore) deleteInternal(id string) error {
if layer.MountPoint != "" {
delete(r.bymount, layer.MountPoint)
}
r.deleteInDigestMap(id)
toDeleteIndex := -1
for i, candidate := range r.layers {
if candidate.ID == id {
@@ -1045,27 +1031,6 @@ func (r *layerStore) deleteInternal(id string) error {
return err
}
func (r *layerStore) deleteInDigestMap(id string) {
for digest, layers := range r.bycompressedsum {
for i, layerID := range layers {
if layerID == id {
layers = append(layers[:i], layers[i+1:]...)
r.bycompressedsum[digest] = layers
break
}
}
}
for digest, layers := range r.byuncompressedsum {
for i, layerID := range layers {
if layerID == id {
layers = append(layers[:i], layers[i+1:]...)
r.byuncompressedsum[digest] = layers
break
}
}
}
}
func (r *layerStore) Delete(id string) error {
layer, ok := r.lookup(id)
if !ok {
@@ -1342,7 +1307,6 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
if err != nil {
return -1, err
}
defer idLogger.Close()
payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, io.MultiWriter(uncompressedCounter, idLogger)), metadata, storage.NewDiscardFilePutter())
if err != nil {
return -1, err
@@ -1357,6 +1321,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
return -1, err
}
compressor.Close()
idLogger.Close()
if err == nil {
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
return -1, err

View File

@@ -390,20 +390,16 @@ func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
return mode
}
// ReadSecurityXattrToTarHeader reads security.capability, security.ima
// xattrs from filesystem to a tar header
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
capability, err := system.Lgetxattr(path, "security.capability")
if err != nil && err != system.EOPNOTSUPP && err != system.ErrNotSupportedPlatform {
return err
}
for _, xattr := range []string{"security.capability", "security.ima"} {
capability, err := system.Lgetxattr(path, xattr)
if err != nil && err != system.EOPNOTSUPP && err != system.ErrNotSupportedPlatform {
return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path)
}
if capability != nil {
hdr.Xattrs[xattr] = string(capability)
}
if capability != nil {
hdr.Xattrs = make(map[string]string)
hdr.Xattrs["security.capability"] = string(capability)
}
return nil
}
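
system.Lgetxattr above is storage's own wrapper; a rough Linux-only equivalent using golang.org/x/sys/unix, where the helper name `readXattr` and the 128-byte initial buffer are illustrative choices.

```go
package sketch

import "golang.org/x/sys/unix"

// readXattr reads one extended attribute without following symlinks, roughly
// what system.Lgetxattr does for the tar-header population above.
func readXattr(path, attr string) ([]byte, error) {
	buf := make([]byte, 128)
	sz, err := unix.Lgetxattr(path, attr, buf)
	if err == unix.ERANGE {
		// Attribute is larger than the buffer: ask the kernel for its size and retry.
		sz, err = unix.Lgetxattr(path, attr, nil)
		if err != nil {
			return nil, err
		}
		buf = make([]byte, sz)
		sz, err = unix.Lgetxattr(path, attr, buf)
	}
	if err != nil {
		return nil, err
	}
	return buf[:sz], nil
}
```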
@@ -602,7 +598,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return nil
}
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, buffer []byte) error {
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool) error {
// hdr.Mode is in linux format, which we can use for sycalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
@@ -626,7 +622,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
if err != nil {
return err
}
if _, err := io.CopyBuffer(file, reader, buffer); err != nil {
if _, err := io.Copy(file, reader); err != nil {
file.Close()
return err
}
@@ -942,7 +938,6 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
rootIDs := idMappings.RootPair()
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
buffer := make([]byte, 1<<20)
// Iterate through the files in the archive.
loop:
@@ -1039,7 +1034,7 @@ loop:
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors); err != nil {
return err
}
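
One side of the createTarFile change above threads a single shared buffer through io.CopyBuffer instead of letting io.Copy allocate per file; a minimal sketch of that pattern, with `copyJob` and `extractAll` as assumed names.

```go
package sketch

import "io"

type copyJob struct {
	dst io.Writer
	src io.Reader
}

// extractAll reuses one 1 MiB buffer across every copy via io.CopyBuffer,
// the same idea as the buffer passed into createTarFile above.
func extractAll(jobs []copyJob) error {
	buffer := make([]byte, 1<<20)
	for _, j := range jobs {
		if _, err := io.CopyBuffer(j.dst, j.src, buffer); err != nil {
			return err
		}
	}
	return nil
}
```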

View File

@@ -18,11 +18,9 @@ func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.Sta
if cuid, cgid, err := newInfo.idMappings.ToContainer(idtools.IDPair{UID: int(uid), GID: int(gid)}); err == nil {
uid = uint32(cuid)
gid = uint32(cgid)
if oldInfo != nil {
if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUID), GID: int(oldGID)}); err == nil {
oldUID = uint32(oldcuid)
oldGID = uint32(oldcgid)
}
if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUID), GID: int(oldGID)}); err == nil {
oldUID = uint32(oldcuid)
oldGID = uint32(oldcgid)
}
}
ownerChanged := uid != oldUID || gid != oldGID

View File

@@ -37,7 +37,6 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
buffer := make([]byte, 1<<20)
// Iterate through the files in the archive.
for {
@@ -106,7 +105,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors); err != nil {
return 0, err
}
}
@@ -197,7 +196,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
return 0, err
}
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, buffer); err != nil {
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors); err != nil {
return 0, err
}

View File

@@ -39,10 +39,6 @@ type ThinpoolOptionsConfig struct {
// log_level sets the log level of devicemapper.
LogLevel string `toml:"log_level"`
// MetadataSize specifies the size of the metadata for the thinpool
// It will be used with the `pvcreate --metadata` option.
MetadataSize string `toml:"metadatasize"`
// MinFreeSpace specifies the min free space percent in a thin pool
// require for new device creation to
MinFreeSpace string `toml:"min_free_space"`
@@ -222,9 +218,6 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
if options.Thinpool.LogLevel != "" {
doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel))
}
if options.Thinpool.MetadataSize != "" {
doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize))
}
if options.Thinpool.MinFreeSpace != "" {
doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace))
}

View File

@@ -262,7 +262,7 @@ func (p *Pattern) compile() error {
}
}
regStr += "(" + escSL + ".*)?$"
regStr += "(/.*)?$"
re, err := regexp.Compile(regStr)
if err != nil {

View File

@@ -35,9 +35,9 @@ type lockfile struct {
// necessary.
func openLock(path string, ro bool) (int, error) {
if ro {
return unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC, 0)
return unix.Open(path, os.O_RDONLY, 0)
}
return unix.Open(path, os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH)
return unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
}
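
One side of the openLock hunk above sets close-on-exec atomically at open time instead of calling unix.CloseOnExec afterwards; a self-contained sketch of that variant, with `openLockFile` as an assumed name.

```go
package sketch

import (
	"os"

	"golang.org/x/sys/unix"
)

// openLockFile opens (or creates) a lock file with O_CLOEXEC already set, so
// the descriptor cannot leak across an exec between open and a later fcntl.
func openLockFile(path string, readOnly bool) (int, error) {
	if readOnly {
		return unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC, 0)
	}
	return unix.Open(path, os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE,
		unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH)
}
```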
// createLockerForPath returns a Locker object, possibly (depending on the platform)
@@ -106,6 +106,7 @@ func (l *lockfile) lock(lType int16, recursive bool) {
if err != nil {
panic(fmt.Sprintf("error opening %q: %v", l.file, err))
}
unix.CloseOnExec(fd)
l.fd = uintptr(fd)
// Optimization: only use the (expensive) fcntl syscall when

View File

@@ -4,6 +4,8 @@ import (
"sort"
"strconv"
"strings"
"github.com/containers/storage/pkg/fileutils"
)
// mountError holds an error from a mount or unmount operation
@@ -41,6 +43,33 @@ func (e *mountError) Cause() error {
return e.err
}
// GetMounts retrieves a list of mounts for the current running process.
func GetMounts() ([]*Info, error) {
return parseMountTable()
}
// Mounted determines if a specified mountpoint has been mounted.
// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
func Mounted(mountpoint string) (bool, error) {
entries, err := parseMountTable()
if err != nil {
return false, err
}
mountpoint, err = fileutils.ReadSymlinkedPath(mountpoint)
if err != nil {
return false, err
}
// Search the table for the mountpoint
for _, e := range entries {
if e.Mountpoint == mountpoint {
return true, nil
}
}
return false, nil
}
// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See

View File

@@ -1,21 +1,40 @@
package mount
import (
"github.com/containers/storage/pkg/fileutils"
"github.com/moby/sys/mountinfo"
)
// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
type Info struct {
// ID is a unique identifier of the mount (may be reused after umount).
ID int
type Info = mountinfo.Info
// Parent indicates the ID of the mount parent (or of self for the top of the
// mount tree).
Parent int
func GetMounts() ([]*Info, error) {
return mountinfo.GetMounts(nil)
}
// Mounted determines if a specified mountpoint has been mounted.
func Mounted(mountpoint string) (bool, error) {
mountpoint, err := fileutils.ReadSymlinkedPath(mountpoint)
if err != nil {
return false, err
}
return mountinfo.Mounted(mountpoint)
// Major indicates one half of the device ID which identifies the device class.
Major int
// Minor indicates one half of the device ID which identifies a specific
// instance of device.
Minor int
// Root of the mount within the filesystem.
Root string
// Mountpoint indicates the mount point relative to the process's root.
Mountpoint string
// Opts represents mount-specific options.
Opts string
// Optional represents optional fields.
Optional string
// Fstype indicates the type of filesystem, such as EXT3.
Fstype string
// Source indicates filesystem specific information or "none".
Source string
// VfsOpts represents per super block options.
VfsOpts string
}
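
One side of this hunk delegates the check to github.com/moby/sys/mountinfo; a small sketch of the same idea, where filepath.EvalSymlinks stands in for fileutils.ReadSymlinkedPath and `isMounted` is an assumed helper name.

```go
package sketch

import (
	"path/filepath"

	"github.com/moby/sys/mountinfo"
)

// isMounted resolves a possibly symlinked path and then asks mountinfo
// whether the resolved path is a mount point.
func isMounted(path string) (bool, error) {
	resolved, err := filepath.EvalSymlinks(path)
	if err != nil {
		return false, err
	}
	return mountinfo.Mounted(resolved)
}
```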

View File

@@ -1,4 +1,4 @@
package mountinfo
package mount
/*
#include <sys/param.h>
@@ -13,8 +13,9 @@ import (
"unsafe"
)
// parseMountTable returns information about mounted filesystems
func parseMountTable(filter FilterFunc) ([]*Info, error) {
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts.
func parseMountTable() ([]*Info, error) {
var rawEntries *C.struct_statfs
count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
@@ -31,23 +32,10 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) {
var out []*Info
for _, entry := range entries {
var mountinfo Info
var skip, stop bool
mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
if filter != nil {
// filter out entries we're not interested in
skip, stop = filter(&mountinfo)
if skip {
continue
}
}
mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
out = append(out, &mountinfo)
if stop {
break
}
}
return out, nil
}

Some files were not shown because too many files have changed in this diff.