mirror of https://github.com/containers/skopeo.git
synced 2026-02-02 23:39:02 +00:00

Compare commits
54 Commits:

1715c90841
187299a20b
89d8bddd9b
ba649c56bf
3456577268
b52e700666
ee32f1f7aa
5af0da9de6
879a6d793f
2734f93e30
2b97124e4a
7815a5801e
501e1be3cf
fc386a6dca
2a134a0ddd
17250d7e8d
65d28709c3
d6c6c78d1b
67ffa00b1d
a581847345
bcd26a4ae4
e38c345f23
0421fb04c2
82186b916f
15eed5beda
81837bd55b
3dec6a1cdf
fe14427129
be27588418
fb84437cd1
d9b495ca38
6b93d4794f
5d3849a510
fef142f811
2684e51aa5
e814f9605a
5d136a46ed
b0b750dfa1
e3034e1d91
1a259b76da
ae64ff7084
d67d3a4620
196bc48723
1c6c7bc481
6e23a32282
f398c9c035
0144aa8dc5
0df5dcf09c
f9baaa6b87
67ff78925b
5c611083f2
976d57ea45
63569fcd63
98b3a13b46
@@ -15,7 +15,7 @@ that we follow.

 ## Reporting Issues

 Before reporting an issue, check our backlog of
-[open issues](https://github.com/projectatomic/skopeo/issues)
+[open issues](https://github.com/containers/skopeo/issues)
 to see if someone else has already reported it. If so, feel free to add
 your scenario, or additional information, to the discussion. Or simply
 "subscribe" to it to be notified when it is updated.

@@ -122,9 +122,9 @@ IRC group on `irc.freenode.net` called `container-projects`
 that has been setup.

 For discussions around issues/bugs and features, you can use the github
-[issues](https://github.com/projectatomic/skopeo/issues)
+[issues](https://github.com/containers/skopeo/issues)
 and
-[PRs](https://github.com/projectatomic/skopeo/pulls)
+[PRs](https://github.com/containers/skopeo/pulls)
 tracking system.

 <!--
@@ -42,8 +42,9 @@ RUN set -x \

 ENV GOPATH /usr/share/gocode:/go
 ENV PATH $GOPATH/bin:/usr/share/gocode/bin:$PATH
-RUN go get github.com/golang/lint/golint
-WORKDIR /go/src/github.com/projectatomic/skopeo
-COPY . /go/src/github.com/projectatomic/skopeo
+RUN go version
+RUN go get golang.org/x/lint/golint
+WORKDIR /go/src/github.com/containers/skopeo
+COPY . /go/src/github.com/containers/skopeo

 #ENTRYPOINT ["hack/dind"]
@@ -11,4 +11,4 @@ RUN apt-get update && apt-get install -y \
     libostree-dev

 ENV GOPATH=/
-WORKDIR /src/github.com/projectatomic/skopeo
+WORKDIR /src/github.com/containers/skopeo
Makefile (40 changed lines)
@@ -1,4 +1,4 @@
-.PHONY: all binary build-container build-local clean install install-binary install-completions shell test-integration
+.PHONY: all binary build-container docs build-local clean install install-binary install-completions shell test-integration vendor

 export GO15VENDOREXPERIMENT=1

@@ -24,6 +24,7 @@ SIGSTOREDIR=${DESTDIR}/var/lib/atomic/sigstore
 BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
 GO_MD2MAN ?= go-md2man
 GO ?= go
+CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)

 ifeq ($(DEBUG), 1)
 override GOGCFLAGS += -N -l
@@ -34,17 +35,17 @@ ifeq ($(shell go env GOOS), linux)
 endif

 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-DOCKER_IMAGE := skopeo-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+IMAGE := skopeo-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 # set env like gobuildtag?
-DOCKER_FLAGS := docker run --rm -i #$(DOCKER_ENVS)
+CONTAINER_CMD := ${CONTAINER_RUNTIME} run --rm -i -e TESTFLAGS="$(TESTFLAGS)" #$(CONTAINER_ENVS)
 # if this session isn't interactive, then we don't want to allocate a
 # TTY, which would fail, but if it is interactive, we do want to attach
 # so that the user can send e.g. ^C through.
 INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0)
 ifeq ($(INTERACTIVE), 1)
-	DOCKER_FLAGS += -t
+	CONTAINER_CMD += -t
 endif
-DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
+CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"

 GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)

@@ -55,25 +56,29 @@ LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
 LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(DARWIN_BUILD_TAG)
 BUILDTAGS += $(LOCAL_BUILD_TAGS)

 ifeq ($(DISABLE_CGO), 1)
 	override BUILDTAGS = containers_image_ostree_stub exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
 endif

 # make all DEBUG=1
 # Note: Uses the -N -l go compiler options to disable compiler optimizations
 #       and inlining. Using these build options allows you to subsequently
 #       use source debugging tools like delve.
 all: binary docs

-# Build a docker image (skopeobuild) that has everything we need to build.
+# Build a container image (skopeobuild) that has everything we need to build.
 # Then do the build and the output (skopeo) should appear in current dir
 binary: cmd/skopeo
-	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
-	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label:disable -v $$(pwd):/src/github.com/containers/skopeo \
 		skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'

 binary-static: cmd/skopeo
-	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
-	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label:disable -v $$(pwd):/src/github.com/containers/skopeo \
 		skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'

-# Build w/o using Docker containers
+# Build w/o using containers
 binary-local:
 	$(GPGME_ENV) $(GO) build ${GO_DYN_FLAGS} -ldflags "-X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo

@@ -81,12 +86,11 @@ binary-local-static:
 	$(GPGME_ENV) $(GO) build -ldflags "-extldflags \"-static\" -X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo

 build-container:
-	docker build ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" .
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .

 docs/%.1: docs/%.1.md
 	$(GO_MD2MAN) -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@

 .PHONY: docs
 docs: $(MANPAGES_MD:%.md=%)

 clean:
@@ -112,20 +116,20 @@ install-completions:
 	install -m 644 completions/bash/skopeo ${BASHINSTALLDIR}/skopeo

 shell: build-container
-	$(DOCKER_RUN_DOCKER) bash
+	$(CONTAINER_RUN) bash

 check: validate test-unit test-integration

 # The tests can run out of entropy and block in containers, so replace /dev/random.
 test-integration: build-container
-	$(DOCKER_RUN_DOCKER) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'
+	$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'

 test-unit: build-container
 	# Just call (make test unit-local) here instead of worrying about environment differences, e.g. GO15VENDOREXPERIMENT.
-	$(DOCKER_RUN_DOCKER) make test-unit-local BUILDTAGS='$(BUILDTAGS)'
+	$(CONTAINER_RUN) make test-unit-local BUILDTAGS='$(BUILDTAGS)'

 validate: build-container
-	$(DOCKER_RUN_DOCKER) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
+	$(CONTAINER_RUN) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet

 # This target is only intended for development, e.g. executing it from an IDE. Use (make test) for CI or pre-release testing.
 test-all-local: validate-local test-unit-local
@@ -134,7 +138,7 @@ validate-local:
 	hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet

 test-unit-local:
-	$(GPGME_ENV) $(GO) test -tags "$(BUILDTAGS)" $$($(GO) list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/projectatomic/skopeo/\(integration\|vendor/.*\)$$')
+	$(GPGME_ENV) $(GO) test -tags "$(BUILDTAGS)" $$($(GO) list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')

 vendor: vendor.conf
 	vndr -whitelist '^github.com/containers/image/docs/.*'

README.md (17 changed lines)
@@ -1,7 +1,7 @@
-skopeo [](https://travis-ci.org/projectatomic/skopeo)
+skopeo [](https://travis-ci.org/containers/skopeo)
 =

-<img src="https://cdn.rawgit.com/projectatomic/skopeo/master/docs/skopeo.svg" width="250">
+<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">

 ----

@@ -12,7 +12,7 @@ to also build documentation, see below.
 ```

 To build a pure-Go static binary (disables ostree, devicemapper, btrfs, and gpgme):

 ```sh
 $ make binary-static DISABLE_CGO=1
 ```

 ### Building documentation
 To build the manual you will need go-md2man.
@@ -1,7 +1,6 @@
 package main

 import (
-	"context"
 	"errors"
 	"fmt"
 	"os"
@@ -34,7 +33,8 @@ func contextsFromGlobalOptions(c *cli.Context) (*types.SystemContext, *types.Sys

 func copyHandler(c *cli.Context) error {
 	if len(c.Args()) != 2 {
-		return errors.New("Usage: copy source destination")
+		cli.ShowCommandHelp(c, "copy")
+		return errors.New("Exactly two arguments expected")
 	}

 	policyContext, err := getPolicyContext(c)
@@ -87,7 +87,10 @@ func copyHandler(c *cli.Context) error {
 		}
 	}

-	return copy.Image(context.Background(), policyContext, destRef, srcRef, &copy.Options{
+	ctx, cancel := commandTimeoutContextFromGlobalOptions(c)
+	defer cancel()
+
+	_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
 		RemoveSignatures: removeSignatures,
 		SignBy:           signBy,
 		ReportWriter:     os.Stdout,
@@ -95,6 +98,7 @@ func copyHandler(c *cli.Context) error {
 		DestinationCtx:        destinationCtx,
 		ForceManifestMIMEType: manifestType,
 	})
+	return err
 }

 var copyCmd = cli.Command{
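The `_, err = copy.Image(...)` line above tracks an API change shown later in the vendored `copy/copy.go` diff: `copy.Image` now returns the written manifest alongside the error, and the CLI simply discards it. A minimal, self-contained sketch of driving the new API directly; the image references, accept-anything policy, and timeout are illustrative assumptions, not part of this changeset:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/containers/image/copy"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports/alltransports"
)

func main() {
	// Hypothetical references; any transport:details strings would do.
	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	destRef, err := alltransports.ParseImageName("dir:/tmp/busybox")
	if err != nil {
		panic(err)
	}

	// An insecure accept-anything policy, for illustration only.
	policy := &signature.Policy{Default: []signature.PolicyRequirement{
		signature.NewPRInsecureAcceptAnything(),
	}}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer policyContext.Destroy()

	// Mirrors the new --command-timeout plumbing: bound the whole copy.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// copy.Image now returns the manifest written to the destination.
	manifest, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
		ReportWriter: os.Stdout,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("wrote manifest (%d bytes)\n", len(manifest))
}
```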
@@ -1,7 +1,6 @@
 package main

 import (
-	"context"
 	"errors"
 	"fmt"
 	"strings"
@@ -25,7 +24,10 @@ func deleteHandler(c *cli.Context) error {
 	if err != nil {
 		return err
 	}
-	return ref.DeleteImage(context.Background(), sys)
+
+	ctx, cancel := commandTimeoutContextFromGlobalOptions(c)
+	defer cancel()
+	return ref.DeleteImage(ctx, sys)
 }

 var deleteCmd = cli.Command{
@@ -1,7 +1,6 @@
 package main

 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"strings"
@@ -67,7 +66,8 @@ var inspectCmd = cli.Command{
 		},
 	},
 	Action: func(c *cli.Context) (retErr error) {
-		ctx := context.Background()
+		ctx, cancel := commandTimeoutContextFromGlobalOptions(c)
+		defer cancel()

 		img, err := parseImage(ctx, c)
 		if err != nil {
@@ -96,10 +96,10 @@ var inspectCmd = cli.Command{
 			return err
 		}
 		outputData := inspectOutput{
-			Name: "", // Possibly overridden for a docker.Image.
+			Name: "", // Set below if DockerReference() is known
 			Tag:  imgInspect.Tag,
 			// Digest is set below.
-			RepoTags:      []string{}, // Possibly overriden for a docker.Image.
+			RepoTags:      []string{}, // Possibly overriden for docker.Transport.
 			Created:       imgInspect.Created,
 			DockerVersion: imgInspect.DockerVersion,
 			Labels:        imgInspect.Labels,
@@ -111,9 +111,15 @@ var inspectCmd = cli.Command{
 		if err != nil {
 			return fmt.Errorf("Error computing manifest digest: %v", err)
 		}
-		if dockerImg, ok := img.(*docker.Image); ok {
-			outputData.Name = dockerImg.SourceRefFullName()
-			outputData.RepoTags, err = dockerImg.GetRepositoryTags(ctx)
+		if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
+			outputData.Name = dockerRef.Name()
+		}
+		if img.Reference().Transport() == docker.Transport {
+			sys, err := contextFromGlobalOptions(c, "")
+			if err != nil {
+				return err
+			}
+			outputData.RepoTags, err = docker.GetRepositoryTags(ctx, sys, img.Reference())
 			if err != nil {
 				// some registries may decide to block the "list all tags" endpoint
 				// gracefully allow the inspect to continue in this case. Currently
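The inspect handler now derives `Name` from `DockerReference()` and fetches `RepoTags` through the new package-level `docker.GetRepositoryTags`, tolerating registries that block the tag-list endpoint. A hedged sketch of that fallback as a standalone helper; the helper name and logging are illustrative assumptions:

```go
package main

import (
	"context"

	"github.com/containers/image/docker"
	"github.com/containers/image/types"
	"github.com/sirupsen/logrus"
)

// listTags mirrors the inspect handler's fallback: ask the registry for the
// repository's tags, but treat a blocked tag-list endpoint as non-fatal.
func listTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) []string {
	if ref.Transport() != docker.Transport {
		return nil // Tag listing only makes sense for docker-registry references.
	}
	tags, err := docker.GetRepositoryTags(ctx, sys, ref)
	if err != nil {
		// Some registries block the "list all tags" endpoint; continue anyway.
		logrus.Warnf("could not list tags: %v", err)
		return nil
	}
	return tags
}
```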
@@ -1,7 +1,6 @@
 package main

 import (
-	"context"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -26,7 +25,8 @@ var layersCmd = cli.Command{
 			return errors.New("Usage: layers imageReference [layer...]")
 		}

-		ctx := context.Background()
+		ctx, cancel := commandTimeoutContextFromGlobalOptions(c)
+		defer cancel()

 		sys, err := contextFromGlobalOptions(c, "")
 		if err != nil {
@@ -5,8 +5,8 @@ import (
 	"os"

 	"github.com/containers/image/signature"
+	"github.com/containers/skopeo/version"
 	"github.com/containers/storage/pkg/reexec"
-	"github.com/projectatomic/skopeo/version"
 	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
 )
@@ -60,6 +60,10 @@ func createApp() *cli.App {
 			Value: "",
 			Usage: "use `OS` instead of the running OS for choosing images",
 		},
+		cli.DurationFlag{
+			Name:  "command-timeout",
+			Usage: "timeout for the command execution",
+		},
 	}
 	app.Before = func(c *cli.Context) error {
 		if c.GlobalBool("debug") {
@@ -40,6 +40,15 @@ func contextFromGlobalOptions(c *cli.Context, flagPrefix string) (*types.SystemC
 	return ctx, nil
 }

+func commandTimeoutContextFromGlobalOptions(c *cli.Context) (context.Context, context.CancelFunc) {
+	ctx := context.Background()
+	var cancel context.CancelFunc = func() {}
+	if c.GlobalDuration("command-timeout") > 0 {
+		ctx, cancel = context.WithTimeout(ctx, c.GlobalDuration("command-timeout"))
+	}
+	return ctx, cancel
+}
+
 func parseCreds(creds string) (string, string, error) {
 	if creds == "" {
 		return "", "", errors.New("credentials can't be empty")
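The helper above returns a no-op `CancelFunc` when no timeout is set, so every handler can unconditionally `defer cancel()` whether or not `--command-timeout` was given; that is exactly the pattern the `copy`, `delete`, `inspect`, and `layers` hunks adopt. A standalone sketch of the same idea outside the CLI plumbing (names here are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// timeoutContext mirrors commandTimeoutContextFromGlobalOptions: a zero
// timeout means "no deadline", and the returned cancel func is always
// safe to defer.
func timeoutContext(timeout time.Duration) (context.Context, context.CancelFunc) {
	ctx := context.Background()
	cancel := context.CancelFunc(func() {})
	if timeout > 0 {
		ctx, cancel = context.WithTimeout(ctx, timeout)
	}
	return ctx, cancel
}

func main() {
	ctx, cancel := timeoutContext(30 * time.Second)
	defer cancel() // Valid even when no timeout was requested.
	deadline, ok := ctx.Deadline()
	fmt.Println(deadline, ok)
}
```

Returning a usable cancel function in both branches is the design choice that keeps every call site uniform.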
@@ -110,6 +110,7 @@ _skopeo_skopeo() {
 	--registries.d
 	--override-arch
 	--override-os
+	--command-timeout
 	"
 	local boolean_options="
 	--insecure-policy
@@ -59,6 +59,8 @@ Most commands refer to container images, using a _transport_`:`_details_ format.

 **--override-os** _OS_ Use _OS_ instead of the running OS for choosing images.

+**--command-timeout** _duration_ Timeout for the command execution.
+
 **--help**|**-h** Show help

 **--version**|**-v** print the version number
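`--command-timeout` is wired up as a `cli.DurationFlag` (see the `createApp` hunk above), and urfave/cli parses such flags with Go's `time.ParseDuration`, so the _duration_ placeholder accepts Go duration syntax. A small sketch of accepted forms; the sample values are illustrative:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// cli.DurationFlag values go through time.ParseDuration, so the man
	// page's _duration_ accepts forms like these.
	for _, s := range []string{"90s", "2m30s", "1h"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(s, "=", d)
	}
}
```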
@@ -6,7 +6,7 @@ set -e
 #
 # Requirements:
 # - The current directory should be a checkout of the skopeo source code
-#   (https://github.com/projectatomic/skopeo). Whatever version is checked out
+#   (https://github.com/containers/skopeo). Whatever version is checked out
 #   will be built.
 # - The script is intended to be run inside the docker container specified
 #   in the Dockerfile at the root of the source. In other words:
@@ -19,7 +19,7 @@ set -e

 set -o pipefail

-export SKOPEO_PKG='github.com/projectatomic/skopeo'
+export SKOPEO_PKG='github.com/containers/skopeo'
 export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 export MAKEDIR="$SCRIPTDIR/make"

@@ -4,7 +4,7 @@ if [ -z "$VALIDATE_UPSTREAM" ]; then
|
||||
# this is kind of an expensive check, so let's not do this twice if we
|
||||
# are running more than one validate bundlescript
|
||||
|
||||
VALIDATE_REPO='https://github.com/projectatomic/skopeo.git'
|
||||
VALIDATE_REPO='https://github.com/containers/skopeo.git'
|
||||
VALIDATE_BRANCH='master'
|
||||
|
||||
if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
|
||||
|
||||
@@ -4,13 +4,14 @@ set -e
 export GOPATH=$(pwd)/_gopath
 export PATH=$GOPATH/bin:$PATH

-_projectatomic="${GOPATH}/src/github.com/projectatomic"
-mkdir -vp ${_projectatomic}
-ln -vsf $(pwd) ${_projectatomic}/skopeo
+_containers="${GOPATH}/src/github.com/containers"
+mkdir -vp ${_containers}
+ln -vsf $(pwd) ${_containers}/skopeo

-go get -u github.com/cpuguy83/go-md2man github.com/golang/lint/golint
+go version
+go get -u github.com/cpuguy83/go-md2man golang.org/x/lint/golint

-cd ${_projectatomic}/skopeo
+cd ${_containers}/skopeo
 make validate-local test-unit-local binary-local
 sudo make install
 skopeo -v
@@ -5,8 +5,8 @@ import (
 	"os/exec"
 	"testing"

+	"github.com/containers/skopeo/version"
 	"github.com/go-check/check"
-	"github.com/projectatomic/skopeo/version"
 )

 const (
vendor.conf (12 changed lines)
@@ -1,4 +1,6 @@
 github.com/urfave/cli v1.17.0
+github.com/kr/pretty v0.1.0
+github.com/kr/text v0.1.0
 github.com/containers/image master
 github.com/opencontainers/go-digest master
 gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
@@ -10,9 +12,11 @@ github.com/davecgh/go-spew master
 github.com/pmezard/go-difflib master
 github.com/pkg/errors master
 golang.org/x/crypto master
+github.com/ulikunitz/xz v0.5.4
 # docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
-github.com/docker/docker 30eb4d8cdc422b023d5f11f29a82ecb73554183b
-github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
+github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341
+github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
+github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
 github.com/vbatts/tar-split v0.10.2
 github.com/gorilla/context 14f550f51a
 github.com/gorilla/mux e444e69cbd
@@ -35,7 +39,7 @@ github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
 github.com/docker/libtrust master
 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
 github.com/opencontainers/runc master
-github.com/opencontainers/image-spec 149252121d044fddff670adcdc67f33148e16226
+github.com/opencontainers/image-spec unique-ids https://github.com/mtrmac/image-spec
 # -- start OCI image validation requirements.
 github.com/opencontainers/runtime-spec v1.0.0
 github.com/opencontainers/image-tools 6d941547fa1df31900990b3fb47ec2468c9c6469
@@ -47,7 +51,6 @@ github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
 # -- end OCI image validation requirements
 github.com/mtrmac/gpgme master
 # openshift/origin' k8s dependencies as of OpenShift v1.1.5
-github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
 k8s.io/client-go master
 github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
 gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4
@@ -60,3 +63,4 @@ golang.org/x/sys master
 github.com/tchap/go-patricia v2.2.6
 github.com/BurntSushi/toml master
 github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
+github.com/syndtr/gocapability master
vendor/github.com/containerd/continuity/LICENSE (202 lines, generated, vendored, new file)
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
vendor/github.com/containerd/continuity/README.md (74 lines, generated, vendored, new file)
@@ -0,0 +1,74 @@
# continuity

[](https://godoc.org/github.com/containerd/continuity)
[](https://travis-ci.org/containerd/continuity)

A transport-agnostic, filesystem metadata manifest system

This project is a staging area for experiments in providing transport agnostic
metadata storage.

Please see https://github.com/opencontainers/specs/issues/11 for more details.

## Manifest Format

A continuity manifest encodes filesystem metadata in Protocol Buffers.
Please refer to [proto/manifest.proto](proto/manifest.proto).

## Usage

Build:

```console
$ make
```

Create a manifest (of this repo itself):

```console
$ ./bin/continuity build . > /tmp/a.pb
```

Dump a manifest:

```console
$ ./bin/continuity ls /tmp/a.pb
...
-rw-rw-r--      270 B   /.gitignore
-rw-rw-r--      88 B    /.mailmap
-rw-rw-r--      187 B   /.travis.yml
-rw-rw-r--      359 B   /AUTHORS
-rw-rw-r--      11 kB   /LICENSE
-rw-rw-r--      1.5 kB  /Makefile
...
-rw-rw-r--      986 B   /testutil_test.go
drwxrwxr-x      0 B     /version
-rw-rw-r--      478 B   /version/version.go
```

Verify a manifest:

```console
$ ./bin/continuity verify . /tmp/a.pb
```

Break the directory and restore using the manifest:
```console
$ chmod 777 Makefile
$ ./bin/continuity verify . /tmp/a.pb
2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r--
$ ./bin/continuity apply . /tmp/a.pb
$ stat -c %a Makefile
664
$ ./bin/continuity verify . /tmp/a.pb
```

## Contribution Guide
### Building Proto Package

If you change the proto file you will need to rebuild the generated Go with `go generate`.

```console
$ go generate ./proto
```
vendor/github.com/containerd/continuity/pathdriver/path_driver.go (85 lines, generated, vendored, new file)
@@ -0,0 +1,85 @@
package pathdriver

import (
	"path/filepath"
)

// PathDriver provides all of the path manipulation functions in a common
// interface. The context should call these and never use the `filepath`
// package or any other package to manipulate paths.
type PathDriver interface {
	Join(paths ...string) string
	IsAbs(path string) bool
	Rel(base, target string) (string, error)
	Base(path string) string
	Dir(path string) string
	Clean(path string) string
	Split(path string) (dir, file string)
	Separator() byte
	Abs(path string) (string, error)
	Walk(string, filepath.WalkFunc) error
	FromSlash(path string) string
	ToSlash(path string) string
	Match(pattern, name string) (matched bool, err error)
}

// pathDriver is a simple default implementation calls the filepath package.
type pathDriver struct{}

// LocalPathDriver is the exported pathDriver struct for convenience.
var LocalPathDriver PathDriver = &pathDriver{}

func (*pathDriver) Join(paths ...string) string {
	return filepath.Join(paths...)
}

func (*pathDriver) IsAbs(path string) bool {
	return filepath.IsAbs(path)
}

func (*pathDriver) Rel(base, target string) (string, error) {
	return filepath.Rel(base, target)
}

func (*pathDriver) Base(path string) string {
	return filepath.Base(path)
}

func (*pathDriver) Dir(path string) string {
	return filepath.Dir(path)
}

func (*pathDriver) Clean(path string) string {
	return filepath.Clean(path)
}

func (*pathDriver) Split(path string) (dir, file string) {
	return filepath.Split(path)
}

func (*pathDriver) Separator() byte {
	return filepath.Separator
}

func (*pathDriver) Abs(path string) (string, error) {
	return filepath.Abs(path)
}

// Note that filepath.Walk calls os.Stat, so if the context wants to
// to call Driver.Stat() for Walk, they need to create a new struct that
// overrides this method.
func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
	return filepath.Walk(root, walkFn)
}

func (*pathDriver) FromSlash(path string) string {
	return filepath.FromSlash(path)
}

func (*pathDriver) ToSlash(path string) string {
	return filepath.ToSlash(path)
}

func (*pathDriver) Match(pattern, name string) (bool, error) {
	return filepath.Match(pattern, name)
}
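The vendored package above hides `filepath` behind an interface so callers can substitute different path semantics; `LocalPathDriver` is the stock implementation that simply delegates. A minimal usage sketch; the paths are illustrative:

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
)

func main() {
	// LocalPathDriver delegates every method to the standard filepath package.
	var d pathdriver.PathDriver = pathdriver.LocalPathDriver
	joined := d.Join("var", "lib", "containers")
	fmt.Println(joined, d.IsAbs(joined)) // relative path, so IsAbs is false on Unix
}
```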
vendor/github.com/containerd/continuity/vendor.conf (13 lines, generated, vendored, new file)
@@ -0,0 +1,13 @@
bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748
github.com/dustin/go-humanize bb3d318650d48840a39aa21a027c6630e198e626
github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265
github.com/sirupsen/logrus 89742aefa4b206dcf400792f3bd35b542998eb3b
github.com/spf13/cobra 2da4a54c5ceefcee7ca5dd0eea1e18a3b6366489
github.com/spf13/pflag 4c012f6dcd9546820e378d0bdda4d8fc772cdfea
golang.org/x/crypto 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94
golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
golang.org/x/sys 665f6529cca930e27b831a0d1dafffbe1c172924
vendor/github.com/containers/image/README.md (14 changed lines, generated, vendored)
@@ -25,7 +25,7 @@ them as necessary, and to sign and verify images.
 The containers/image project is only a library with no user interface;
 you can either incorporate it into your Go programs, or use the `skopeo` tool:

-The [skopeo](https://github.com/projectatomic/skopeo) tool uses the
+The [skopeo](https://github.com/containers/skopeo) tool uses the
 containers/image library and takes advantage of many of its features,
 e.g. `skopeo copy` exposes the `containers/image/copy.Image` functionality.

@@ -42,7 +42,7 @@ What this project tests against dependencies-wise is located
 ## Building

 If you want to see what the library can do, or an example of how it is called,
-consider starting with the [skopeo](https://github.com/projectatomic/skopeo) tool
+consider starting with the [skopeo](https://github.com/containers/skopeo) tool
 instead.

 To integrate this library into your project, put it into `$GOPATH` or use
@@ -53,7 +53,7 @@ are also available

 This library, by default, also depends on the GpgME and libostree C libraries. Either install them:
 ```sh
-Fedora$ dnf install gpgme-devel libassuan-devel libostree-devel
+Fedora$ dnf install gpgme-devel libassuan-devel ostree-devel
 macOS$ brew install gpgme
 ```
 or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`)

@@ -65,13 +65,17 @@ the primary downside is that creating new signatures with the Golang-only implem
 - `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled
 and impossible to import when this build tag is in use.

-## Contributing
+## [Contributing](CONTRIBUTING.md)

+Information about contributing to this project.
+
 When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.

 ## License

-ASL 2.0
+Apache License 2.0

+SPDX-License-Identifier: Apache-2.0
+
 ## Contact

vendor/github.com/containers/image/copy/copy.go (78 changed lines, generated, vendored)
@@ -71,7 +71,6 @@ func (d *digestingReader) Read(p []byte) (int, error) {
 // copier allows us to keep track of diffID values for blobs, and other
 // data shared across one or more images in a possible manifest list.
 type copier struct {
-	copiedBlobs   map[digest.Digest]digest.Digest
 	cachedDiffIDs map[digest.Digest]digest.Digest
 	dest          types.ImageDestination
 	rawSource     types.ImageSource
@@ -103,8 +102,9 @@ type Options struct {
 }

 // Image copies image from srcRef to destRef, using policyContext to validate
-// source image admissibility.
-func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (retErr error) {
+// source image admissibility. It returns the manifest which was written to
+// the new copy of the image.
+func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (manifest []byte, retErr error) {
 	// NOTE this function uses an output parameter for the error return value.
 	// Setting this and returning is the ideal way to return an error.
 	//
@@ -122,7 +122,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,

 	dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
 	if err != nil {
-		return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
+		return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
 	}
 	defer func() {
 		if err := dest.Close(); err != nil {
@@ -132,7 +132,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,

 	rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
 	if err != nil {
-		return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
+		return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
 	}
 	defer func() {
 		if err := rawSource.Close(); err != nil {
@@ -141,7 +141,6 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 	}()

 	c := &copier{
-		copiedBlobs:   make(map[digest.Digest]digest.Digest),
 		cachedDiffIDs: make(map[digest.Digest]digest.Digest),
 		dest:          dest,
 		rawSource:     rawSource,
@@ -153,63 +152,63 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 	unparsedToplevel := image.UnparsedInstance(rawSource, nil)
 	multiImage, err := isMultiImage(ctx, unparsedToplevel)
 	if err != nil {
-		return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
+		return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
 	}

 	if !multiImage {
 		// The simple case: Just copy a single image.
-		if err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil {
-			return err
+		if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil {
+			return nil, err
 		}
 	} else {
 		// This is a manifest list. Choose a single image and copy it.
 		// FIXME: Copy to destinations which support manifest lists, one image at a time.
 		instanceDigest, err := image.ChooseManifestInstanceFromManifestList(ctx, options.SourceCtx, unparsedToplevel)
 		if err != nil {
-			return errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
+			return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
 		}
 		logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest)
 		unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)

-		if err := c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil {
-			return err
+		if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil {
+			return nil, err
 		}
 	}

 	if err := c.dest.Commit(ctx); err != nil {
-		return errors.Wrap(err, "Error committing the finished image")
+		return nil, errors.Wrap(err, "Error committing the finished image")
 	}

-	return nil
+	return manifest, nil
 }

 // Image copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
 // source image admissibility.
-func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (retErr error) {
+func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifest []byte, retErr error) {
 	// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
 	// Make sure we fail cleanly in such cases.
 	multiImage, err := isMultiImage(ctx, unparsedImage)
 	if err != nil {
 		// FIXME FIXME: How to name a reference for the sub-image?
-		return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
+		return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
 	}
 	if multiImage {
-		return fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
+		return nil, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
 	}

 	// Please keep this policy check BEFORE reading any other information about the image.
 	// (the multiImage check above only matches the MIME type, which we have received anyway.
 	// Actual parsing of anything should be deferred.)
 	if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
-		return errors.Wrap(err, "Source image rejected")
+		return nil, errors.Wrap(err, "Source image rejected")
 	}
 	src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
 	if err != nil {
-		return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
+		return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
 	}

 	if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil {
-		return err
+		return nil, err
 	}

 	var sigs [][]byte
@@ -219,14 +218,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 		c.Printf("Getting image source signatures\n")
 		s, err := src.Signatures(ctx)
 		if err != nil {
-			return errors.Wrap(err, "Error reading signatures")
+			return nil, errors.Wrap(err, "Error reading signatures")
 		}
 		sigs = s
 	}
 	if len(sigs) != 0 {
 		c.Printf("Checking if image destination supports signatures\n")
 		if err := c.dest.SupportsSignatures(ctx); err != nil {
-			return errors.Wrap(err, "Can not copy signatures")
+			return nil, errors.Wrap(err, "Can not copy signatures")
 		}
 	}

@@ -239,28 +238,28 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 	}

 	if err := ic.updateEmbeddedDockerReference(); err != nil {
-		return err
+		return nil, err
 	}

 	// We compute preferredManifestMIMEType only to show it in error messages.
 	// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
 	preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType)
 	if err != nil {
-		return err
+		return nil, err
 	}

 	// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
 	ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)

 	if err := ic.copyLayers(ctx); err != nil {
-		return err
+		return nil, err
 	}

 	// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
 	// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
 	// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
 	// So, try the preferred manifest MIME type. If the process succeeds, fine…
-	manifest, err := ic.copyUpdatedConfigAndManifest(ctx)
+	manifest, err = ic.copyUpdatedConfigAndManifest(ctx)
 	if err != nil {
 		logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
 		// … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
@@ -268,14 +267,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 		// We don’t have other options.
 		// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
 		// Don’t bother the user with MIME types if we have no choice.
-		return err
+		return nil, err
 	}
 	// If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
 	// So if we are here, we will definitely be trying to convert the manifest.
 	// With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason,
 	// so let’s bail out early and with a better error message.
 	if !ic.canModifyManifest {
-		return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
+		return nil, errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
 	}

 	// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
@@ -296,24 +295,24 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 			break
 		}
 		if errs != nil {
-			return fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
+			return nil, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
 		}
 	}

 	if options.SignBy != "" {
 		newSig, err := c.createSignature(manifest, options.SignBy)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		sigs = append(sigs, newSig)
 	}

 	c.Printf("Storing signatures\n")
 	if err := c.dest.PutSignatures(ctx, sigs); err != nil {
-		return errors.Wrap(err, "Error writing signatures")
+		return nil, errors.Wrap(err, "Error writing signatures")
 	}

-	return nil
+	return manifest, nil
 }

 // Printf writes a formatted string to c.reportWriter.
@@ -347,6 +346,9 @@ func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.Sy

 // updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
 func (ic *imageCopier) updateEmbeddedDockerReference() error {
+	if ic.c.dest.IgnoresEmbeddedDockerReference() {
+		return nil // Destination would prefer us not to update the embedded reference.
+	}
 	destRef := ic.c.dest.Reference().DockerReference()
 	if destRef == nil {
 		return nil // Destination does not care about Docker references
@@ -532,20 +534,21 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
 	if err != nil {
 		return types.BlobInfo{}, "", err
 	}
-	var diffIDResult diffIDResult // = {digest:""}
 	if diffIDIsNeeded {
 		select {
 		case <-ctx.Done():
 			return types.BlobInfo{}, "", ctx.Err()
-		case diffIDResult = <-diffIDChan:
+		case diffIDResult := <-diffIDChan:
 			if diffIDResult.err != nil {
 				return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
 			}
 			logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
 			ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
+			return blobInfo, diffIDResult.digest, nil
 		}
+	} else {
+		return blobInfo, ic.c.cachedDiffIDs[srcInfo.Digest], nil
 	}
-	return blobInfo, diffIDResult.digest, nil
 }

 // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
@@ -601,6 +604,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
 		if err != nil {
 			return "", err
 		}
+		defer s.Close()
 		stream = s
 	}

@@ -670,10 +674,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		inputInfo.Size = -1
 	} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
 		logrus.Debugf("Blob will be decompressed")
-		destStream, err = decompressor(destStream)
+		s, err := decompressor(destStream)
 		if err != nil {
 			return types.BlobInfo{}, err
 		}
+		defer s.Close()
+		destStream = s
 		inputInfo.Digest = ""
 		inputInfo.Size = -1
 	} else {
vendor/github.com/containers/image/directory/directory_dest.go (7 changed lines, generated, vendored)
@@ -117,6 +117,13 @@ func (d *dirImageDestination) MustMatchRuntimeOS() bool {
 	return false
 }

+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return false // N/A, DockerReference() returns nil.
+}
+
 // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 // inputInfo.Size is the expected length of stream, if known.
6 vendor/github.com/containers/image/docker/daemon/client.go generated vendored
@@ -30,13 +30,13 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
    //
    // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set
    // TLSClientConfig to nil. This can be achieved by using the form `http://`
    proto, _, _, err := dockerclient.ParseHost(host)
    url, err := dockerclient.ParseHostURL(host)
    if err != nil {
        return nil, err
    }
    var httpClient *http.Client
    if proto != "unix" {
        if proto == "http" {
    if url.Scheme != "unix" {
        if url.Scheme == "http" {
            httpClient = httpConfig()
        } else {
            hc, err := tlsConfig(sys)
109 vendor/github.com/containers/image/docker/docker_client.go generated vendored
@@ -100,6 +100,19 @@ type authScope struct {
    actions string
}

// sendAuth determines whether we need authentication for v2 or v1 endpoint.
type sendAuth int

const (
    // v2 endpoint with authentication.
    v2Auth sendAuth = iota
    // v1 endpoint with authentication.
    // TODO: Get v1Auth working
    // v1Auth
    // no authentication, works for both v1 and v2.
    noAuth
)

func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
    token := new(bearerToken)
    if err := json.Unmarshal(blob, &token); err != nil {
@@ -234,7 +247,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
        return errors.Wrapf(err, "error creating new docker client")
    }

    resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil)
    resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
    if err != nil {
        return err
    }
@@ -246,7 +259,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
    case http.StatusUnauthorized:
        return ErrUnauthorizedForCredentials
    default:
        return errors.Errorf("error occured with status code %q", resp.StatusCode)
        return errors.Errorf("error occured with status code %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode))
    }
}

@@ -280,25 +293,60 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
    v2Res := &V2Results{}
    v1Res := &V1Results{}

    // Get credentials from authfile for the underlying hostname
    username, password, err := config.GetAuthentication(sys, registry)
    if err != nil {
        return nil, errors.Wrapf(err, "error getting username and password")
    }

    // The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
    // So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
    if registry == dockerHostname {
        registry = dockerV1Hostname
    }

    client, err := newDockerClientWithDetails(sys, registry, "", "", "", nil, "")
    client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
    if err != nil {
        return nil, errors.Wrapf(err, "error creating new docker client")
    }

    // Only try the v1 search endpoint if the search query is not empty. If it is
    // empty skip to the v2 endpoint.
    if image != "" {
        // set up the query values for the v1 endpoint
        u := url.URL{
            Path: "/v1/search",
        }
        q := u.Query()
        q.Set("q", image)
        q.Set("n", strconv.Itoa(limit))
        u.RawQuery = q.Encode()

        logrus.Debugf("trying to talk to v1 search endpoint\n")
        resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth)
        if err != nil {
            logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
        } else {
            defer resp.Body.Close()
            if resp.StatusCode != http.StatusOK {
                logrus.Debugf("error getting search results from v1 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
            } else {
                if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
                    return nil, err
                }
                return v1Res.Results, nil
            }
        }
    }

    logrus.Debugf("trying to talk to v2 search endpoint\n")
    resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil)
    resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth)
    if err != nil {
        logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
    } else {
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            logrus.Debugf("error getting search results from v2 endpoint %q, status code %q", registry, resp.StatusCode)
            logrus.Errorf("error getting search results from v2 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
        } else {
            if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
                return nil, err
@@ -316,50 +364,25 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
        }
    }

    // set up the query values for the v1 endpoint
    u := url.URL{
        Path: "/v1/search",
    }
    q := u.Query()
    q.Set("q", image)
    q.Set("n", strconv.Itoa(limit))
    u.RawQuery = q.Encode()

    logrus.Debugf("trying to talk to v1 search endpoint\n")
    resp, err = client.makeRequest(ctx, "GET", u.String(), nil, nil)
    if err != nil {
        logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
    } else {
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            logrus.Debugf("error getting search results from v1 endpoint %q, status code %q", registry, resp.StatusCode)
        } else {
            if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
                return nil, err
            }
            return v1Res.Results, nil
        }
    }

    return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}

// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) {
    if err := c.detectProperties(ctx); err != nil {
        return nil, err
    }

    url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
    return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, true)
    return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth)
}

// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
// TODO(runcom): too many arguments here, use a struct
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) {
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) {
    req, err := http.NewRequest(method, url, stream)
    if err != nil {
        return nil, err
@@ -377,7 +400,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
    if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
        req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
    }
    if sendAuth {
    if auth == v2Auth {
        if err := c.setupRequestAuth(req); err != nil {
            return nil, err
        }
@@ -443,6 +466,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
    }
    authReq = authReq.WithContext(ctx)
    getParams := authReq.URL.Query()
    if c.username != "" {
        getParams.Add("account", c.username)
    }
    if service != "" {
        getParams.Add("service", service)
    }
@@ -453,6 +479,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
    if c.username != "" && c.password != "" {
        authReq.SetBasicAuth(c.username, c.password)
    }
    logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
    tr := tlsclientconfig.NewTransport()
    // TODO(runcom): insecure for now to contact the external token service
    tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
@@ -468,7 +495,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
    case http.StatusOK:
        break
    default:
        return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
        return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL)
    }
    tokenBlob, err := ioutil.ReadAll(res.Body)
    if err != nil {
@@ -487,15 +514,15 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {

    ping := func(scheme string) error {
        url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
        resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
        logrus.Debugf("Ping %s err %#v", url, err)
        resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
        if err != nil {
            logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
            return err
        }
        defer resp.Body.Close()
        logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
        if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
            return errors.Errorf("error pinging registry %s, response code %d", c.registry, resp.StatusCode)
            return errors.Errorf("error pinging registry %s, response code %d (%s)", c.registry, resp.StatusCode, http.StatusText(resp.StatusCode))
        }
        c.challenges = parseAuthHeader(resp.Header)
        c.scheme = scheme
@@ -514,9 +541,9 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
    // best effort to understand if we're talking to a V1 registry
    pingV1 := func(scheme string) bool {
        url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
        resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
        logrus.Debugf("Ping %s err %#v", url, err)
        resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
        if err != nil {
            logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
            return false
        }
        defer resp.Body.Close()
@@ -541,7 +568,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
    path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
    res, err := c.makeRequest(ctx, "GET", path, nil, nil)
    res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
    if err != nil {
        return nil, err
    }
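The recurring theme in this file is the new `sendAuth` parameter: every call site now states its auth mode explicitly instead of passing a bare boolean. A rough sketch of the same pattern against a hypothetical, much-simplified client (none of the helpers below are the library's API):

```go
package main

import (
    "fmt"
    "net/http"
)

// sendAuth mirrors the enum introduced above: each request states its auth mode.
type sendAuth int

const (
    v2Auth sendAuth = iota // authenticate against the v2 endpoint
    noAuth                 // send the request unauthenticated
)

type client struct{ token string }

func (c *client) do(req *http.Request, auth sendAuth) (*http.Response, error) {
    if auth == v2Auth {
        req.Header.Set("Authorization", "Bearer "+c.token)
    }
    return http.DefaultClient.Do(req)
}

func main() {
    c := &client{token: "example-token"}
    req, err := http.NewRequest("GET", "http://localhost:5000/v2/", nil)
    if err != nil {
        fmt.Println(err)
        return
    }
    // Pings and external blobs stay unauthenticated, as in detectProperties above.
    resp, err := c.do(req, noAuth)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer resp.Body.Close()
    fmt.Println(resp.StatusCode, http.StatusText(resp.StatusCode))
}
```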
4 vendor/github.com/containers/image/docker/docker_image.go generated vendored
@@ -66,14 +66,14 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
    tags := make([]string, 0)

    for {
        res, err := client.makeRequest(ctx, "GET", path, nil, nil)
        res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
        if err != nil {
            return nil, err
        }
        defer res.Body.Close()
        if res.StatusCode != http.StatusOK {
            // print url also
            return nil, errors.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode)
            return nil, errors.Errorf("Invalid status code returned when fetching tags list %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
        }

        var tagsHolder struct {
21 vendor/github.com/containers/image/docker/docker_image_dest.go generated vendored
@@ -95,6 +95,13 @@ func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
    return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool {
    return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
}

// sizeCounter is an io.Writer which only counts the total size of its input.
type sizeCounter struct{ size int64 }

@@ -123,7 +130,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
    // FIXME? Chunked upload, progress reporting, etc.
    uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
    logrus.Debugf("Uploading %s", uploadPath)
    res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil)
    res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth)
    if err != nil {
        return types.BlobInfo{}, err
    }
@@ -140,7 +147,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
    digester := digest.Canonical.Digester()
    sizeCounter := &sizeCounter{}
    tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
    res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
    res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth)
    if err != nil {
        logrus.Debugf("Error uploading layer chunked, response %#v", res)
        return types.BlobInfo{}, err
@@ -159,7 +166,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
    // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
    locationQuery.Set("digest", computedDigest.String())
    uploadLocation.RawQuery = locationQuery.Encode()
    res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true)
    res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth)
    if err != nil {
        return types.BlobInfo{}, err
    }
@@ -184,7 +191,7 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
    checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())

    logrus.Debugf("Checking %s", checkPath)
    res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil)
    res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
    if err != nil {
        return false, -1, err
    }
@@ -200,7 +207,7 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
        logrus.Debugf("... not present")
        return false, -1, nil
    default:
        return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
        return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
    }
}

@@ -230,7 +237,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) erro
    if mimeType != "" {
        headers["Content-Type"] = []string{mimeType}
    }
    res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m))
    res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth)
    if err != nil {
        return err
    }
@@ -435,7 +442,7 @@ sigExists:
    }

    path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
    res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body))
    res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth)
    if err != nil {
        return err
    }
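PutBlob above follows the distribution-spec upload sequence: POST to open a session, PATCH the data while tee-ing it through a digester, then PUT with the computed digest as a query parameter. A compressed sketch of that flow, assuming a local registry and eliding the relative Location-URL resolution the real code performs via `res.Location()`:

```go
package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
    "io"
    "net/http"
)

// uploadBlob sketches the three-step upload used by PutBlob above.
func uploadBlob(base, repo string, blob []byte) error {
    // 1. POST opens an upload session; the Location header names the session URL.
    res, err := http.Post(fmt.Sprintf("%s/v2/%s/blobs/uploads/", base, repo), "", nil)
    if err != nil {
        return err
    }
    res.Body.Close()
    loc := res.Header.Get("Location") // real code resolves relative URLs via res.Location()

    // 2. PATCH streams the data, computing the digest on the way through.
    h := sha256.New()
    req, err := http.NewRequest("PATCH", loc, io.TeeReader(bytes.NewReader(blob), h))
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", "application/octet-stream")
    if res, err = http.DefaultClient.Do(req); err != nil {
        return err
    }
    res.Body.Close()
    loc = res.Header.Get("Location")

    // 3. PUT finalizes the session, naming the digest we just computed.
    req, err = http.NewRequest("PUT", fmt.Sprintf("%s?digest=sha256:%x", loc, h.Sum(nil)), nil)
    if err != nil {
        return err
    }
    if res, err = http.DefaultClient.Do(req); err != nil {
        return err
    }
    res.Body.Close()
    fmt.Println("final status:", res.StatusCode, http.StatusText(res.StatusCode))
    return nil
}

func main() {
    _ = uploadBlob("http://localhost:5000", "example/repo", []byte("layer data"))
}
```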
25 vendor/github.com/containers/image/docker/docker_image_src.go generated vendored
@@ -89,7 +89,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
    path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
    headers := make(map[string][]string)
    headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes
    res, err := s.c.makeRequest(ctx, "GET", path, headers, nil)
    res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth)
    if err != nil {
        return nil, "", err
    }
@@ -137,10 +137,10 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
        err  error
    )
    for _, url := range urls {
        resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, false)
        resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
        if err == nil {
            if resp.StatusCode != http.StatusOK {
                err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
                err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
                logrus.Debug(err)
                continue
            }
@@ -169,13 +169,13 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (i

    path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
    logrus.Debugf("Downloading %s", path)
    res, err := s.c.makeRequest(ctx, "GET", path, nil, nil)
    res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
    if err != nil {
        return nil, 0, err
    }
    if res.StatusCode != http.StatusOK {
        // print url also
        return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode)
        return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
    }
    return res.Body, getBlobSize(res), nil
}
@@ -274,7 +274,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
    if res.StatusCode == http.StatusNotFound {
        return nil, true, nil
    } else if res.StatusCode != http.StatusOK {
        return nil, false, errors.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
        return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode))
    }
    sig, err := ioutil.ReadAll(res.Body)
    if err != nil {
@@ -310,7 +310,14 @@ func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, i

// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
    c, err := newDockerClientFromRef(sys, ref, true, "push")
    // docker/distribution does not document what action should be used for deleting images.
    //
    // Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it.
    // quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included.
    // OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user).
    //
    // We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything".
    c, err := newDockerClientFromRef(sys, ref, true, "*")
    if err != nil {
        return err
    }
@@ -325,7 +332,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
        return err
    }
    getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
    get, err := c.makeRequest(ctx, "GET", getPath, headers, nil)
    get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth)
    if err != nil {
        return err
    }
@@ -347,7 +354,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere

    // When retrieving the digest from a registry >= 2.3 use the following header:
    //   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
    delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil)
    delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth)
    if err != nil {
        return err
    }
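deleteImage above first GETs the manifest with the schema2 Accept header to learn its canonical digest, then DELETEs by that digest. The two requests in miniature (a sketch against an assumed local registry; the real code also checks status codes and parses the body):

```go
package main

import (
    "fmt"
    "net/http"
)

// deleteByDigest sketches the GET-then-DELETE sequence used by deleteImage above.
func deleteByDigest(base, repo, tag string) error {
    // GET the manifest; with this Accept header a registry >= 2.3 reports the
    // canonical schema2 digest in Docker-Content-Digest.
    req, err := http.NewRequest("GET", fmt.Sprintf("%s/v2/%s/manifests/%s", base, repo, tag), nil)
    if err != nil {
        return err
    }
    req.Header.Set("Accept", "application/vnd.docker.distribution.manifest.v2+json")
    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    res.Body.Close()
    dgst := res.Header.Get("Docker-Content-Digest")

    // DELETE the manifest by digest, not by tag.
    req, err = http.NewRequest("DELETE", fmt.Sprintf("%s/v2/%s/manifests/%s", base, repo, dgst), nil)
    if err != nil {
        return err
    }
    if res, err = http.DefaultClient.Do(req); err != nil {
        return err
    }
    res.Body.Close()
    fmt.Println("delete status:", res.StatusCode, http.StatusText(res.StatusCode))
    return nil
}

func main() {
    _ = deleteByDigest("http://localhost:5000", "example/repo", "latest")
}
```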
7 vendor/github.com/containers/image/docker/tarfile/dest.go generated vendored
@@ -75,6 +75,13 @@ func (d *Destination) MustMatchRuntimeOS() bool {
    return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
    return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
102 vendor/github.com/containers/image/docker/tarfile/src.go generated vendored
@@ -3,7 +3,6 @@ package tarfile
import (
    "archive/tar"
    "bytes"
    "compress/gzip"
    "context"
    "encoding/json"
    "io"
@@ -43,8 +42,7 @@ type layerInfo struct {
// the source of an image.
// To do for both the NewSourceFromFile and NewSourceFromStream functions

// NewSourceFromFile returns a tarfile.Source for the specified path
// NewSourceFromFile supports both conpressed and uncompressed input
// NewSourceFromFile returns a tarfile.Source for the specified path.
func NewSourceFromFile(path string) (*Source, error) {
    file, err := os.Open(path)
    if err != nil {
@@ -52,19 +50,24 @@ func NewSourceFromFile(path string) (*Source, error) {
    }
    defer file.Close()

    reader, err := gzip.NewReader(file)
    // If the file is already not compressed we can just return the file itself
    // as a source. Otherwise we pass the stream to NewSourceFromStream.
    stream, isCompressed, err := compression.AutoDecompress(file)
    if err != nil {
        return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
    }
    defer stream.Close()
    if !isCompressed {
        return &Source{
            tarPath: path,
        }, nil
    }
    defer reader.Close()

    return NewSourceFromStream(reader)
    return NewSourceFromStream(stream)
}

// NewSourceFromStream returns a tarfile.Source for the specified inputStream, which must be uncompressed.
// The caller can close the inputStream immediately after NewSourceFromFile returns.
// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromFile returns.
func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
    // FIXME: use SystemContext here.
    // Save inputStream to a temporary file
@@ -81,8 +84,20 @@ func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
        }
    }()

    // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
    if _, err := io.Copy(tarCopyFile, inputStream); err != nil {
    // In order to be compatible with docker-load, we need to support
    // auto-decompression (it's also a nice quality-of-life thing to avoid
    // giving users really confusing "invalid tar header" errors).
    uncompressedStream, _, err := compression.AutoDecompress(inputStream)
    if err != nil {
        return nil, errors.Wrap(err, "Error auto-decompressing input")
    }
    defer uncompressedStream.Close()

    // Copy the plain archive to the temporary file.
    //
    // TODO: This can take quite some time, and should ideally be cancellable
    // using a context.Context.
    if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
        return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
    }
    succeeded = true
@@ -292,7 +307,25 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
        return nil, err
    }
    if li, ok := unknownLayerSizes[h.Name]; ok {
        li.size = h.Size
        // Since GetBlob will decompress layers that are compressed we need
        // to do the decompression here as well, otherwise we will
        // incorrectly report the size. Pretty critical, since tools like
        // umoci always compress layer blobs. Obviously we only bother with
        // the slower method of checking if it's compressed.
        uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
        if err != nil {
            return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name)
        }
        defer uncompressedStream.Close()

        uncompressedSize := h.Size
        if isCompressed {
            uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
            if err != nil {
                return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name)
            }
        }
        li.size = uncompressedSize
        delete(unknownLayerSizes, h.Name)
    }
}
@@ -346,16 +379,22 @@ func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest)
    return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
}

type readCloseWrapper struct {
// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input.
type uncompressedReadCloser struct {
    io.Reader
    closeFunc func() error
    underlyingCloser   func() error
    uncompressedCloser func() error
}

func (r readCloseWrapper) Close() error {
    if r.closeFunc != nil {
        return r.closeFunc()
func (r uncompressedReadCloser) Close() error {
    var res error
    if err := r.uncompressedCloser(); err != nil {
        res = err
    }
    return nil
    if err := r.underlyingCloser(); err != nil && res == nil {
        res = err
    }
    return res
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
@@ -369,10 +408,16 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadClose
    }

    if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
        stream, err := s.openTarComponent(li.path)
        underlyingStream, err := s.openTarComponent(li.path)
        if err != nil {
            return nil, 0, err
        }
        closeUnderlyingStream := true
        defer func() {
            if closeUnderlyingStream {
                underlyingStream.Close()
            }
        }()

        // In order to handle the fact that digests != diffIDs (and thus that a
        // caller which is trying to verify the blob will run into problems),
@@ -386,22 +431,17 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadClose
        // be verifing a "digest" which is not the actual layer's digest (but
        // is instead the DiffID).

        decompressFunc, reader, err := compression.DetectCompression(stream)
        uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
        if err != nil {
            return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest)
            return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
        }

        if decompressFunc != nil {
            reader, err = decompressFunc(reader)
            if err != nil {
                return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest)
            }
        }

        newStream := readCloseWrapper{
            Reader:    reader,
            closeFunc: stream.Close,
        newStream := uncompressedReadCloser{
            Reader:             uncompressedStream,
            underlyingCloser:   underlyingStream.Close,
            uncompressedCloser: uncompressedStream.Close,
        }
        closeUnderlyingStream = false

        return newStream, li.size, nil
    }
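The `uncompressedReadCloser` introduced above has to close two readers: the decompressor and the stream it wraps, reporting the first error it sees. The same pattern in miniature, with gzip standing in for the library's `AutoDecompress` (the input file name is hypothetical):

```go
package main

import (
    "compress/gzip"
    "fmt"
    "io"
    "os"
)

// twoCloser mirrors uncompressedReadCloser: reads from the decompressor,
// closes it and the underlying stream, and returns the first error.
type twoCloser struct {
    io.Reader
    uncompressedCloser func() error
    underlyingCloser   func() error
}

func (r twoCloser) Close() error {
    var res error
    if err := r.uncompressedCloser(); err != nil {
        res = err
    }
    if err := r.underlyingCloser(); err != nil && res == nil {
        res = err
    }
    return res
}

func open(path string) (io.ReadCloser, error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    gz, err := gzip.NewReader(f)
    if err != nil {
        f.Close() // as in GetBlob above, close the underlying stream if wrapping fails
        return nil, err
    }
    return twoCloser{Reader: gz, uncompressedCloser: gz.Close, underlyingCloser: f.Close}, nil
}

func main() {
    rc, err := open("layer.tar.gz")
    if err != nil {
        fmt.Println(err)
        return
    }
    defer rc.Close()
    n, _ := io.Copy(io.Discard, rc)
    fmt.Println("uncompressed bytes:", n)
}
```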
@@ -1,9 +1,9 @@
% POLICY.JSON(5) policy.json Man Page
% CONTAINERS-POLICY.JSON(5) policy.json Man Page
% Miloslav Trmač
% September 2016

# NAME
policy.json - syntax for the signature verification policy file
containers-policy.json - syntax for the signature verification policy file

## DESCRIPTION

@@ -278,4 +278,6 @@ selectively allow individual transports and scopes as desired.
atomic(1)

## HISTORY
August 2018, Rename to containers-policy.json(5) by Valentin Rothberg <vrothberg@suse.com>

September 2016, Originally compiled by Miloslav Trmač <mitr@redhat.com>
56 vendor/github.com/containers/image/docs/containers-registries.conf.5.md generated vendored Normal file
@@ -0,0 +1,56 @@
% CONTAINERS-REGISTRIES.CONF(5) System-wide registry configuration file
% Brent Baude
% Aug 2017

# NAME
containers-registries.conf - Syntax of System Registry Configuration File

# DESCRIPTION
The CONTAINERS-REGISTRIES configuration file is a system-wide configuration
file for container image registries. The file format is TOML. The valid
categories are: 'registries.search', 'registries.insecure', and
'registries.block'.

By default, the configuration file is located at `/etc/containers/registries.conf`.

# FORMAT
The TOML format is used to build a simple list format for registries under three
categories: `registries.search`, `registries.insecure`, and `registries.block`.
You can list multiple registries using a comma-separated list.

Search registries are used when the caller of a container runtime does not fully specify the
container image that they want to execute. These registries are prepended onto the front
of the specified container image until the named image is found at a registry.

Insecure Registries. By default container runtimes use TLS when retrieving images
from a registry. If the registry is not set up with TLS, then the container runtime
will fail to pull images from the registry. If you add the registry to the list of
insecure registries then the container runtime will attempt to use standard web protocols to
pull the image. It also allows you to pull from a registry with self-signed certificates.
Note insecure registries can be used for any registry, not just the registries listed
under search.

Block Registries. The registries in this category are not pulled from when
retrieving images.

# EXAMPLE
The following example configuration defines two searchable registries, one
insecure registry, and two blocked registries.

```
[registries.search]
registries = ['registry1.com', 'registry2.com']

[registries.insecure]
registries = ['registry3.com']

[registries.block]
registries = ['registry.untrusted.com', 'registry.unsafe.com']
```

# HISTORY
Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg <vrothberg@suse.com>

Jun 2018, Updated by Tom Sweeney <tsweeney@redhat.com>

Aug 2017, Originally compiled by Brent Baude <bbaude@redhat.com>
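Since the format above is plain TOML with one `registries` list per category, a consumer needs little more than a pair of structs to read it. A sketch using the widely used BurntSushi/toml package (the struct names are ours, not from containers/image):

```go
package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

type registryList struct {
    Registries []string `toml:"registries"`
}

// conf mirrors the three documented categories: [registries.search],
// [registries.insecure], and [registries.block].
type conf struct {
    Registries struct {
        Search   registryList `toml:"search"`
        Insecure registryList `toml:"insecure"`
        Block    registryList `toml:"block"`
    } `toml:"registries"`
}

func main() {
    var c conf
    if _, err := toml.DecodeFile("/etc/containers/registries.conf", &c); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("search:", c.Registries.Search.Registries)
    fmt.Println("insecure:", c.Registries.Insecure.Registries)
    fmt.Println("block:", c.Registries.Block.Registries)
}
```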
@@ -1,7 +1,11 @@
% REGISTRIES.D(5) Registries.d Man Page
% CONTAINERS-REGISTRIES.D(5) Registries.d Man Page
% Miloslav Trmač
% August 2016
# Registries Configuration Directory

# NAME
containers-registries.d - Directory for various registries configurations

# DESCRIPTION

The registries configuration directory contains configuration for various registries
(servers storing remote container images), and for content stored in them,
41 vendor/github.com/containers/image/docs/registries.conf.5.md generated vendored
@@ -1,41 +0,0 @@
% registries.conf(5) System-wide registry configuration file
% Brent Baude
% Aug 2017

# NAME
registries.conf - Syntax of System Registry Configuration File

# DESCRIPTION
The REGISTRIES configuration file is a system-wide configuration file for container image
registries. The file format is TOML.

# FORMAT
The TOML_format is used to build simple list format for registries under two
categories: `search` and `insecure`. You can list multiple registries using
as a comma separated list.

Search registries are used when the caller of a container runtime does not fully specify the
container image that they want to execute. These registries are prepended onto the front
of the specified container image until the named image is found at a registry.

Insecure Registries. By default container runtimes use TLS when retrieving images
from a registry. If the registry is not setup with TLS, then the container runtime
will fail to pull images from the registry. If you add the registry to the list of
insecure registries then the container runtime will attempt use standard web protocols to
pull the image. It also allows you to pull from a registry with self-signed certificates.
Note insecure registries can be used for any registry, not just the
registries listed under search.

The following example configuration defines two searchable registries and one
insecure registry.

```
[registries.search]
registries = ["registry1.com", "registry2.com"]

[registries.insecure]
registries = ["registry3.com"]
```

# HISTORY
Aug 2017, Originally compiled by Brent Baude <bbaude@redhat.com>
29 vendor/github.com/containers/image/image/docker_schema1.go generated vendored
@@ -2,7 +2,6 @@ package image

import (
    "context"
    "encoding/json"

    "github.com/containers/image/docker/reference"
    "github.com/containers/image/manifest"
@@ -25,8 +24,12 @@ func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
}

// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest {
    return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)}
func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) {
    m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture)
    if err != nil {
        return nil, err
    }
    return &manifestSchema1{m: m}, nil
}

func (m *manifestSchema1) serialize() ([]byte, error) {
@@ -64,7 +67,7 @@ func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, erro
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
    return m.m.LayerInfos()
    return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -148,12 +151,12 @@ func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.Manife

// Based on github.com/docker/docker/distribution/pull_v2.go
func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) {
    if len(m.m.History) == 0 {
        // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
    if len(m.m.ExtractedV1Compatibility) == 0 {
        // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
        return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
    }
    if len(m.m.History) != len(m.m.FSLayers) {
        return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers))
    if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) {
        return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
    }
    if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
        return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
@@ -165,14 +168,10 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
    // Build a list of the diffIDs for the non-empty layers.
    diffIDs := []digest.Digest{}
    var layers []manifest.Schema2Descriptor
    for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- {
        v2Index := (len(m.m.History) - 1) - v1Index
    for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- {
        v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index

        var v1compat manifest.Schema1V1Compatibility
        if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil {
            return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
        }
        if !v1compat.ThrowAway {
        if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway {
            var size int64
            if uploadedLayerInfos != nil {
                size = uploadedLayerInfos[v2Index].Size
29 vendor/github.com/containers/image/image/docker_schema2.go generated vendored
@@ -18,17 +18,17 @@ import (
    "github.com/sirupsen/logrus"
)

// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
// in registries, so let’s use the same values.
var gzippedEmptyLayer = []byte{
var GzippedEmptyLayer = []byte{
    31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
    0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}

// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")

type manifestSchema2 struct {
    src types.ImageSource // May be nil if configBlob is not nil
@@ -95,11 +95,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
    if m.src == nil {
        return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
    }
    stream, _, err := m.src.GetBlob(ctx, types.BlobInfo{
        Digest: m.m.ConfigDescriptor.Digest,
        Size:   m.m.ConfigDescriptor.Size,
        URLs:   m.m.ConfigDescriptor.URLs,
    })
    stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor))
    if err != nil {
        return nil, err
    }
@@ -121,7 +117,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
    return m.m.LayerInfos()
    return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -253,16 +249,16 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
        if historyEntry.EmptyLayer {
            if !haveGzippedEmptyLayer {
                logrus.Debugf("Uploading empty layer during conversion to schema 1")
                info, err := dest.PutBlob(ctx, bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))}, false)
                info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, false)
                if err != nil {
                    return nil, errors.Wrap(err, "Error uploading empty layer")
                }
                if info.Digest != gzippedEmptyLayerDigest {
                    return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest)
                if info.Digest != GzippedEmptyLayerDigest {
                    return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, GzippedEmptyLayerDigest)
                }
                haveGzippedEmptyLayer = true
            }
            blobDigest = gzippedEmptyLayerDigest
            blobDigest = GzippedEmptyLayerDigest
        } else {
            if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
                return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
@@ -308,7 +304,10 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
    }
    history[0].V1Compatibility = string(v1Config)

    m1 := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
    m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
    if err != nil {
        return nil, err // This should never happen, we should have created all the components correctly.
    }
    return memoryImageFromManifest(m1), nil
}
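A quick way to convince yourself that the newly exported digest constant matches the newly exported bytes is to hash them directly; a small check using only the standard library:

```go
package main

import (
    "crypto/sha256"
    "fmt"
)

// The same bytes as GzippedEmptyLayer above: a gzip-compressed empty tar.
var gzippedEmptyLayer = []byte{
    31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
    0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}

func main() {
    sum := sha256.Sum256(gzippedEmptyLayer)
    // Prints the value of GzippedEmptyLayerDigest:
    // sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
    fmt.Printf("sha256:%x\n", sum)
}
```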
9 vendor/github.com/containers/image/image/manifest.go generated vendored
@@ -62,3 +62,12 @@ func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src
        return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
    }
}

// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo.
func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo {
    blobs := make([]types.BlobInfo, len(layers))
    for i, layer := range layers {
        blobs[i] = layer.BlobInfo
    }
    return blobs
}
8 vendor/github.com/containers/image/image/oci.go generated vendored
@@ -60,11 +60,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
    if m.src == nil {
        return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
    }
    stream, _, err := m.src.GetBlob(ctx, types.BlobInfo{
        Digest: m.m.Config.Digest,
        Size:   m.m.Config.Size,
        URLs:   m.m.Config.URLs,
    })
    stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config))
    if err != nil {
        return nil, err
    }
@@ -101,7 +97,7 @@ func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error)
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
    return m.m.LayerInfos()
    return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
119 vendor/github.com/containers/image/manifest/docker_schema1.go generated vendored
@@ -25,25 +25,28 @@ type Schema1History struct {

// Schema1 is a manifest in docker/distribution schema 1.
type Schema1 struct {
    Name          string            `json:"name"`
    Tag           string            `json:"tag"`
    Architecture  string            `json:"architecture"`
    FSLayers      []Schema1FSLayers `json:"fsLayers"`
    History       []Schema1History  `json:"history"`
    SchemaVersion int               `json:"schemaVersion"`
    Name                     string                   `json:"name"`
    Tag                      string                   `json:"tag"`
    Architecture             string                   `json:"architecture"`
    FSLayers                 []Schema1FSLayers        `json:"fsLayers"`
    History                  []Schema1History         `json:"history"` // Keep this in sync with ExtractedV1Compatibility!
    ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"`       // Keep this in sync with History! Does not contain the full config (Schema2V1Image)
    SchemaVersion            int                      `json:"schemaVersion"`
}

type schema1V1CompatibilityContainerConfig struct {
    Cmd []string
}

// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
type Schema1V1Compatibility struct {
    ID              string    `json:"id"`
    Parent          string    `json:"parent,omitempty"`
    Comment         string    `json:"comment,omitempty"`
    Created         time.Time `json:"created"`
    ContainerConfig struct {
        Cmd []string
    } `json:"container_config,omitempty"`
    Author    string `json:"author,omitempty"`
    ThrowAway bool   `json:"throwaway,omitempty"`
    ID              string                                `json:"id"`
    Parent          string                                `json:"parent,omitempty"`
    Comment         string                                `json:"comment,omitempty"`
    Created         time.Time                             `json:"created"`
    ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"`
    Author          string                                `json:"author,omitempty"`
    ThrowAway       bool                                  `json:"throwaway,omitempty"`
}

// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
@@ -57,11 +60,8 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) {
    if s1.SchemaVersion != 1 {
        return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
    }
    if len(s1.FSLayers) != len(s1.History) {
        return nil, errors.New("length of history not equal to number of layers")
    }
    if len(s1.FSLayers) == 0 {
        return nil, errors.New("no FSLayers in manifest")
    if err := s1.initialize(); err != nil {
        return nil, err
    }
    if err := s1.fixManifestLayers(); err != nil {
        return nil, err
@@ -70,7 +70,7 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) {
}

// Schema1FromComponents creates an Schema1 manifest instance from the supplied data.
func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 {
func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
    var name, tag string
    if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
        name = reference.Path(ref)
@@ -78,7 +78,7 @@ func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, hist
            tag = tagged.Tag()
        }
    }
    return &Schema1{
    s1 := Schema1{
        Name:          name,
        Tag:           tag,
        Architecture:  architecture,
@@ -86,6 +86,10 @@ func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, hist
        History:       history,
        SchemaVersion: 1,
    }
    if err := s1.initialize(); err != nil {
        return nil, err
    }
    return &s1, nil
}

// Schema1Clone creates a copy of the supplied Schema1 manifest.
@@ -94,31 +98,51 @@ func Schema1Clone(src *Schema1) *Schema1 {
    return &copy
}

// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
func (m *Schema1) initialize() error {
    if len(m.FSLayers) != len(m.History) {
        return errors.New("length of history not equal to number of layers")
    }
    if len(m.FSLayers) == 0 {
        return errors.New("no FSLayers in manifest")
    }
    m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
    for i, h := range m.History {
        if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
            return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i)
        }
    }
    return nil
}

// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *Schema1) ConfigInfo() types.BlobInfo {
    return types.BlobInfo{}
}

// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *Schema1) LayerInfos() []types.BlobInfo {
    layers := make([]types.BlobInfo, len(m.FSLayers))
func (m *Schema1) LayerInfos() []LayerInfo {
    layers := make([]LayerInfo, len(m.FSLayers))
    for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
        layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
        layers[(len(m.FSLayers)-1)-i] = LayerInfo{
            BlobInfo:   types.BlobInfo{Digest: layer.BlobSum, Size: -1},
            EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway,
        }
    }
    return layers
}

// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
    // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
    // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
    if len(m.FSLayers) != len(layerInfos) {
        return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
    }
    m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
    for i, info := range layerInfos {
        // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
        // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest,
        // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
        // So, we don't bother recomputing the IDs in m.History.V1Compatibility.
        m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
@@ -144,31 +168,19 @@ func (m *Schema1) Serialize() ([]byte, error) {
// Note that even after this succeeds, m.FSLayers may contain duplicate entries
// (for Dockerfile operations which change the configuration but not the filesystem).
func (m *Schema1) fixManifestLayers() error {
    type imageV1 struct {
        ID     string
        Parent string
    }
    // Per the specification, we can assume that len(m.FSLayers) == len(m.History)
    imgs := make([]*imageV1, len(m.FSLayers))
    for i := range m.FSLayers {
        img := &imageV1{}

        if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
            return err
        }

        imgs[i] = img
        if err := validateV1ID(img.ID); err != nil {
    // m.initialize() has verified that len(m.FSLayers) == len(m.History)
    for _, compat := range m.ExtractedV1Compatibility {
        if err := validateV1ID(compat.ID); err != nil {
            return err
        }
    }
    if imgs[len(imgs)-1].Parent != "" {
    if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" {
        return errors.New("Invalid parent ID in the base layer of the image")
    }
    // check general duplicates to error instead of a deadlock
    idmap := make(map[string]struct{})
    var lastID string
    for _, img := range imgs {
    for _, img := range m.ExtractedV1Compatibility {
        // skip IDs that appear after each other, we handle those later
        if _, exists := idmap[img.ID]; img.ID != lastID && exists {
            return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
@@ -177,12 +189,13 @@ func (m *Schema1) fixManifestLayers() error {
        idmap[lastID] = struct{}{}
    }
    // backwards loop so that we keep the remaining indexes after removing items
    for i := len(imgs) - 2; i >= 0; i-- {
        if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
    for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- {
        if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue
            m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
            m.History = append(m.History[:i], m.History[i+1:]...)
        } else if imgs[i].Parent != imgs[i+1].ID {
            return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
            m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...)
        } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
            return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
        }
    }
    return nil
@@ -209,7 +222,7 @@ func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageI
        DockerVersion: s1.DockerVersion,
        Architecture:  s1.Architecture,
        Os:            s1.OS,
        Layers:        LayerInfosToStrings(m.LayerInfos()),
        Layers:        layerInfosToStrings(m.LayerInfos()),
    }
    if s1.Config != nil {
        i.Labels = s1.Config.Labels
@@ -240,11 +253,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
    }
    // Build the history.
    convertedHistory := []Schema2History{}
    for _, h := range m.History {
        compat := Schema1V1Compatibility{}
        if err := json.Unmarshal([]byte(h.V1Compatibility), &compat); err != nil {
            return nil, errors.Wrapf(err, "error decoding history information")
        }
    for _, compat := range m.ExtractedV1Compatibility {
        hitem := Schema2History{
            Created:   compat.Created,
            CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "),
|
||||
|
||||
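The backwards loop in fixManifestLayers() above is the subtle part: iterating from the end keeps the indexes of the not-yet-visited elements valid while entries are removed from parallel slices. A minimal standalone sketch of just that pattern (the slice names here are illustrative, not taken from the library):

package main

import "fmt"

// dedupAdjacent removes adjacent duplicate IDs from two parallel slices,
// walking backwards so earlier indexes stay valid after each removal --
// the same pattern fixManifestLayers() applies to FSLayers/History.
func dedupAdjacent(ids, layers []string) ([]string, []string) {
	for i := len(ids) - 2; i >= 0; i-- {
		if ids[i] == ids[i+1] { // repeated ID: drop one entry from both slices
			ids = append(ids[:i], ids[i+1:]...)
			layers = append(layers[:i], layers[i+1:]...)
		}
	}
	return ids, layers
}

func main() {
	ids, layers := dedupAdjacent(
		[]string{"a", "b", "b", "c"},
		[]string{"l1", "l2", "l3", "l4"},
	)
	fmt.Println(ids, layers) // [a b c] [l1 l3 l4]
}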
32 vendor/github.com/containers/image/manifest/docker_schema2.go generated vendored
@@ -18,6 +18,16 @@ type Schema2Descriptor struct {
	URLs      []string `json:"urls,omitempty"`
}

// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor.
func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo {
	return types.BlobInfo{
		Digest:    desc.Digest,
		Size:      desc.Size,
		URLs:      desc.URLs,
		MediaType: desc.MediaType,
	}
}

// Schema2 is a manifest in docker/distribution schema 2.
type Schema2 struct {
	SchemaVersion int `json:"schemaVersion"`
@@ -47,8 +57,9 @@ type Schema2HealthConfig struct {
	Test []string `json:",omitempty"`

	// Zero means to inherit. Durations are expressed as integer nanoseconds.
	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
	StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
	Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
	Timeout  time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.

	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
	// Zero means inherit.
@@ -171,19 +182,18 @@ func Schema2Clone(src *Schema2) *Schema2 {

// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *Schema2) ConfigInfo() types.BlobInfo {
	return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
	return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
}

// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *Schema2) LayerInfos() []types.BlobInfo {
	blobs := []types.BlobInfo{}
func (m *Schema2) LayerInfos() []LayerInfo {
	blobs := []LayerInfo{}
	for _, layer := range m.LayersDescriptors {
		blobs = append(blobs, types.BlobInfo{
			Digest: layer.Digest,
			Size:   layer.Size,
			URLs:   layer.URLs,
		blobs = append(blobs, LayerInfo{
			BlobInfo:   BlobInfoFromSchema2Descriptor(layer),
			EmptyLayer: false,
		})
	}
	return blobs
@@ -227,7 +237,7 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
		DockerVersion: s2.DockerVersion,
		Architecture:  s2.Architecture,
		Os:            s2.OS,
		Layers:        LayerInfosToStrings(m.LayerInfos()),
		Layers:        layerInfosToStrings(m.LayerInfos()),
	}
	if s2.Config != nil {
		i.Labels = s2.Config.Labels
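The new helper centralizes descriptor-to-BlobInfo conversion, so ConfigInfo() now also carries URLs and MediaType instead of just Digest and Size. A hedged usage sketch, with an invented digest value:

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	// A hypothetical schema 2 layer descriptor; the digest is illustrative only.
	desc := manifest.Schema2Descriptor{
		MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
		Size:      32654,
		Digest:    "sha256:9834876dcfb05cb167a5c24953eba58c4ac89b1adf57f28f2f9d09af107ee8f0",
	}
	info := manifest.BlobInfoFromSchema2Descriptor(desc)
	// MediaType and URLs survive the conversion now, not just Digest and Size.
	fmt.Println(info.Digest, info.Size, info.MediaType)
}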
14 vendor/github.com/containers/image/manifest/manifest.go generated vendored
@@ -50,10 +50,10 @@ var DefaultRequestedManifestMIMETypes = []string{
type Manifest interface {
	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
	ConfigInfo() types.BlobInfo
	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
	// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
	// The Digest field is guaranteed to be provided; Size may be -1.
	// WARNING: The list may contain duplicates, and they are semantically relevant.
	LayerInfos() []types.BlobInfo
	LayerInfos() []LayerInfo
	// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
	UpdateLayerInfos(layerInfos []types.BlobInfo) error

@@ -73,6 +73,12 @@ type Manifest interface {
	Serialize() ([]byte, error)
}

// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos.
type LayerInfo struct {
	types.BlobInfo
	EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
}

// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
@@ -227,9 +233,9 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) {
	}
}

// LayerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
func LayerInfosToStrings(infos []types.BlobInfo) []string {
func layerInfosToStrings(infos []LayerInfo) []string {
	layers := make([]string, len(infos))
	for i, info := range infos {
		layers[i] = info.Digest.String()
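Because LayerInfo embeds types.BlobInfo, existing field accesses like info.Digest keep compiling while callers gain the EmptyLayer flag. A minimal caller sketch, assuming the vendored import path and that manifest.GuessMIMEType takes the raw manifest bytes; the file name is hypothetical:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/containers/image/manifest"
)

func main() {
	blob, err := ioutil.ReadFile("manifest.json") // hypothetical local manifest copy
	if err != nil {
		log.Fatal(err)
	}
	m, err := manifest.FromBlob(blob, manifest.GuessMIMEType(blob))
	if err != nil {
		log.Fatal(err)
	}
	for _, info := range m.LayerInfos() {
		if info.EmptyLayer {
			continue // throwaway layer: no filesystem content to fetch
		}
		fmt.Println(info.Digest, info.Size) // fields promoted from the embedded BlobInfo
	}
}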
26 vendor/github.com/containers/image/manifest/oci.go generated vendored
@@ -10,6 +10,17 @@ import (
	"github.com/pkg/errors"
)

// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo {
	return types.BlobInfo{
		Digest:      desc.Digest,
		Size:        desc.Size,
		URLs:        desc.URLs,
		Annotations: desc.Annotations,
		MediaType:   desc.MediaType,
	}
}

// OCI1 is a manifest.Manifest implementation for OCI images.
// The underlying data from imgspecv1.Manifest is also available.
type OCI1 struct {
@@ -45,16 +56,19 @@ func OCI1Clone(src *OCI1) *OCI1 {

// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *OCI1) ConfigInfo() types.BlobInfo {
	return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations}
	return BlobInfoFromOCI1Descriptor(m.Config)
}

// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *OCI1) LayerInfos() []types.BlobInfo {
	blobs := []types.BlobInfo{}
func (m *OCI1) LayerInfos() []LayerInfo {
	blobs := []LayerInfo{}
	for _, layer := range m.Layers {
		blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType})
		blobs = append(blobs, LayerInfo{
			BlobInfo:   BlobInfoFromOCI1Descriptor(layer),
			EmptyLayer: false,
		})
	}
	return blobs
}
@@ -101,7 +115,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
		Labels:       v1.Config.Labels,
		Architecture: v1.Architecture,
		Os:           v1.OS,
		Layers:       LayerInfosToStrings(m.LayerInfos()),
		Layers:       layerInfosToStrings(m.LayerInfos()),
	}
	return i, nil
}
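Unlike its schema 2 counterpart, the OCI helper also carries descriptor annotations through. A hedged sketch using the OCI image-spec types; the annotation key and values are invented for illustration:

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	desc := imgspecv1.Descriptor{
		MediaType:   imgspecv1.MediaTypeImageLayerGzip,
		Size:        2048,
		Digest:      "sha256:9834876dcfb05cb167a5c24953eba58c4ac89b1adf57f28f2f9d09af107ee8f0",
		Annotations: map[string]string{"org.example.layer-role": "base"}, // illustrative key
	}
	info := manifest.BlobInfoFromOCI1Descriptor(desc)
	// Annotations are preserved, which the old inline conversion also did for
	// the config but which is now uniform for layers too.
	fmt.Println(info.MediaType, info.Annotations["org.example.layer-role"])
}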
7 vendor/github.com/containers/image/oci/archive/oci_dest.go generated vendored
@@ -70,6 +70,13 @@ func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool {
	return d.unpackedDest.MustMatchRuntimeOS()
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
	return d.unpackedDest.IgnoresEmbeddedDockerReference()
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
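This new interface method lets generic copy code skip the schema 1 manifest rewrite (which changes the manifest digest) when the destination does not mind a mismatched embedded reference. A hedged sketch of that decision — the interface and function here are simplified stand-ins, not the actual copy.Image() code:

package main

import "fmt"

// dest models just the method added by this change.
type dest interface {
	IgnoresEmbeddedDockerReference() bool
}

type archiveDest struct{ ignores bool }

func (d archiveDest) IgnoresEmbeddedDockerReference() bool { return d.ignores }

// mustRewriteManifest reports whether a schema 1 manifest whose embedded
// reference conflicts with the destination's name still has to be rewritten.
func mustRewriteManifest(embeddedRefConflicts bool, d dest) bool {
	return embeddedRefConflicts && !d.IgnoresEmbeddedDockerReference()
}

func main() {
	fmt.Println(mustRewriteManifest(true, archiveDest{ignores: true}))  // false: digest stays stable
	fmt.Println(mustRewriteManifest(true, archiveDest{ignores: false})) // true: rewrite needed
}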
18 vendor/github.com/containers/image/oci/layout/oci_dest.go generated vendored
@@ -18,9 +18,10 @@ import (
)

type ociImageDestination struct {
	ref           ociReference
	index         imgspecv1.Index
	sharedBlobDir string
	ref                      ociReference
	index                    imgspecv1.Index
	sharedBlobDir            string
	acceptUncompressedLayers bool
}

// newImageDestination returns an ImageDestination for writing to an existing directory.
@@ -43,6 +44,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (types.Imag
	d := &ociImageDestination{ref: ref, index: *index}
	if sys != nil {
		d.sharedBlobDir = sys.OCISharedBlobDirPath
		d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers
	}

	if err := ensureDirectoryExists(d.ref.dir); err != nil {
@@ -81,6 +83,9 @@ func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error {
}

func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression {
	if d.acceptUncompressedLayers {
		return types.PreserveOriginal
	}
	return types.Compress
}

@@ -95,6 +100,13 @@ func (d *ociImageDestination) MustMatchRuntimeOS() bool {
	return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
	return false // N/A, DockerReference() returns nil.
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
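A sketch of opting in from caller code via the SystemContext field this destination now honors; whether copy.Image() then actually skips recompression depends on the rest of the pipeline, so treat this as an assumption about intent rather than a guaranteed outcome:

package main

import (
	"fmt"

	"github.com/containers/image/types"
)

func main() {
	sys := &types.SystemContext{
		// oci: destinations now return PreserveOriginal instead of Compress.
		OCIAcceptUncompressedLayers: true,
	}
	// sys would be passed to image-copy entry points that accept a *types.SystemContext.
	fmt.Println(sys.OCIAcceptUncompressedLayers)
}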
12 vendor/github.com/containers/image/oci/layout/oci_transport.go generated vendored
@@ -23,8 +23,14 @@ func init() {
	transports.Register(Transport)
}

// Transport is an ImageTransport for OCI directories.
var Transport = ociTransport{}
var (
	// Transport is an ImageTransport for OCI directories.
	Transport = ociTransport{}

	// ErrMoreThanOneImage is an error returned when the manifest includes
	// more than one image and the user should choose which one to use.
	ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image")
)

type ociTransport struct{}

@@ -184,7 +190,7 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
			d = &index.Manifests[0]
		} else {
			// ask user to choose image when more than one image in the oci directory
			return imgspecv1.Descriptor{}, errors.Wrapf(err, "more than one image in oci, choose an image")
			return imgspecv1.Descriptor{}, ErrMoreThanOneImage
		}
	} else {
		// if image specified, look through all manifests for a match
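Exporting the sentinel lets callers test for the ambiguous-index case directly instead of matching message strings. A hedged sketch — errors.Cause from github.com/pkg/errors is used as a safe wrapper-peeling choice, though this particular error is returned unwrapped:

package main

import (
	"fmt"

	"github.com/containers/image/oci/layout"
	"github.com/pkg/errors"
)

// isAmbiguousIndex reports whether err means the OCI index lists several
// images and the caller must name one explicitly.
func isAmbiguousIndex(err error) bool {
	return errors.Cause(err) == layout.ErrMoreThanOneImage
}

func main() {
	fmt.Println(isAmbiguousIndex(layout.ErrMoreThanOneImage)) // true
	fmt.Println(isAmbiguousIndex(errors.New("other error")))  // false
}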
9 vendor/github.com/containers/image/openshift/openshift.go generated vendored
@@ -127,7 +127,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
		if statusValid {
			return nil, errors.New(status.Message)
		}
		return nil, errors.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body))
		return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body))
	}

	return body, nil
@@ -369,6 +369,13 @@ func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
	return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
	return d.docker.IgnoresEmbeddedDockerReference()
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
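http.StatusText is plain stdlib; the improved message simply pairs the numeric code with its registered name. A one-liner demonstration of the formatting:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Mirrors the new error text, e.g. "HTTP error: status code: 404 (Not Found)".
	for _, code := range []int{404, 503} {
		fmt.Printf("HTTP error: status code: %d (%s)\n", code, http.StatusText(code))
	}
}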
15 vendor/github.com/containers/image/ostree/ostree_dest.go generated vendored
@@ -14,6 +14,7 @@ import (
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"syscall"
@@ -124,6 +125,13 @@ func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
	return true
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
	return false // N/A, DockerReference() returns nil.
}

func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
	tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
	if err != nil {
@@ -394,6 +402,9 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
}

func (d *ostreeImageDestination) Commit(ctx context.Context) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	repo, err := otbuiltin.OpenRepo(d.ref.repo)
	if err != nil {
		return err
@@ -456,7 +467,9 @@ func (d *ostreeImageDestination) Commit(ctx context.Context) error {
	metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
		fmt.Sprintf("signatures=%d", d.signaturesLen),
		fmt.Sprintf("docker.digest=%s", string(d.digest))}
	err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata)
	if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil {
		return err
	}

	_, err = repo.CommitTransaction()
	return err
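Pinning the goroutine to one OS thread for the whole of Commit() is the standard Go idiom when calling into thread-affine C code — here presumably libostree via the otbuiltin bindings. The pattern in isolation:

package main

import (
	"fmt"
	"runtime"
)

// withPinnedThread runs fn with the calling goroutine locked to its current
// OS thread, so any thread-local state a C library sets up stays valid for
// the duration of the call.
func withPinnedThread(fn func() error) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	return fn()
}

func main() {
	_ = withPinnedThread(func() error {
		fmt.Println("running on a pinned OS thread")
		return nil
	})
}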
41 vendor/github.com/containers/image/pkg/compression/compression.go generated vendored
@@ -5,28 +5,34 @@ import (
	"compress/bzip2"
	"compress/gzip"
	"io"
	"io/ioutil"

	"github.com/pkg/errors"

	"github.com/sirupsen/logrus"
	"github.com/ulikunitz/xz"
)

// DecompressorFunc returns the decompressed stream, given a compressed stream.
type DecompressorFunc func(io.Reader) (io.Reader, error)
// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
type DecompressorFunc func(io.Reader) (io.ReadCloser, error)

// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
func GzipDecompressor(r io.Reader) (io.Reader, error) {
func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
	return gzip.NewReader(r)
}

// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
func Bzip2Decompressor(r io.Reader) (io.Reader, error) {
	return bzip2.NewReader(r), nil
func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
	return ioutil.NopCloser(bzip2.NewReader(r)), nil
}

// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
func XzDecompressor(r io.Reader) (io.Reader, error) {
	return nil, errors.New("Decompressing xz streams is not supported")
func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
	r, err := xz.NewReader(r)
	if err != nil {
		return nil, err
	}
	return ioutil.NopCloser(r), nil
}

// compressionAlgos is an internal implementation detail of DetectCompression
@@ -65,3 +71,24 @@ func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {

	return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
}

// AutoDecompress takes a stream and returns an uncompressed version of the
// same stream.
// The caller must call Close() on the returned stream (even if the input does not need,
// or does not even support, closing!).
func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
	decompressor, stream, err := DetectCompression(stream)
	if err != nil {
		return nil, false, errors.Wrapf(err, "Error detecting compression")
	}
	var res io.ReadCloser
	if decompressor != nil {
		res, err = decompressor(stream)
		if err != nil {
			return nil, false, errors.Wrapf(err, "Error initializing decompression")
		}
	} else {
		res = ioutil.NopCloser(stream)
	}
	return res, decompressor != nil, nil
}
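A usage sketch for the new AutoDecompress helper; the file name is hypothetical, and the input may be gzip, bzip2, xz (now actually supported via github.com/ulikunitz/xz), or plain uncompressed data:

package main

import (
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/containers/image/pkg/compression"
)

func main() {
	f, err := os.Open("layer.tar.gz") // any supported format, or plain data
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	rc, wasCompressed, err := compression.AutoDecompress(f)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close() // required even when the input was not compressed

	log.Printf("input was compressed: %v", wasCompressed)
	if _, err := io.Copy(ioutil.Discard, rc); err != nil {
		log.Fatal(err)
	}
}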
44 vendor/github.com/containers/image/pkg/docker/config/config.go generated vendored
@@ -7,7 +7,6 @@ import (
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/containers/image/types"
@@ -27,16 +26,12 @@ type dockerConfigFile struct {
	CredHelpers map[string]string `json:"credHelpers,omitempty"`
}

const (
	defaultPath       = "/run"
	authCfg           = "containers"
	authCfgFileName   = "auth.json"
	dockerCfg         = ".docker"
	dockerCfgFileName = "config.json"
	dockerLegacyCfg   = ".dockercfg"
)

var (
	defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
	xdgRuntimeDirPath       = filepath.FromSlash("containers/auth.json")
	dockerHomePath          = filepath.FromSlash(".docker/config.json")
	dockerLegacyHomePath    = ".dockercfg"

	// ErrNotLoggedIn is returned for users not logged into a registry
	// that they are trying to logout of
	ErrNotLoggedIn = errors.New("not logged in")
@@ -64,7 +59,7 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
		return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil
	}

	dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyCfg)
	dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath)
	var paths []string
	pathToAuth, err := getPathToAuth(sys)
	if err == nil {
@@ -75,7 +70,7 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
		// Logging the error as a warning instead and moving on to pulling the image
		logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
	}
	paths = append(paths, filepath.Join(homedir.Get(), dockerCfg, dockerCfgFileName), dockerLegacyPath)
	paths = append(paths, filepath.Join(homedir.Get(), dockerHomePath), dockerLegacyPath)

	for _, path := range paths {
		legacyFormat := path == dockerLegacyPath
@@ -135,15 +130,15 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {

// getPath gets the path of the auth.json file
// The path can be overriden by the user if the overwrite-path flag is set
// If the flag is not set and XDG_RUNTIME_DIR is ser, the auth.json file is saved in XDG_RUNTIME_DIR/containers
// Otherwise, the auth.json file is stored in /run/user/UID/containers
// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers
// Otherwise, the auth.json file is stored in /run/containers/UID
func getPathToAuth(sys *types.SystemContext) (string, error) {
	if sys != nil {
		if sys.AuthFilePath != "" {
			return sys.AuthFilePath, nil
		}
		if sys.RootForImplicitAbsolutePaths != "" {
			return filepath.Join(sys.RootForImplicitAbsolutePaths, defaultPath, strconv.Itoa(os.Getuid()), authCfg, authCfgFileName), nil
			return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), nil
		}
	}

@@ -155,14 +150,12 @@ func getPathToAuth(sys *types.SystemContext) (string, error) {
		if os.IsNotExist(err) {
			// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
			// or made a typo while setting the environment variable,
			// so return an error referring to $XDG_RUNTIME_DIR instead of …/authCfgFileName inside.
			// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
			return "", errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
		} // else ignore err and let the caller fail accessing …/authCfgFileName.
		runtimeDir = filepath.Join(runtimeDir, authCfg)
	} else {
		runtimeDir = filepath.Join(defaultPath, authCfg, strconv.Itoa(os.Getuid()))
		} // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
		return filepath.Join(runtimeDir, xdgRuntimeDirPath), nil
	}
	return filepath.Join(runtimeDir, authCfgFileName), nil
	return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), nil
}

// readJSONFile unmarshals the authentications stored in the auth.json file and returns it
@@ -172,9 +165,12 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
	var auths dockerConfigFile

	raw, err := ioutil.ReadFile(path)
	if os.IsNotExist(err) {
		auths.AuthConfigs = map[string]dockerAuthConfig{}
		return auths, nil
	if err != nil {
		if os.IsNotExist(err) {
			auths.AuthConfigs = map[string]dockerAuthConfig{}
			return auths, nil
		}
		return dockerConfigFile{}, err
	}

	if legacyFormat {
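A sketch of the fallback path computation the new defaultPerUIDPathFormat yields when $XDG_RUNTIME_DIR is unset — note the move from /run/user/UID/containers to /run/containers/UID. The format string is copied from the diff; the helper name is mine:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

var defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")

func main() {
	// e.g. /run/containers/1000/auth.json for UID 1000
	fmt.Println(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))
}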
20 vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go generated vendored
@@ -34,16 +34,26 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
	for _, f := range fs {
		fullPath := filepath.Join(dir, f.Name())
		if strings.HasSuffix(f.Name(), ".crt") {
			systemPool, err := tlsconfig.SystemCertPool()
			if err != nil {
				return errors.Wrap(err, "unable to get system cert pool")
			}
			tlsc.RootCAs = systemPool
			logrus.Debugf(" crt: %s", fullPath)
			data, err := ioutil.ReadFile(fullPath)
			if err != nil {
				if os.IsNotExist(err) {
					// Dangling symbolic link?
					// Race with someone who deleted the
					// file after we read the directory's
					// list of contents?
					logrus.Warnf("error reading certificate %q: %v", fullPath, err)
					continue
				}
				return err
			}
			if tlsc.RootCAs == nil {
				systemPool, err := tlsconfig.SystemCertPool()
				if err != nil {
					return errors.Wrap(err, "unable to get system cert pool")
				}
				tlsc.RootCAs = systemPool
			}
			tlsc.RootCAs.AppendCertsFromPEM(data)
		}
		if strings.HasSuffix(f.Name(), ".cert") {
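The reordering makes two things true: a missing file (dangling symlink, concurrent delete) is tolerated, and the system pool is only loaded once a certificate is actually about to be appended, so scanning a directory with no .crt files leaves RootCAs untouched. The lazy-init half of that, sketched with the stdlib pool as a stand-in for tlsconfig.SystemCertPool:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
)

// appendPEM adds PEM data to tlsc.RootCAs, creating the pool from the system
// roots only on first use.
func appendPEM(tlsc *tls.Config, pem []byte) error {
	if tlsc.RootCAs == nil {
		pool, err := x509.SystemCertPool()
		if err != nil {
			return err
		}
		tlsc.RootCAs = pool
	}
	tlsc.RootCAs.AppendCertsFromPEM(pem)
	return nil
}

func main() {
	var cfg tls.Config
	if err := appendPEM(&cfg, nil); err != nil { // nil PEM: nothing appended, pool still created
		log.Fatal(err)
	}
	log.Println("pool initialized:", cfg.RootCAs != nil)
}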
155 vendor/github.com/containers/image/storage/storage_image.go generated vendored
@@ -50,8 +50,7 @@ type storageImageSource struct {
}

type storageImageDestination struct {
	imageRef  storageReference // The reference we'll use to name the image
	publicRef storageReference // The reference we return when asked about the name we'll give to the image
	imageRef       storageReference
	directory      string // Temporary directory where we store blobs until Commit() time
	nextTempFileID int32  // A counter that we use for computing filenames to assign to blobs
	manifest       []byte // Manifest contents, temporary
@@ -102,6 +101,9 @@ func (s storageImageSource) Close() error {

// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
	if info.Digest == image.GzippedEmptyLayerDigest {
		return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
	}
	rc, n, _, err = s.getBlobAndLayerID(info)
	return rc, n, err
}
@@ -175,11 +177,15 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
// the image, after they've been decompressed.
func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
	updatedBlobInfos := []types.BlobInfo{}
	_, manifestType, err := s.GetManifest(ctx, nil)
	manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID)
	}
	man, err := manifest.FromBlob(manifestBlob, manifestType)
	if err != nil {
		return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID)
	}

	uncompressedLayerType := ""
	switch manifestType {
	case imgspecv1.MediaTypeImageManifest:
@@ -188,6 +194,8 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.Blo
		// This is actually a compressed type, but there's no uncompressed type defined
		uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType
	}

	physicalBlobInfos := []types.BlobInfo{}
	layerID := s.image.TopLayer
	for layerID != "" {
		layer, err := s.imageRef.transport.store.Layer(layerID)
@@ -205,10 +213,43 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.Blo
			Size:      layer.UncompressedSize,
			MediaType: uncompressedLayerType,
		}
		updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...)
		physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
		layerID = layer.Parent
	}
	return updatedBlobInfos, nil

	res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
	if err != nil {
		return nil, errors.Wrapf(err, "error creating LayerInfosForCopy of image %q", s.image.ID)
	}
	return res, nil
}

// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
// but using layer data which we can actually produce — physicalInfos for non-empty layers,
// and image.GzippedEmptyLayer for empty ones.
// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
	nextPhysical := 0
	res := make([]types.BlobInfo, len(manifestInfos))
	for i, mi := range manifestInfos {
		if mi.EmptyLayer {
			res[i] = types.BlobInfo{
				Digest:    image.GzippedEmptyLayerDigest,
				Size:      int64(len(image.GzippedEmptyLayer)),
				MediaType: mi.MediaType,
			}
		} else {
			if nextPhysical >= len(physicalInfos) {
				return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
			}
			res[i] = physicalInfos[nextPhysical]
			nextPhysical++
		}
	}
	if nextPhysical != len(physicalInfos) {
		return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
	}
	return res, nil
}

// GetSignatures() parses the image's signatures blob into a slice of byte slices.
@@ -243,15 +284,8 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e
	if err != nil {
		return nil, errors.Wrapf(err, "error creating a temporary directory")
	}
	// Break reading of the reference we're writing, so that copy.Image() won't try to rewrite
	// schema1 image manifests to remove embedded references, since that changes the manifest's
	// digest, and that makes the image unusable if we subsequently try to access it using a
	// reference that mentions the no-longer-correct digest.
	publicRef := imageRef
	publicRef.name = nil
	image := &storageImageDestination{
		imageRef:    imageRef,
		publicRef:   publicRef,
		directory:   directory,
		blobDiffIDs: make(map[digest.Digest]digest.Digest),
		fileSizes:   make(map[digest.Digest]int64),
@@ -261,11 +295,10 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e
	return image, nil
}

// Reference returns a mostly-usable image reference that can't return a DockerReference, to
// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to
// remove image names that they contain which don't match the value we're using.
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (s storageImageDestination) Reference() types.ImageReference {
	return s.publicRef
	return s.imageRef
}

// Close cleans up the temporary directory.
@@ -280,6 +313,10 @@ func (s storageImageDestination) DesiredLayerCompression() types.LayerCompressio
	return types.PreserveOriginal
}

func (s *storageImageDestination) computeNextBlobCacheFile() string {
	return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
}

// PutBlob stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
@@ -295,7 +332,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
		}
	}
	diffID := digest.Canonical.Digester()
	filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
	filename := s.computeNextBlobCacheFile()
	file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
	if err != nil {
		return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename)
@@ -408,12 +445,7 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
	case *manifest.Schema1:
		// Build a list of the diffIDs we've generated for the non-throwaway FS layers,
		// in reverse of the order in which they were originally listed.
		for i, history := range m.History {
			compat := manifest.Schema1V1Compatibility{}
			if err := json.Unmarshal([]byte(history.V1Compatibility), &compat); err != nil {
				logrus.Debugf("internal error reading schema 1 history: %v", err)
				return ""
			}
		for i, compat := range m.ExtractedV1Compatibility {
			if compat.ThrowAway {
				continue
			}
@@ -471,9 +503,11 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
	layerBlobs := man.LayerInfos()
	// Extract or find the layers.
	lastLayer := ""
	addedLayers := []string{}
	for _, blob := range layerBlobs {
		var diff io.ReadCloser
		if blob.EmptyLayer {
			continue
		}

		// Check if there's already a layer with the ID that we'd give to the result of applying
		// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
		diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
@@ -481,7 +515,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
			// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
			// or to even check if we had it.
			logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
			has, _, err := s.HasBlob(ctx, blob)
			has, _, err := s.HasBlob(ctx, blob.BlobInfo)
			if err != nil {
				return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
			}
@@ -502,19 +536,11 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
			lastLayer = layer.ID
			continue
		}
		// Check if we cached a file with that blobsum. If we didn't already have a layer with
		// the blob's contents, we should have gotten a copy.
		if filename, ok := s.filenames[blob.Digest]; ok {
			// Use the file's contents to initialize the layer.
			file, err2 := os.Open(filename)
			if err2 != nil {
				return errors.Wrapf(err2, "error opening file %q", filename)
			}
			defer file.Close()
			diff = file
		}
		if diff == nil {
			// Try to find a layer with contents matching that blobsum.
		// Check if we previously cached a file with that blob's contents. If we didn't,
		// then we need to read the desired contents from a layer.
		filename, ok := s.filenames[blob.Digest]
		if !ok {
			// Try to find the layer with contents matching that blobsum.
			layer := ""
			layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest)
			if err2 == nil && len(layers) > 0 {
@@ -528,29 +554,51 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
			if layer == "" {
				return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
			}
			// Use the layer's contents to initialize the new layer.
			// Read the layer's contents.
			noCompression := archive.Uncompressed
			diffOptions := &storage.DiffOptions{
				Compression: &noCompression,
			}
			diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions)
			diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
			if err2 != nil {
				return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
			}
			defer diff.Close()
			// Copy the layer diff to a file. Diff() takes a lock that it holds
			// until the ReadCloser that it returns is closed, and PutLayer() wants
			// the same lock, so the diff can't just be directly streamed from one
			// to the other.
			filename = s.computeNextBlobCacheFile()
			file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
			if err != nil {
				diff.Close()
				return errors.Wrapf(err, "error creating temporary file %q", filename)
			}
			// Copy the data to the file.
			// TODO: This can take quite some time, and should ideally be cancellable using
			// ctx.Done().
			_, err = io.Copy(file, diff)
			diff.Close()
			file.Close()
			if err != nil {
				return errors.Wrapf(err, "error storing blob to file %q", filename)
			}
			// Make sure that we can find this file later, should we need the layer's
			// contents again.
			s.filenames[blob.Digest] = filename
		}
		if diff == nil {
			// This shouldn't have happened.
			return errors.Errorf("error applying blob %q: content not found", blob.Digest)
		// Read the cached blob and use it as a diff.
		file, err := os.Open(filename)
		if err != nil {
			return errors.Wrapf(err, "error opening file %q", filename)
		}
		defer file.Close()
		// Build the new layer using the diff, regardless of where it came from.
		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
		layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, diff)
		if err != nil {
		layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file)
		if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
			return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
		}
		lastLayer = layer.ID
		addedLayers = append([]string{lastLayer}, addedLayers...)
	}
	// If one of those blobs was a configuration blob, then we can try to dig out the date when the image
	// was originally created, in case we're just copying it. If not, no harm done.
@@ -613,7 +661,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
	if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil {
		names := []string{}
		if name != nil {
			names = append(names, verboseName(name))
			names = append(names, name.String())
		}
		if len(oldNames) > 0 {
			names = append(names, oldNames...)
@@ -703,6 +751,13 @@ func (s *storageImageDestination) MustMatchRuntimeOS() bool {
	return true
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool {
	return true // Yes, we want the unmodified manifest
}

// PutSignatures records the image's signatures for committing as a single data blob.
func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
	sizes := []int{}
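The comment above buildLayerInfosForCopy says the split exists precisely to make the pure mapping logic unit-testable without a store. A hedged sketch of such a test — it must live in package storage because the function is unexported, and the digest values are invented:

package storage

import (
	"testing"

	"github.com/containers/image/image"
	"github.com/containers/image/manifest"
	"github.com/containers/image/types"
)

func TestBuildLayerInfosForCopy(t *testing.T) {
	physical := []types.BlobInfo{{Digest: "sha256:aa", Size: 100}}
	manifestInfos := []manifest.LayerInfo{
		{BlobInfo: types.BlobInfo{Digest: "sha256:bb"}, EmptyLayer: false}, // consumes the physical layer
		{BlobInfo: types.BlobInfo{Digest: "sha256:cc"}, EmptyLayer: true},  // replaced by GzippedEmptyLayer
	}
	res, err := buildLayerInfosForCopy(manifestInfos, physical)
	if err != nil {
		t.Fatal(err)
	}
	if res[0].Digest != physical[0].Digest || res[1].Digest != image.GzippedEmptyLayerDigest {
		t.Fatalf("unexpected result: %+v", res)
	}
}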
139 vendor/github.com/containers/image/storage/storage_reference.go generated vendored
@@ -9,7 +9,6 @@ import (
	"github.com/containers/image/docker/reference"
	"github.com/containers/image/types"
	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)
@@ -17,55 +16,65 @@ import (
// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
// value hex-encoded into a 64-character string, and a reference to a Store
// where an image is, or would be, kept.
// Either "named" or "id" must be set.
type storageReference struct {
	transport storageTransport
	reference string
	named     reference.Named // may include a tag and/or a digest
	id        string
	name      reference.Named
	tag       string
	digest    digest.Digest
}

func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference {
func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) {
	if named == nil && id == "" {
		return nil, ErrInvalidReference
	}
	// We take a copy of the transport, which contains a pointer to the
	// store that it used for resolving this reference, so that the
	// transport that we'll return from Transport() won't be affected by
	// further calls to the original transport's SetStore() method.
	return &storageReference{
		transport: transport,
		reference: reference,
		named:     named,
		id:        id,
		name:      name,
		tag:       tag,
		digest:    digest,
	}, nil
}

// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref
func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
	repo := ref.Name()
	for _, name := range image.Names {
		if named, err := reference.ParseNormalizedNamed(name); err == nil {
			if named.Name() == repo {
				return true
			}
		}
	}
	return false
}

// Resolve the reference's name to an image ID in the store, if there's already
// one present with the same name or ID, and return the image.
func (s *storageReference) resolveImage() (*storage.Image, error) {
	var loadedImage *storage.Image
	if s.id == "" {
		// Look for an image that has the expanded reference name as an explicit Name value.
		image, err := s.transport.store.Image(s.reference)
		image, err := s.transport.store.Image(s.named.String())
		if image != nil && err == nil {
			loadedImage = image
			s.id = image.ID
		}
	}
	if s.id == "" && s.name != nil && s.digest != "" {
		// Look for an image with the specified digest that has the same name,
		// though possibly with a different tag or digest, as a Name value, so
		// that the canonical reference can be implicitly resolved to the image.
		images, err := s.transport.store.ImagesByDigest(s.digest)
		if images != nil && err == nil {
			repo := reference.FamiliarName(reference.TrimNamed(s.name))
		search:
			for _, image := range images {
				for _, name := range image.Names {
					if named, err := reference.ParseNormalizedNamed(name); err == nil {
						if reference.FamiliarName(reference.TrimNamed(named)) == repo {
							s.id = image.ID
							break search
						}
	if s.id == "" && s.named != nil {
		if digested, ok := s.named.(reference.Digested); ok {
			// Look for an image with the specified digest that has the same name,
			// though possibly with a different tag or digest, as a Name value, so
			// that the canonical reference can be implicitly resolved to the image.
			images, err := s.transport.store.ImagesByDigest(digested.Digest())
			if images != nil && err == nil {
				for _, image := range images {
					if imageMatchesRepo(image, s.named) {
						loadedImage = image
						s.id = image.ID
						break
					}
				}
			}
@@ -75,27 +84,20 @@ func (s *storageReference) resolveImage() (*storage.Image, error) {
		logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
		return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport())
	}
	img, err := s.transport.store.Image(s.id)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading image %q", s.id)
	}
	if s.name != nil {
		repo := reference.FamiliarName(reference.TrimNamed(s.name))
		nameMatch := false
		for _, name := range img.Names {
			if named, err := reference.ParseNormalizedNamed(name); err == nil {
				if reference.FamiliarName(reference.TrimNamed(named)) == repo {
					nameMatch = true
					break
				}
			}
	if loadedImage == nil {
		img, err := s.transport.store.Image(s.id)
		if err != nil {
			return nil, errors.Wrapf(err, "error reading image %q", s.id)
		}
		if !nameMatch {
		loadedImage = img
	}
	if s.named != nil {
		if !imageMatchesRepo(loadedImage, s.named) {
			logrus.Errorf("no image matching reference %q found", s.StringWithinTransport())
			return nil, ErrNoSuchImage
		}
	}
	return img, nil
	return loadedImage, nil
}

// Return a Transport object that defaults to using the same store that we used
@@ -110,20 +112,7 @@ func (s storageReference) Transport() types.ImageTransport {

// Return a name with a tag or digest, if we have either, else return it bare.
func (s storageReference) DockerReference() reference.Named {
	if s.name == nil {
		return nil
	}
	if s.tag != "" {
		if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil {
			return namedTagged
		}
	}
	if s.digest != "" {
		if canonical, err := reference.WithDigest(s.name, s.digest); err == nil {
			return canonical
		}
	}
	return s.name
	return s.named
}

// Return a name with a tag, prefixed with the graph root and driver name, to
@@ -135,25 +124,25 @@ func (s storageReference) StringWithinTransport() string {
	if len(options) > 0 {
		optionsList = ":" + strings.Join(options, ",")
	}
	storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
	if s.reference == "" {
		return storeSpec + "@" + s.id
	res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
	if s.named != nil {
		res = res + s.named.String()
	}
	if s.id == "" {
		return storeSpec + s.reference
	if s.id != "" {
		res = res + "@" + s.id
	}
	return storeSpec + s.reference + "@" + s.id
	return res
}

func (s storageReference) PolicyConfigurationIdentity() string {
	storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
	if s.name == nil {
		return storeSpec + "@" + s.id
	res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
	if s.named != nil {
		res = res + s.named.String()
	}
	if s.id == "" {
		return storeSpec + s.reference
	if s.id != "" {
		res = res + "@" + s.id
	}
	return storeSpec + s.reference + "@" + s.id
	return res
}

// Also accept policy that's tied to the combination of the graph root and
@@ -164,9 +153,17 @@ func (s storageReference) PolicyConfigurationNamespaces() []string {
	storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
	driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
	namespaces := []string{}
	if s.name != nil {
		name := reference.TrimNamed(s.name)
		components := strings.Split(name.String(), "/")
	if s.named != nil {
		if s.id != "" {
			// The reference without the ID is also a valid namespace.
			namespaces = append(namespaces, storeSpec+s.named.String())
		}
		tagged, isTagged := s.named.(reference.Tagged)
		_, isDigested := s.named.(reference.Digested)
		if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace.
			namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag())
		}
		components := strings.Split(s.named.Name(), "/")
		for len(components) > 0 {
			namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
			components = components[:len(components)-1]
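To make the new namespace expansion concrete, here is a standalone approximation of what PolicyConfigurationNamespaces() would produce for a fully qualified "name:tag@digest" reference that also has an image ID. The store spec string and digest are placeholders, and the real code additionally appends driverless variants not modeled here:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const storeSpec = "[overlay@/var/lib/containers/storage]" // illustrative
	name, tag := "docker.io/library/busybox", "latest"
	full := name + ":" + tag + "@sha256:aa" // roughly what s.named.String() would render

	namespaces := []string{
		storeSpec + full,             // the reference without the image ID
		storeSpec + name + ":" + tag, // "name:tag" parent of "name:tag@digest"
	}
	// Then each repository path prefix, from most to least specific.
	for components := strings.Split(name, "/"); len(components) > 0; components = components[:len(components)-1] {
		namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
	}
	fmt.Println(strings.Join(namespaces, "\n"))
}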

192 vendor/github.com/containers/image/storage/storage_transport.go (generated, vendored)
@@ -3,6 +3,7 @@
package storage

import (
"fmt"
"path/filepath"
"strings"

@@ -105,7 +106,6 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
// ParseStoreReference takes a name or an ID, tries to figure out which it is
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
var name reference.Named
if ref == "" {
return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference")
}

@@ -118,66 +118,36 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
ref = ref[closeIndex+1:]
}

// The last segment, if there's more than one, is either a digest from a reference, or an image ID.
// The reference may end with an image ID. Image IDs and digests use the same "@" separator;
// here we only peel away an image ID, and leave digests alone.
split := strings.LastIndex(ref, "@")
idOrDigest := ""
if split != -1 {
// Peel off that last bit so that we can work on the rest.
idOrDigest = ref[split+1:]
if idOrDigest == "" {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
}
ref = ref[:split]
}

// The middle segment (now the last segment), if there is one, is a digest.
split = strings.LastIndex(ref, "@")
sum := digest.Digest("")
if split != -1 {
sum = digest.Digest(ref[split+1:])
if sum == "" {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
}
ref = ref[:split]
}

// If we have something that unambiguously should be a digest, validate it, and then the third part,
// if we have one, as an ID.
id := ""
if sum != "" {
if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest)
if split != -1 {
possibleID := ref[split+1:]
if possibleID == "" {
return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref)
}
if err := sum.Validate(); err != nil {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
}
id = idOrDigest
if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) {
// The ID is a truncated version of the ID of an image that's present in local storage,
// so we might as well use the expanded value.
id = img.ID
}
} else if idOrDigest != "" {
// There was no middle portion, so the final portion could be either a digest or an ID.
if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil {
// It's an ID.
id = idOrDigest
} else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil {
// It's a digest.
sum = idSum
} else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) {
// It's a truncated version of the ID of an image that's present in local storage,
// and we may need the expanded value.
id = img.ID
} else {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
// If it looks like a digest, leave it alone for now.
if _, err := digest.Parse(possibleID); err != nil {
// Otherwise…
if idSum, err := digest.Parse("sha256:" + possibleID); err == nil && idSum.Validate() == nil {
id = possibleID // … it is a full ID
} else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) {
// … it is a truncated version of the ID of an image that's present in local storage,
// so we might as well use the expanded value.
id = img.ID
} else {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID)
}
// We have recognized an image ID; peel it off.
ref = ref[:split]
}
}

// If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's
// If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's
// at least of what we guess is a reasonable minimum length, because we don't want a really short value
// like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" {
if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") {
if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) {
// It's a truncated version of the ID of an image that's present in local storage;
// we need to expand it.
@@ -186,53 +156,24 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
}
}

// The initial portion is probably a name, possibly with a tag.
var named reference.Named
// Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been
// completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest..
if ref != "" {
var err error
if name, err = reference.ParseNormalizedNamed(ref); err != nil {
named, err = reference.ParseNormalizedNamed(ref)
if err != nil {
return nil, errors.Wrapf(err, "error parsing named reference %q", ref)
}
}
if name == nil && sum == "" && id == "" {
return nil, errors.Errorf("error parsing reference")
named = reference.TagNameOnly(named)
}

// Construct a copy of the store spec.
optionsList := ""
options := store.GraphOptions()
if len(options) > 0 {
optionsList = ":" + strings.Join(options, ",")
result, err := newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id)
if err != nil {
return nil, err
}
storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]"

// Convert the name back into a reference string, if we got a name.
refname := ""
tag := ""
if name != nil {
if sum.Validate() == nil {
canonical, err := reference.WithDigest(name, sum)
if err != nil {
return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum)
}
refname = verboseName(canonical)
} else {
name = reference.TagNameOnly(name)
tagged, ok := name.(reference.Tagged)
if !ok {
return nil, errors.Errorf("error parsing possibly-tagless name %q", ref)
}
refname = verboseName(name)
tag = tagged.Tag()
}
}
if refname == "" {
logrus.Debugf("parsed reference to id into %q", storeSpec+"@"+id)
} else if id == "" {
logrus.Debugf("parsed reference to refname into %q", storeSpec+refname)
} else {
logrus.Debugf("parsed reference to refname@id into %q", storeSpec+refname+"@"+id)
}
return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil
logrus.Debugf("parsed reference into %q", result.StringWithinTransport())
return result, nil
}

func (s *storageTransport) GetStore() (storage.Store, error) {
@@ -252,7 +193,7 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
}

// ParseReference takes a name and a tag or digest and/or ID
// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"),
// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"),
// possibly prefixed with a store specifier in the form "[_graphroot_]" or
// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
@@ -338,7 +279,7 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
dref := ref.DockerReference()
if dref != nil {
if img, err := store.Image(verboseName(dref)); err == nil {
if img, err := store.Image(dref.String()); err == nil {
return img, nil
}
}
@@ -399,52 +340,29 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
if scope == "" {
return nil
}
// But if there is anything left, it has to be a name, with or without
// a tag, with or without an ID, since we don't return namespace values
// that are just bare IDs.
scopeInfo := strings.SplitN(scope, "@", 2)
if len(scopeInfo) == 1 && scopeInfo[0] != "" {
_, err := reference.ParseNormalizedNamed(scopeInfo[0])
if err != nil {

fields := strings.SplitN(scope, "@", 3)
switch len(fields) {
case 1: // name only
case 2: // name:tag@ID or name[:tag]@digest
if _, idErr := digest.Parse("sha256:" + fields[1]); idErr != nil {
if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error())
}
}
case 3: // name[:tag]@digest@ID
if _, err := digest.Parse(fields[1]); err != nil {
return err
}
} else if len(scopeInfo) == 2 && scopeInfo[0] != "" && scopeInfo[1] != "" {
_, err := reference.ParseNormalizedNamed(scopeInfo[0])
if err != nil {
if _, err := digest.Parse("sha256:" + fields[2]); err != nil {
return err
}
_, err = digest.Parse("sha256:" + scopeInfo[1])
if err != nil {
return err
}
} else {
return ErrInvalidReference
default: // Coverage: This should never happen
return errors.New("Internal error: unexpected number of fields form strings.SplitN")
}
// As for field[0], if it is non-empty at all:
// FIXME? We could be verifying the various character set and length restrictions
// from docker/distribution/reference.regexp.go, but other than that there
// are few semantically invalid strings.
return nil
}

func verboseName(r reference.Reference) string {
if r == nil {
return ""
}
named, isNamed := r.(reference.Named)
digested, isDigested := r.(reference.Digested)
tagged, isTagged := r.(reference.Tagged)
name := ""
tag := ""
sum := ""
if isNamed {
name = (reference.TrimNamed(named)).String()
}
if isTagged {
if tagged.Tag() != "" {
tag = ":" + tagged.Tag()
}
}
if isDigested {
if digested.Digest().Validate() == nil {
sum = "@" + digested.Digest().String()
}
}
return name + tag + sum
}
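
The reworked ParseStoreReference peels "@"-separated components off the right-hand side of the reference, one at a time: first a possible image ID, then (only if it parses as one) a digest, leaving a name with an optional tag. A minimal, self-contained sketch of that peeling step; the peel helper is illustrative only and not part of the library:

package main

import (
	"fmt"
	"strings"
)

// peel splits off the last "@"-separated component, mirroring the
// right-to-left scan in the patch above.
func peel(ref string) (rest, last string) {
	if i := strings.LastIndex(ref, "@"); i != -1 {
		return ref[:i], ref[i+1:]
	}
	return ref, ""
}

func main() {
	ref := "docker.io/library/busybox:latest@sha256:0123abcd@0123456789ab"
	rest, id := peel(ref)    // id candidate: "0123456789ab"
	name, dgst := peel(rest) // digest candidate: "sha256:0123abcd"
	fmt.Println(name, dgst, id)
}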

1 vendor/github.com/containers/image/transports/alltransports/alltransports.go (generated, vendored)
@@ -9,7 +9,6 @@ import (
_ "github.com/containers/image/directory"
_ "github.com/containers/image/docker"
_ "github.com/containers/image/docker/archive"
_ "github.com/containers/image/docker/daemon"
_ "github.com/containers/image/oci/archive"
_ "github.com/containers/image/oci/layout"
_ "github.com/containers/image/openshift"

8 vendor/github.com/containers/image/transports/alltransports/docker_daemon.go (generated, vendored, new file)
@@ -0,0 +1,8 @@
// +build !containers_image_docker_daemon_stub

package alltransports

import (
// Register the docker-daemon transport
_ "github.com/containers/image/docker/daemon"
)

9 vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go (generated, vendored, new file)
@@ -0,0 +1,9 @@
// +build containers_image_docker_daemon_stub

package alltransports

import "github.com/containers/image/transports"

func init() {
transports.Register(transports.NewStubTransport("docker-daemon"))
}
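
The two new files implement a build-time switch: by default the real docker-daemon transport is registered, while building with -tags containers_image_docker_daemon_stub compiles the stub instead. The same pattern in miniature, with hypothetical package, file, and tag names:

// File registry/real.go (compiled by default)
// +build !myapp_stub

package registry

// Name reports which implementation was linked in.
func Name() string { return "real" }

// File registry/stub.go (compiled only with: go build -tags myapp_stub)
// +build myapp_stub

package registry

func Name() string { return "stub" }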

7 vendor/github.com/containers/image/types/types.go (generated, vendored)
@@ -174,6 +174,11 @@ type ImageDestination interface {
AcceptsForeignLayerURLs() bool
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
MustMatchRuntimeOS() bool
// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
IgnoresEmbeddedDockerReference() bool

// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
@@ -359,6 +364,8 @@ type SystemContext struct {
OCIInsecureSkipTLSVerify bool
// If not "", use a shared directory for storing blobs rather than within OCI layouts
OCISharedBlobDirPath string
// Allow UnCompress image layer for OCI image layer
OCIAcceptUncompressedLayers bool

// === docker.Transport overrides ===
// If not "", a directory containing a CA certificate (ending with ".crt"),
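
IgnoresEmbeddedDockerReference is a new boolean knob on the ImageDestination interface. A hedged sketch of a destination opting in; dirDestination is a hypothetical type, not one from the library:

package example

type dirDestination struct{}

// Ask for the unmodified manifest even when it embeds a Docker
// reference that differs from where we are writing.
func (d *dirDestination) IgnoresEmbeddedDockerReference() bool { return true }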

9 vendor/github.com/containers/image/vendor.conf (generated, vendored)
@@ -5,10 +5,11 @@ github.com/containers/storage master
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 30eb4d8cdc422b023d5f11f29a82ecb73554183b
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341
github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c
github.com/gorilla/mux 94e7d24fd285520f3d12ae998f7fdd6b5393d453
github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
@@ -38,3 +39,7 @@ github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
github.com/pquerna/ffjson master
github.com/syndtr/gocapability master
github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4

3 vendor/github.com/containers/storage/README.md (generated, vendored)
@@ -41,3 +41,6 @@ memory and stored along with the library's own bookkeeping information.
Additionally, the library can store one or more of what it calls *big data* for
images and containers. This is a named chunk of larger data, which is only in
memory when it is being read from or being written to its own disk file.

**[Contributing](CONTRIBUTING.md)**
Information about contributing to this project.

11 vendor/github.com/containers/storage/containers.go (generated, vendored)
@@ -2,6 +2,7 @@ package storage

import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -191,6 +192,9 @@ func (r *containerStore) Load() error {
}

func (r *containerStore) Save() error {
if !r.Locked() {
return errors.New("container store is not locked")
}
rpath := r.containerspath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
@@ -278,7 +282,8 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, ErrDuplicateName
return nil, errors.Wrapf(ErrDuplicateName,
fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". You have to remove that container to be able to reuse that name.", name, r.byname[name].ID))
}
}
if err == nil {
@@ -558,3 +563,7 @@ func (r *containerStore) IsReadWrite() bool {
func (r *containerStore) TouchedSince(when time.Time) bool {
return r.lockfile.TouchedSince(when)
}

func (r *containerStore) Locked() bool {
return r.lockfile.Locked()
}

3 vendor/github.com/containers/storage/containers_ffjson.go (generated, vendored)
@@ -1,5 +1,6 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: ./containers.go
// source: containers.go
//

package storage

40 vendor/github.com/containers/storage/drivers/aufs/aufs.go (generated, vendored)
@@ -42,6 +42,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/locker"
mountpk "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -77,6 +78,7 @@ type Driver struct {
pathCache map[string]string
naiveDiff graphdriver.DiffDriver
locker *locker.Locker
mountOptions string
}

// Init returns a new AUFS driver.
@@ -103,6 +105,20 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs)
}

var mountOptions string
for _, option := range options {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
}
key = strings.ToLower(key)
switch key {
case "aufs.mountopt":
mountOptions = val
default:
return nil, fmt.Errorf("option %s not supported", option)
}
}
paths := []string{
"mnt",
"diff",
@@ -110,12 +126,13 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}

a := &Driver{
root: root,
uidMaps: uidMaps,
gidMaps: gidMaps,
pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
locker: locker.New(),
root: root,
uidMaps: uidMaps,
gidMaps: gidMaps,
pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
locker: locker.New(),
mountOptions: mountOptions,
}

rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@@ -399,7 +416,7 @@ func atomicRemove(source string) error {

// Get returns the rootfs path for the id.
// This will mount the dir at its given path
func (a *Driver) Get(id, mountLabel string) (string, error) {
func (a *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
a.locker.Lock(id)
defer a.locker.Unlock(id)
parents, err := a.getParentLayerPaths(id)
@@ -653,6 +670,10 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
}

opts := "dio,xino=/dev/shm/aufs.xino"
if a.mountOptions != "" {
opts += fmt.Sprintf(",%s", a.mountOptions)
}

if useDirperm() {
opts += ",dirperm1"
}
@@ -707,3 +728,8 @@ func useDirperm() bool {
func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
return fmt.Errorf("aufs doesn't support changing ID mappings")
}

// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
func (a *Driver) SupportsShifting() bool {
return false
}
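
The new aufs.mountopt option is parsed with parsers.ParseKeyValueOpt, which splits a "key=value" storage option. A minimal stand-in for that helper, for illustration only (the real one lives in github.com/containers/storage/pkg/parsers):

package main

import (
	"fmt"
	"strings"
)

// parseKeyValueOpt splits "aufs.mountopt=noxattr" into key and value,
// approximating the vendored parsers.ParseKeyValueOpt.
func parseKeyValueOpt(opt string) (string, string, error) {
	parts := strings.SplitN(opt, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
	}
	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}

func main() {
	key, val, err := parseKeyValueOpt("aufs.mountopt=noxattr")
	fmt.Println(key, val, err) // aufs.mountopt noxattr <nil>
}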

4 vendor/github.com/containers/storage/drivers/btrfs/btrfs.go (generated, vendored)
@@ -110,6 +110,8 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
}
userDiskQuota = true
options.minSpace = uint64(minSpace)
case "btrfs.mountopt":
return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options")
default:
return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
}
@@ -632,7 +634,7 @@ func (d *Driver) Remove(id string) error {
}

// Get the requested filesystem id.
func (d *Driver) Get(id, mountLabel string) (string, error) {
func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
dir := d.subvolumesDirID(id)
st, err := os.Stat(dir)
if err != nil {

50 vendor/github.com/containers/storage/drivers/chown.go (generated, vendored)
@@ -6,7 +6,6 @@ import (
"fmt"
"os"
"path/filepath"
"syscall"

"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
@@ -56,47 +55,7 @@ func chownByMapsMain() {
if err != nil {
return fmt.Errorf("error walking to %q: %v", path, err)
}
sysinfo := info.Sys()
if st, ok := sysinfo.(*syscall.Stat_t); ok {
// Map an on-disk UID/GID pair from host to container
// using the first map, then back to the host using the
// second map. Skip that first step if they're 0, to
// compensate for cases where a parent layer should
// have had a mapped value, but didn't.
uid, gid := int(st.Uid), int(st.Gid)
if toContainer != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedUid, mappedGid, err := toContainer.ToContainer(pair)
if err != nil {
if (uid != 0) || (gid != 0) {
return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
}
mappedUid, mappedGid = uid, gid
}
uid, gid = mappedUid, mappedGid
}
if toHost != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedPair, err := toHost.ToHost(pair)
if err != nil {
return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
}
uid, gid = mappedPair.UID, mappedPair.GID
}
if uid != int(st.Uid) || gid != int(st.Gid) {
// Make the change.
if err := syscall.Lchown(path, uid, gid); err != nil {
return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err)
}
}
}
return nil
return platformLChown(path, info, toHost, toContainer)
}
if err := filepath.Walk(".", chown); err != nil {
fmt.Fprintf(os.Stderr, "error during chown: %v", err)
@@ -155,7 +114,7 @@ func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater {
// same "container" IDs.
func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
driver := n.ProtoDriver
layerFs, err := driver.Get(id, mountLabel)
layerFs, err := driver.Get(id, mountLabel, nil, nil)
if err != nil {
return err
}
@@ -165,3 +124,8 @@ func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost

return ChownPathByMaps(layerFs, toContainer, toHost)
}

// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
func (n *naiveLayerIDMapUpdater) SupportsShifting() bool {
return false
}

55 vendor/github.com/containers/storage/drivers/chown_unix.go (generated, vendored, new file)
@@ -0,0 +1,55 @@
// +build !windows

package graphdriver

import (
"fmt"
"os"
"syscall"

"github.com/containers/storage/pkg/idtools"
)

func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
sysinfo := info.Sys()
if st, ok := sysinfo.(*syscall.Stat_t); ok {
// Map an on-disk UID/GID pair from host to container
// using the first map, then back to the host using the
// second map. Skip that first step if they're 0, to
// compensate for cases where a parent layer should
// have had a mapped value, but didn't.
uid, gid := int(st.Uid), int(st.Gid)
if toContainer != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedUid, mappedGid, err := toContainer.ToContainer(pair)
if err != nil {
if (uid != 0) || (gid != 0) {
return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
}
mappedUid, mappedGid = uid, gid
}
uid, gid = mappedUid, mappedGid
}
if toHost != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedPair, err := toHost.ToHost(pair)
if err != nil {
return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
}
uid, gid = mappedPair.UID, mappedPair.GID
}
if uid != int(st.Uid) || gid != int(st.Gid) {
// Make the change.
if err := syscall.Lchown(path, uid, gid); err != nil {
return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err)
}
}
}
return nil
}
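
platformLChown round-trips each file's UID/GID pair from host to container and back through the two IDMappings before calling Lchown. A small sketch of that round trip, assuming the vendored idtools package exposes NewIDMappingsFromMaps with the same shape as docker's idtools:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// One range: container IDs 0-65535 live at host IDs 100000-165535.
	maps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	m := idtools.NewIDMappingsFromMaps(maps, maps)

	// A host-owned file at 100001:100001, as seen from the container...
	uid, gid, err := m.ToContainer(idtools.IDPair{UID: 100001, GID: 100001})
	fmt.Println(uid, gid, err) // 1 1 <nil>

	// ...and mapped back out to the host, as platformLChown does.
	pair, err := m.ToHost(idtools.IDPair{UID: uid, GID: gid})
	fmt.Println(pair.UID, pair.GID, err) // 100001 100001 <nil>
}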

14 vendor/github.com/containers/storage/drivers/chown_windows.go (generated, vendored, new file)
@@ -0,0 +1,14 @@
// +build windows

package graphdriver

import (
"os"
"syscall"

"github.com/containers/storage/pkg/idtools"
)

func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
return &os.PathError{"lchown", path, syscall.EWINDOWS}
}

2 vendor/github.com/containers/storage/drivers/chroot_windows.go (generated, vendored)
@@ -1,7 +1,7 @@
package graphdriver

import (
"os"
"fmt"
"syscall"
)

2 vendor/github.com/containers/storage/drivers/devmapper/deviceset.go (generated, vendored)
@@ -2690,7 +2690,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
devices.filesystem = val
case "dm.mkfsarg":
devices.mkfsArgs = append(devices.mkfsArgs, val)
case "dm.mountopt":
case "dm.mountopt", "devicemapper.mountopt":
devices.mountOptions = joinMountOptions(devices.mountOptions, val)
case "dm.metadatadev":
devices.metadataDevice = val

2 vendor/github.com/containers/storage/drivers/devmapper/driver.go (generated, vendored)
@@ -163,7 +163,7 @@ func (d *Driver) Remove(id string) error {
}

// Get mounts a device with given id into the root filesystem
func (d *Driver) Get(id, mountLabel string) (string, error) {
func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
d.locker.Lock(id)
defer d.locker.Unlock(id)
mp := path.Join(d.home, "mnt", id)

7 vendor/github.com/containers/storage/drivers/driver.go (generated, vendored)
@@ -66,8 +66,9 @@ type ProtoDriver interface {
Remove(id string) error
// Get returns the mountpoint for the layered filesystem referred
// to by this id. You can optionally specify a mountLabel or "".
// Optionally it gets the mappings used to create the layer.
// Returns the absolute path to the mounted layered filesystem.
Get(id, mountLabel string) (dir string, err error)
Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (dir string, err error)
// Put releases the system resources for the specified id,
// e.g, unmounting layered filesystem.
Put(id string) error
@@ -118,6 +119,10 @@ type LayerIDMapUpdater interface {
// relative to a parent layer, but before this method is called, may be discarded
// by Diff().
UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error

// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in a
// image and it is not required to Chown the files when running in an user namespace.
SupportsShifting() bool
}

// Driver is the interface for layered/snapshot file system drivers.
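
Every driver's Get gains uidMaps/gidMaps parameters here; callers with no mappings to supply pass nil, nil, as the fsdiff.go hunks below show. A sketch of satisfying the widened ProtoDriver method; trivialDriver and its field are hypothetical:

package graphdriver

import "github.com/containers/storage/pkg/idtools"

type trivialDriver struct {
	dirs map[string]string // id -> mountpoint
}

// Get receives the ID mappings used to create the layer; drivers that
// cannot use them (like this one) simply ignore the extra arguments.
func (d *trivialDriver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
	return d.dirs[id], nil
}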

3 vendor/github.com/containers/storage/drivers/driver_linux.go (generated, vendored)
@@ -54,7 +54,8 @@ var (
// Slice of drivers that should be used in an order
priority = []string{
"overlay",
"devicemapper",
// We don't support devicemapper without configuration
// "devicemapper",
"aufs",
"btrfs",
"zfs",

13 vendor/github.com/containers/storage/drivers/fsdiff.go (generated, vendored)
@@ -51,7 +51,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
parentMappings = &idtools.IDMappings{}
}

layerFs, err := driver.Get(id, mountLabel)
layerFs, err := driver.Get(id, mountLabel, nil, nil)
if err != nil {
return nil, err
}
@@ -78,7 +78,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
}), nil
}

parentFs, err := driver.Get(parent, mountLabel)
parentFs, err := driver.Get(parent, mountLabel, nil, nil)
if err != nil {
return nil, err
}
@@ -119,7 +119,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
parentMappings = &idtools.IDMappings{}
}

layerFs, err := driver.Get(id, mountLabel)
layerFs, err := driver.Get(id, mountLabel, nil, nil)
if err != nil {
return nil, err
}
@@ -128,7 +128,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
parentFs := ""

if parent != "" {
parentFs, err = driver.Get(parent, mountLabel)
parentFs, err = driver.Get(parent, mountLabel, nil, nil)
if err != nil {
return nil, err
}
@@ -149,7 +149,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappin
}

// Mount the root filesystem so we can apply the diff/layer.
layerFs, err := driver.Get(id, mountLabel)
layerFs, err := driver.Get(id, mountLabel, nil, nil)
if err != nil {
return
}
@@ -163,6 +163,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappin
start := time.Now().UTC()
logrus.Debug("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
logrus.Errorf("Error while applying layer: %s", err)
return
}
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
@@ -188,7 +189,7 @@ func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings,
return
}

layerFs, err := driver.Get(id, mountLabel)
layerFs, err := driver.Get(id, mountLabel, nil, nil)
if err != nil {
return
}

140 vendor/github.com/containers/storage/drivers/overlay/overlay.go (generated, vendored)
@@ -3,6 +3,7 @@
package overlay

import (
"bytes"
"fmt"
"io"
"io/ioutil"
@@ -24,6 +25,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/locker"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/ostree"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
@@ -84,6 +86,10 @@ type overlayOptions struct {
overrideKernelCheck bool
imageStores []string
quota quota.Quota
mountProgram string
ostreeRepo string
skipMountHome bool
mountOptions string
}

// Driver contains information about the home directory and the list of active mounts that are created using this driver.
@@ -98,6 +104,7 @@ type Driver struct {
naiveDiff graphdriver.DiffDriver
supportsDType bool
locker *locker.Locker
convert map[string]bool
}

var (
@@ -147,15 +154,28 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return nil, err
}

supportsDType, err := supportsOverlay(home, fsMagic, rootUID, rootGID)
if err != nil {
os.Remove(filepath.Join(home, linkDir))
os.Remove(home)
return nil, errors.Wrap(err, "kernel does not support overlay fs")
var supportsDType bool
if opts.mountProgram != "" {
supportsDType = true
} else {
supportsDType, err = supportsOverlay(home, fsMagic, rootUID, rootGID)
if err != nil {
os.Remove(filepath.Join(home, linkDir))
os.Remove(home)
return nil, errors.Wrap(err, "kernel does not support overlay fs")
}
}

if err := mount.MakePrivate(home); err != nil {
return nil, err
if !opts.skipMountHome {
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
}

if opts.ostreeRepo != "" {
if err := ostree.CreateOSTreeRepository(opts.ostreeRepo, rootUID, rootGID); err != nil {
return nil, err
}
}

d := &Driver{
@@ -167,6 +187,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
supportsDType: supportsDType,
locker: locker.New(),
options: *opts,
convert: make(map[string]bool),
}

d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d)
@@ -203,6 +224,8 @@ func parseOptions(options []string) (*overlayOptions, error) {
if err != nil {
return nil, err
}
case ".mountopt", "overlay.mountopt", "overlay2.mountopt":
o.mountOptions = val
case ".size", "overlay.size", "overlay2.size":
logrus.Debugf("overlay: size=%s", val)
size, err := units.RAMInBytes(val)
@@ -227,6 +250,25 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
o.imageStores = append(o.imageStores, store)
}
case ".mount_program", "overlay.mount_program", "overlay2.mount_program":
logrus.Debugf("overlay: mount_program=%s", val)
_, err := os.Stat(val)
if err != nil {
return nil, fmt.Errorf("overlay: can't stat program %s: %v", val, err)
}
o.mountProgram = val
case "overlay2.ostree_repo", "overlay.ostree_repo", ".ostree_repo":
logrus.Debugf("overlay: ostree_repo=%s", val)
if !ostree.OstreeSupport() {
return nil, fmt.Errorf("overlay: ostree_repo specified but support for ostree is missing")
}
o.ostreeRepo = val
case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home":
logrus.Debugf("overlay: skip_mount_home=%s", val)
o.skipMountHome, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("overlay: Unknown option %s", key)
}
@@ -236,6 +278,7 @@ func parseOptions(options []string) (*overlayOptions, error) {

func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) {
// We can try to modprobe overlay first

exec.Command("modprobe", "overlay").Run()

layerDir, err := ioutil.TempDir(home, "compat")
@@ -380,6 +423,11 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
}
}

if d.options.ostreeRepo != "" {
d.convert[id] = true
}

return d.create(id, parent, opts)
}

@@ -543,10 +591,42 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
return lowersArray, nil
}

func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMap) string {
if uidMaps == nil {
uidMaps = d.uidMaps
}
if gidMaps == nil {
gidMaps = d.gidMaps
}
if uidMaps != nil {
var uids, gids bytes.Buffer
for _, i := range uidMaps {
if uids.Len() > 0 {
uids.WriteString(":")
}
uids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size))
}
for _, i := range gidMaps {
if gids.Len() > 0 {
gids.WriteString(":")
}
gids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size))
}
return fmt.Sprintf("%s,uidmapping=%s,gidmapping=%s", opts, uids.String(), gids.String())
}
return opts
}

// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
d.locker.Lock(id)
defer d.locker.Unlock(id)

// Ignore errors, we don't want to fail if the ostree branch doesn't exist,
if d.options.ostreeRepo != "" {
ostree.DeleteOSTree(d.options.ostreeRepo, id)
}

dir := d.dir(id)
lid, err := ioutil.ReadFile(path.Join(dir, "link"))
if err == nil {
@@ -562,7 +642,11 @@ func (d *Driver) Remove(id string) error {
}

// Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (_ string, retErr error) {
return d.get(id, mountLabel, false, uidMaps, gidMaps)
}

func (d *Driver) get(id, mountLabel string, disableShifting bool, uidMaps, gidMaps []idtools.IDMap) (_ string, retErr error) {
d.locker.Lock(id)
defer d.locker.Unlock(id)
dir := d.dir(id)
@@ -653,8 +737,11 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {

workDir := path.Join(dir, "work")
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir)
if d.options.mountOptions != "" {
opts = fmt.Sprintf("%s,%s", d.options.mountOptions, opts)
}
mountData := label.FormatMountLabel(opts, mountLabel)
mount := unix.Mount
mountFunc := unix.Mount
mountTarget := mergedDir

pageSize := unix.Getpagesize()
@@ -663,20 +750,30 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
// the page size. The mount syscall fails if the mount data cannot
// fit within a page and relative links make the mount data much
// smaller at the expense of requiring a fork exec to chroot.
if len(mountData) > pageSize {
if d.options.mountProgram != "" {
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
if !disableShifting {
label = d.optsAppendMappings(label, uidMaps, gidMaps)
}

mountProgram := exec.Command(d.options.mountProgram, "-o", label, target)
mountProgram.Dir = d.home
return mountProgram.Run()
}
} else if len(mountData) > pageSize {
//FIXME: We need to figure out to get this to work with additional stores
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), path.Join(id, "diff"), path.Join(id, "work"))
mountData = label.FormatMountLabel(opts, mountLabel)
if len(mountData) > pageSize {
return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
}

mount = func(source string, target string, mType string, flags uintptr, label string) error {
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
return mountFrom(d.home, source, target, mType, flags, label)
}
mountTarget = path.Join(id, "merged")
}
if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil {
flags, data := mount.ParseOptions(mountData)
if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err)
}

@@ -764,6 +861,13 @@ func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent str
return 0, err
}

_, convert := d.convert[id]
if convert {
if err := ostree.ConvertToOSTree(d.options.ostreeRepo, applyDir, id); err != nil {
return 0, err
}
}

return directory.Size(applyDir)
}

@@ -848,7 +952,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
}

// Mount the new layer and handle ownership changes and possible copy_ups in it.
layerFs, err := d.Get(id, mountLabel)
layerFs, err := d.get(id, mountLabel, true, nil, nil)
if err != nil {
return err
}
@@ -885,6 +989,14 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
return nil
}

// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
func (d *Driver) SupportsShifting() bool {
if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" {
return true
}
return d.options.mountProgram != ""
}

// dumbJoin is more or less a dumber version of filepath.Join, but one which
// won't Clean() the path, allowing us to append ".." as a component and trust
// pathname resolution to do some non-obvious work.
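
optsAppendMappings formats each idtools.IDMap triple as container:host:size, joins ranges with colons, and appends uidmapping=/gidmapping= to the option string handed to the external mount program (such as fuse-overlayfs). A self-contained re-implementation of just that formatting, for illustration:

package main

import (
	"fmt"
	"strings"
)

type idMap struct{ ContainerID, HostID, Size int }

// appendMappings mirrors the option-string format built in the patch above.
func appendMappings(opts string, uids, gids []idMap) string {
	ser := func(maps []idMap) string {
		parts := make([]string, 0, len(maps))
		for _, m := range maps {
			parts = append(parts, fmt.Sprintf("%d:%d:%d", m.ContainerID, m.HostID, m.Size))
		}
		return strings.Join(parts, ":")
	}
	return fmt.Sprintf("%s,uidmapping=%s,gidmapping=%s", opts, ser(uids), ser(gids))
}

func main() {
	maps := []idMap{{0, 100000, 65536}}
	fmt.Println(appendMappings("lowerdir=l,upperdir=u,workdir=w", maps, maps))
	// lowerdir=l,upperdir=u,workdir=w,uidmapping=0:100000:65536,gidmapping=0:100000:65536
}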

59 vendor/github.com/containers/storage/drivers/vfs/driver.go (generated, vendored)
@@ -9,6 +9,7 @@ import (
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ostree"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/selinux/go-selinux/label"
)
@@ -42,6 +43,30 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
d.homes = append(d.homes, strings.Split(option[12:], ",")...)
continue
}
if strings.HasPrefix(option, "vfs.ostree_repo=") {
if !ostree.OstreeSupport() {
return nil, fmt.Errorf("vfs: ostree_repo specified but support for ostree is missing")
}
d.ostreeRepo = option[16:]
}
if strings.HasPrefix(option, ".ostree_repo=") {
if !ostree.OstreeSupport() {
return nil, fmt.Errorf("vfs: ostree_repo specified but support for ostree is missing")
}
d.ostreeRepo = option[13:]
}
if strings.HasPrefix(option, "vfs.mountopt=") {
return nil, fmt.Errorf("vfs driver does not support mount options")
}
}
if d.ostreeRepo != "" {
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
if err := ostree.CreateOSTreeRepository(d.ostreeRepo, rootUID, rootGID); err != nil {
return nil, err
}
}
return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
}
@@ -53,6 +78,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
type Driver struct {
homes []string
idMappings *idtools.IDMappings
ostreeRepo string
}

func (d *Driver) String() string {
@@ -77,11 +103,15 @@ func (d *Driver) Cleanup() error {
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return d.Create(id, parent, opts)
return d.create(id, parent, opts, false)
}

// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent.
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
return d.create(id, parent, opts, true)
}

func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool) error {
if opts != nil && len(opts.StorageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for vfs")
}
@@ -106,14 +136,23 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if _, mountLabel, err := label.InitLabels(labelOpts); err == nil {
label.SetFileLabel(dir, mountLabel)
}
if parent == "" {
return nil
if parent != "" {
parentDir, err := d.Get(parent, "", nil, nil)
if err != nil {
return fmt.Errorf("%s: %s", parent, err)
}
if err := CopyWithTar(parentDir, dir); err != nil {
return err
}
}
parentDir, err := d.Get(parent, "")
if err != nil {
return fmt.Errorf("%s: %s", parent, err)

if ro && d.ostreeRepo != "" {
if err := ostree.ConvertToOSTree(d.ostreeRepo, dir, id); err != nil {
return err
}
}
return CopyWithTar(parentDir, dir)
return nil

}

func (d *Driver) dir(id string) string {
@@ -132,11 +171,15 @@ func (d *Driver) dir(id string) string {

// Remove deletes the content from the directory for a given id.
func (d *Driver) Remove(id string) error {
if d.ostreeRepo != "" {
// Ignore errors, we don't want to fail if the ostree branch doesn't exist,
ostree.DeleteOSTree(d.ostreeRepo, id)
}
return system.EnsureRemoveAll(d.dir(id))
}

// Get returns the directory for the given id.
func (d *Driver) Get(id, mountLabel string) (string, error) {
func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
dir := d.dir(id)
if st, err := os.Stat(dir); err != nil {
return "", err

22 vendor/github.com/containers/storage/drivers/windows/windows.go (generated, vendored)
@@ -86,6 +86,14 @@ type Driver struct {
func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
logrus.Debugf("WindowsGraphDriver InitFilter at %s", home)

for _, option := range options {
if strings.HasPrefix(option, "windows.mountopt=") {
return nil, fmt.Errorf("windows driver does not support mount options")
} else {
return nil, fmt.Errorf("option %s not supported", option)
}
}

fsType, err := getFileSystemType(string(home[0]))
if err != nil {
return nil, err
@@ -354,7 +362,7 @@ func (d *Driver) Remove(id string) error {
}

// Get returns the rootfs path for the id. This will mount the dir at its given path.
func (d *Driver) Get(id, mountLabel string) (string, error) {
func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
panicIfUsedByLcow()
logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
var dir string
@@ -612,7 +620,7 @@ func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent stri
return
}

layerFs, err := d.Get(id, "")
layerFs, err := d.Get(id, "", nil, nil)
if err != nil {
return
}
@@ -940,17 +948,17 @@ func (d *Driver) AdditionalImageStores() []string {
return nil
}

// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil
}

// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
// matching those in toContainer to matching those in toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
return fmt.Errorf("windows doesn't support changing ID mappings")
}

// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
func (d *Driver) SupportsShifting() bool {
return false
}

type storageOptions struct {
size uint64
}

11 vendor/github.com/containers/storage/drivers/zfs/zfs.go (generated, vendored)
@@ -24,8 +24,9 @@ import (
)

type zfsOptions struct {
fsName string
mountPath string
fsName string
mountPath string
mountOptions string
}

func init() {
@@ -134,6 +135,8 @@ func parseOptions(opt []string) (zfsOptions, error) {
switch key {
case "zfs.fsname":
options.fsName = val
case "zfs.mountopt":
options.mountOptions = val
default:
return options, fmt.Errorf("Unknown option %s", key)
}
@@ -357,14 +360,14 @@ func (d *Driver) Remove(id string) error {
}

// Get returns the mountpoint for the given id after creating the target directories if necessary.
func (d *Driver) Get(id, mountLabel string) (string, error) {
func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
mountpoint := d.mountPath(id)
if count := d.ctr.Increment(mountpoint); count > 1 {
return mountpoint, nil
}

filesystem := d.zfsPath(id)
options := label.FormatMountLabel("", mountLabel)
options := label.FormatMountLabel(d.options.mountOptions, mountLabel)
logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options)

rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)

41 vendor/github.com/containers/storage/images.go (generated, vendored)
@@ -40,6 +40,11 @@ type Image struct {
// same top layer.
TopLayer string `json:"layer,omitempty"`

// MappedTopLayers are the IDs of alternate versions of the top layer
// which have the same contents and parent, and which differ from
// TopLayer only in which ID mappings they use.
MappedTopLayers []string `json:"mapped-layers,omitempty"`

// Metadata is data we keep for the convenience of the caller. It is not
// expected to be large, since it is kept in memory.
Metadata string `json:"metadata,omitempty"`
@@ -126,16 +131,17 @@ type imageStore struct {

func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
Names: copyStringSlice(i.Names),
TopLayer: i.TopLayer,
Metadata: i.Metadata,
BigDataNames: copyStringSlice(i.BigDataNames),
BigDataSizes: copyStringInt64Map(i.BigDataSizes),
BigDataDigests: copyStringDigestMap(i.BigDataDigests),
Created: i.Created,
Flags: copyStringInterfaceMap(i.Flags),
ID: i.ID,
Digest: i.Digest,
Names: copyStringSlice(i.Names),
TopLayer: i.TopLayer,
MappedTopLayers: copyStringSlice(i.MappedTopLayers),
Metadata: i.Metadata,
BigDataNames: copyStringSlice(i.BigDataNames),
BigDataSizes: copyStringInt64Map(i.BigDataSizes),
BigDataDigests: copyStringDigestMap(i.BigDataDigests),
Created: i.Created,
Flags: copyStringInterfaceMap(i.Flags),
}
}

@@ -213,6 +219,9 @@ func (r *imageStore) Save() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
}
if !r.Locked() {
return errors.New("image store is not locked")
}
rpath := r.imagespath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
@@ -362,6 +371,14 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
return image, err
}

func (r *imageStore) addMappedTopLayer(id, layer string) error {
if image, ok := r.lookup(id); ok {
image.MappedTopLayers = append(image.MappedTopLayers, layer)
return r.Save()
}
return ErrImageUnknown
}

func (r *imageStore) Metadata(id string) (string, error) {
if image, ok := r.lookup(id); ok {
return image.Metadata, nil
@@ -687,3 +704,7 @@ func (r *imageStore) IsReadWrite() bool {
func (r *imageStore) TouchedSince(when time.Time) bool {
return r.lockfile.TouchedSince(when)
}

func (r *imageStore) Locked() bool {
return r.lockfile.Locked()
}
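
The new MappedTopLayers field serializes under the "mapped-layers" key and is dropped when empty, which the regenerated ffjson code below mirrors. A quick check with encoding/json on a trimmed-down struct; image here is a stand-in, not the real type:

package main

import (
	"encoding/json"
	"fmt"
)

type image struct {
	ID              string   `json:"id"`
	TopLayer        string   `json:"layer,omitempty"`
	MappedTopLayers []string `json:"mapped-layers,omitempty"`
}

func main() {
	b, _ := json.Marshal(image{ID: "img1", TopLayer: "l1", MappedTopLayers: []string{"l1-remapped"}})
	fmt.Println(string(b)) // {"id":"img1","layer":"l1","mapped-layers":["l1-remapped"]}

	b, _ = json.Marshal(image{ID: "img2", TopLayer: "l1"})
	fmt.Println(string(b)) // {"id":"img2","layer":"l1"}; omitempty drops the empty slice
}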
112
vendor/github.com/containers/storage/images_ffjson.go
generated
vendored
112
vendor/github.com/containers/storage/images_ffjson.go
generated
vendored
@@ -1,5 +1,5 @@
 // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./images.go
+// source: images.go
 
 package storage
 
@@ -64,6 +64,22 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
 		fflib.WriteJsonString(buf, string(j.TopLayer))
 		buf.WriteByte(',')
 	}
+	if len(j.MappedTopLayers) != 0 {
+		buf.WriteString(`"mapped-layers":`)
+		if j.MappedTopLayers != nil {
+			buf.WriteString(`[`)
+			for i, v := range j.MappedTopLayers {
+				if i != 0 {
+					buf.WriteString(`,`)
+				}
+				fflib.WriteJsonString(buf, string(v))
+			}
+			buf.WriteString(`]`)
+		} else {
+			buf.WriteString(`null`)
+		}
+		buf.WriteByte(',')
+	}
 	if len(j.Metadata) != 0 {
 		buf.WriteString(`"metadata":`)
 		fflib.WriteJsonString(buf, string(j.Metadata))
@@ -157,6 +173,8 @@ const (
 
 	ffjtImageTopLayer
 
+	ffjtImageMappedTopLayers
+
 	ffjtImageMetadata
 
 	ffjtImageBigDataNames
@@ -178,6 +196,8 @@ var ffjKeyImageNames = []byte("names")
 
 var ffjKeyImageTopLayer = []byte("layer")
 
+var ffjKeyImageMappedTopLayers = []byte("mapped-layers")
+
 var ffjKeyImageMetadata = []byte("metadata")
 
 var ffjKeyImageBigDataNames = []byte("big-data-names")
@@ -311,7 +331,12 @@ mainparse:
 
 		case 'm':
 
-			if bytes.Equal(ffjKeyImageMetadata, kn) {
+			if bytes.Equal(ffjKeyImageMappedTopLayers, kn) {
+				currentKey = ffjtImageMappedTopLayers
+				state = fflib.FFParse_want_colon
+				goto mainparse
+
+			} else if bytes.Equal(ffjKeyImageMetadata, kn) {
 				currentKey = ffjtImageMetadata
 				state = fflib.FFParse_want_colon
 				goto mainparse
@@ -363,6 +388,12 @@ mainparse:
 				goto mainparse
 			}
 
+			if fflib.EqualFoldRight(ffjKeyImageMappedTopLayers, kn) {
+				currentKey = ffjtImageMappedTopLayers
+				state = fflib.FFParse_want_colon
+				goto mainparse
+			}
+
 			if fflib.SimpleLetterEqualFold(ffjKeyImageTopLayer, kn) {
 				currentKey = ffjtImageTopLayer
 				state = fflib.FFParse_want_colon
@@ -416,6 +447,9 @@ mainparse:
 		case ffjtImageTopLayer:
 			goto handle_TopLayer
 
+		case ffjtImageMappedTopLayers:
+			goto handle_MappedTopLayers
+
 		case ffjtImageMetadata:
 			goto handle_Metadata
 
@@ -600,6 +634,80 @@ handle_TopLayer:
 	state = fflib.FFParse_after_value
 	goto mainparse
 
+handle_MappedTopLayers:
+
+	/* handler: j.MappedTopLayers type=[]string kind=slice quoted=false*/
+
+	{
+
+		{
+			if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
+				return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
+			}
+		}
+
+		if tok == fflib.FFTok_null {
+			j.MappedTopLayers = nil
+		} else {
+
+			j.MappedTopLayers = []string{}
+
+			wantVal := true
+
+			for {
+
+				var tmpJMappedTopLayers string
+
+				tok = fs.Scan()
+				if tok == fflib.FFTok_error {
+					goto tokerror
+				}
+				if tok == fflib.FFTok_right_brace {
+					break
+				}
+
+				if tok == fflib.FFTok_comma {
+					if wantVal == true {
+						// TODO(pquerna): this isn't an ideal error message, this handles
+						// things like [,,,] as an array value.
+						return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+					}
+					continue
+				} else {
+					wantVal = true
+				}
+
+				/* handler: tmpJMappedTopLayers type=string kind=string quoted=false*/
+
+				{
+
+					{
+						if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+							return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
+						}
+					}
+
+					if tok == fflib.FFTok_null {
+
+					} else {
+
+						outBuf := fs.Output.Bytes()
+
+						tmpJMappedTopLayers = string(string(outBuf))
+
+					}
+				}
+
+				j.MappedTopLayers = append(j.MappedTopLayers, tmpJMappedTopLayers)
+
+				wantVal = false
+			}
+		}
+	}
+
+	state = fflib.FFParse_after_value
+	goto mainparse
+
 handle_Metadata:
 
 	/* handler: j.Metadata type=string kind=string quoted=false*/
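
For reference, MappedTopLayers serializes under the `mapped-layers` key, as the generated marshaller above shows. A small sketch of the resulting JSON shape using encoding/json and a hypothetical mirror struct (the vendored type uses the generated ffjson code, but the wire format is the same):

package main

import (
	"encoding/json"
	"fmt"
)

// imageJSON mirrors just the two fields of interest; the tags follow
// the keys visible in the generated marshaller above.
type imageJSON struct {
	TopLayer        string   `json:"layer,omitempty"`
	MappedTopLayers []string `json:"mapped-layers,omitempty"`
}

func main() {
	b, _ := json.Marshal(imageJSON{
		TopLayer:        "abc123",
		MappedTopLayers: []string{"def456"},
	})
	fmt.Println(string(b)) // {"layer":"abc123","mapped-layers":["def456"]}
}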
60 vendor/github.com/containers/storage/layers.go generated vendored
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"compress/gzip"
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -208,10 +209,14 @@ type LayerStore interface {
 	// Mount mounts a layer for use. If the specified layer is the parent of other
 	// layers, it should not be written to. An SELinux label to be applied to the
 	// mount can be specified to override the one configured for the layer.
-	Mount(id, mountLabel string) (string, error)
+	// The mappings used by the container can be specified.
+	Mount(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error)
 
 	// Unmount unmounts a layer when it is no longer in use.
-	Unmount(id string) error
+	Unmount(id string, force bool) (bool, error)
+
+	// Mounted returns number of times the layer has been mounted.
+	Mounted(id string) (int, error)
 
 	// ParentOwners returns the UIDs and GIDs of parents of the layer's mountpoint
 	// for which the layer's UID and GID maps don't contain corresponding entries.
@@ -366,6 +371,9 @@ func (r *layerStore) Save() error {
 	if !r.IsReadWrite() {
 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
 	}
+	if !r.Locked() {
+		return errors.New("layer store is not locked")
+	}
 	rpath := r.layerspath()
 	if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
 		return err
@@ -624,7 +632,15 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel
 	return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil)
 }
 
-func (r *layerStore) Mount(id, mountLabel string) (string, error) {
+func (r *layerStore) Mounted(id string) (int, error) {
+	layer, ok := r.lookup(id)
+	if !ok {
+		return 0, ErrLayerUnknown
+	}
+	return layer.MountCount, nil
+}
+
+func (r *layerStore) Mount(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
 	if !r.IsReadWrite() {
 		return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
 	}
@@ -639,7 +655,13 @@ func (r *layerStore) Mount(id, mountLabel string) (string, error) {
 	if mountLabel == "" {
 		mountLabel = layer.MountLabel
 	}
-	mountpoint, err := r.driver.Get(id, mountLabel)
+
+	if (uidMaps != nil || gidMaps != nil) && !r.driver.SupportsShifting() {
+		if !reflect.DeepEqual(uidMaps, layer.UIDMap) || !reflect.DeepEqual(gidMaps, layer.GIDMap) {
+			return "", fmt.Errorf("cannot mount layer %v: shifting not enabled", layer.ID)
+		}
+	}
+	mountpoint, err := r.driver.Get(id, mountLabel, uidMaps, gidMaps)
 	if mountpoint != "" && err == nil {
 		if layer.MountPoint != "" {
 			delete(r.bymount, layer.MountPoint)
@@ -652,21 +674,24 @@ func (r *layerStore) Mount(id, mountLabel string) (string, error) {
 	return mountpoint, err
 }
 
-func (r *layerStore) Unmount(id string) error {
+func (r *layerStore) Unmount(id string, force bool) (bool, error) {
 	if !r.IsReadWrite() {
-		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
+		return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
 	}
 	layer, ok := r.lookup(id)
 	if !ok {
 		layerByMount, ok := r.bymount[filepath.Clean(id)]
 		if !ok {
-			return ErrLayerUnknown
+			return false, ErrLayerUnknown
 		}
 		layer = layerByMount
 	}
+	if force {
+		layer.MountCount = 1
+	}
 	if layer.MountCount > 1 {
 		layer.MountCount--
-		return r.Save()
+		return true, r.Save()
 	}
 	err := r.driver.Put(id)
 	if err == nil || os.IsNotExist(err) {
@@ -675,9 +700,9 @@ func (r *layerStore) Unmount(id string) error {
 		}
 		layer.MountCount--
 		layer.MountPoint = ""
-		err = r.Save()
+		return false, r.Save()
 	}
-	return err
+	return true, err
 }
 
 func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
@@ -797,10 +822,8 @@ func (r *layerStore) Delete(id string) error {
 		return ErrLayerUnknown
 	}
 	id = layer.ID
-	for layer.MountCount > 0 {
-		if err := r.Unmount(id); err != nil {
-			return err
-		}
+	if _, err := r.Unmount(id, true); err != nil {
+		return err
 	}
 	err := r.driver.Remove(id)
 	if err == nil {
@@ -917,14 +940,15 @@ func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) {
 }
 
 func (s *simpleGetCloser) Close() error {
-	return s.r.Unmount(s.id)
+	_, err := s.r.Unmount(s.id, false)
+	return err
 }
 
 func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
 	if getter, ok := r.driver.(drivers.DiffGetterDriver); ok {
 		return getter.DiffGetter(id)
 	}
-	path, err := r.Mount(id, "")
+	path, err := r.Mount(id, "", nil, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -1160,3 +1184,7 @@ func (r *layerStore) IsReadWrite() bool {
 func (r *layerStore) TouchedSince(when time.Time) bool {
 	return r.lockfile.TouchedSince(when)
 }
+
+func (r *layerStore) Locked() bool {
+	return r.lockfile.Locked()
+}
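
The reworked Unmount now returns whether the layer is still mounted, so callers can drain a reference count without peeking at store internals. A minimal, runnable sketch of that contract with a fake store (the types here are hypothetical; only the (bool, error) contract is taken from the diff):

package main

import "fmt"

// unmounter captures just the part of the new LayerStore contract we
// exercise here: Unmount reports whether the layer is still mounted.
type unmounter interface {
	Unmount(id string, force bool) (bool, error)
}

// fakeStore is a stand-in with a simple reference count.
type fakeStore struct{ count int }

func (f *fakeStore) Unmount(id string, force bool) (bool, error) {
	if force {
		f.count = 1 // force collapses the count, as in the diff
	}
	f.count--
	return f.count > 0, nil
}

// drainMounts keeps unmounting until the reference count reaches zero.
func drainMounts(s unmounter, id string) error {
	for {
		stillMounted, err := s.Unmount(id, false)
		if err != nil || !stillMounted {
			return err
		}
	}
}

func main() {
	s := &fakeStore{count: 3}
	fmt.Println(drainMounts(s, "layer1"), s.count) // <nil> 0
}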
2 vendor/github.com/containers/storage/layers_ffjson.go generated vendored
@@ -1,5 +1,5 @@
 // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./layers.go
+// source: layers.go
 
 package storage
 
3 vendor/github.com/containers/storage/lockfile.go generated vendored
@@ -28,6 +28,9 @@ type Locker interface {
 
 	// IsReadWrite() checks if the lock file is read-write
 	IsReadWrite() bool
+
+	// Locked() checks if lock is locked
+	Locked() bool
 }
 
 var (
12 vendor/github.com/containers/storage/lockfile_unix.go generated vendored
@@ -25,9 +25,9 @@ func getLockFile(path string, ro bool) (Locker, error) {
 	}
 	unix.CloseOnExec(fd)
 	if ro {
-		return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK}, nil
+		return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK, locked: false}, nil
 	}
-	return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK}, nil
+	return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK, locked: false}, nil
 }
 
 type lockfile struct {
@@ -36,6 +36,7 @@ type lockfile struct {
 	fd       uintptr
 	lw       string
 	locktype int16
+	locked   bool
 }
 
 // Lock locks the lock file
@@ -48,6 +49,7 @@ func (l *lockfile) Lock() {
 		Pid:    int32(os.Getpid()),
 	}
 	l.mu.Lock()
+	l.locked = true
 	for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
 		time.Sleep(10 * time.Millisecond)
 	}
@@ -65,9 +67,15 @@ func (l *lockfile) Unlock() {
 	for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
 		time.Sleep(10 * time.Millisecond)
 	}
+	l.locked = false
 	l.mu.Unlock()
 }
 
+// Check if lock is locked
+func (l *lockfile) Locked() bool {
+	return l.locked
+}
+
 // Touch updates the lock file with the UID of the user
 func (l *lockfile) Touch() error {
 	l.lw = stringid.GenerateRandomID()
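
The new locked flag is what lets the image and layer stores' Save() refuse to write unless the caller holds the lock. A minimal, runnable sketch of that lock-then-save discipline; memLock is an illustrative in-memory stand-in, not the vendored fcntl-based lockfile:

package main

import (
	"fmt"
	"sync"
)

// memLock is an in-memory Locker with the new Locked() query; the
// vendored version wraps an fcntl file lock instead.
type memLock struct {
	mu     sync.Mutex
	locked bool
}

func (l *memLock) Lock()        { l.mu.Lock(); l.locked = true }
func (l *memLock) Unlock()      { l.locked = false; l.mu.Unlock() }
func (l *memLock) Locked() bool { return l.locked }

type store struct{ lock *memLock }

// save refuses to write unless the caller holds the lock, mirroring
// the Save() guard added to the image and layer stores above.
func (s *store) save() error {
	if !s.lock.Locked() {
		return fmt.Errorf("store is not locked")
	}
	return nil // ... write state while locked ...
}

func main() {
	s := &store{lock: &memLock{}}
	fmt.Println(s.save()) // store is not locked
	s.lock.Lock()
	defer s.lock.Unlock()
	fmt.Println(s.save()) // <nil>
}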
17 vendor/github.com/containers/storage/lockfile_windows.go generated vendored
@@ -9,18 +9,29 @@ import (
 )
 
 func getLockFile(path string, ro bool) (Locker, error) {
-	return &lockfile{}, nil
+	return &lockfile{locked: false}, nil
 }
 
 type lockfile struct {
-	mu   sync.Mutex
-	file string
+	mu     sync.Mutex
+	file   string
+	locked bool
 }
 
 func (l *lockfile) Lock() {
 	l.mu.Lock()
+	l.locked = true
 }
 
 func (l *lockfile) Unlock() {
+	l.locked = false
 	l.mu.Unlock()
 }
 
+func (l *lockfile) Locked() bool {
+	return l.locked
+}
+
 func (l *lockfile) Modified() (bool, error) {
 	return false, nil
 }
16 vendor/github.com/containers/storage/pkg/archive/archive.go generated vendored
@@ -56,6 +56,11 @@ type (
 		// replaced with the matching name from this map.
 		RebaseNames map[string]string
 		InUserNS    bool
+		// CopyPass indicates that the contents of any archive we're creating
+		// will instantly be extracted and written to disk, so we can deviate
+		// from the traditional behavior/format to get features like subsecond
+		// precision in timestamps.
+		CopyPass bool
 	}
 )
 
@@ -396,6 +401,11 @@ type tarAppender struct {
 	// by the AUFS standard are used as the tar whiteout
 	// standard.
 	WhiteoutConverter tarWhiteoutConverter
+	// CopyPass indicates that the contents of any archive we're creating
+	// will instantly be extracted and written to disk, so we can deviate
+	// from the traditional behavior/format to get features like subsecond
+	// precision in timestamps.
+	CopyPass bool
 }
 
 func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
@@ -446,6 +456,9 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 	if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
 		return err
 	}
+	if ta.CopyPass {
+		copyPassHeader(hdr)
+	}
 
 	// if it's not a directory and has more than 1 link,
 	// it's hard linked, so set the type flag accordingly
@@ -710,6 +723,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 		options.ChownOpts,
 	)
 	ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
+	ta.CopyPass = options.CopyPass
 
 	defer func() {
 		// Make sure to check the error on Close.
@@ -1039,6 +1053,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
 		UIDMaps:     tarMappings.UIDs(),
 		GIDMaps:     tarMappings.GIDs(),
 		Compression: Uncompressed,
+		CopyPass:    true,
 	}
 	archive, err := TarWithOptions(src, options)
 	if err != nil {
@@ -1145,6 +1160,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
 	}
 	hdr.Name = filepath.Base(dst)
 	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+	copyPassHeader(hdr)
 
 	if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil {
 		return err
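
A short sketch of how a caller opts into the new behavior; TarWithOptions and the CopyPass field are the API shown in this diff, while the source path is made up:

package main

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// CopyPass tells the tar writer the stream will be extracted
	// immediately, so it may deviate from the traditional format (on
	// Go 1.10+ it switches the header to PAX; see copyPassHeader below).
	rc, err := archive.TarWithOptions("/some/src/dir", &archive.TarOptions{
		Compression: archive.Uncompressed,
		CopyPass:    true,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	io.Copy(os.Stdout, rc) // stream the archive; byte count ignored for brevity
}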
11 vendor/github.com/containers/storage/pkg/archive/archive_110.go generated vendored Normal file
@@ -0,0 +1,11 @@
+// +build go1.10
+
+package archive
+
+import (
+	"archive/tar"
+)
+
+func copyPassHeader(hdr *tar.Header) {
+	hdr.Format = tar.FormatPAX
+}
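
Why PAX matters here: the classic USTAR header stores mtime as whole seconds, while PAX records sub-second precision, which is exactly the "subsecond precision in timestamps" the CopyPass comment promises. A standard-library-only sketch demonstrating the round trip:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"time"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	hdr := &tar.Header{
		Name:    "f",
		Mode:    0600,
		ModTime: time.Unix(1500000000, 123456789),
		Format:  tar.FormatPAX, // what copyPassHeader requests on Go 1.10+
	}
	if err := tw.WriteHeader(hdr); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
	out, err := tar.NewReader(&buf).Next()
	if err != nil {
		panic(err)
	}
	fmt.Println(out.ModTime.Nanosecond()) // 123456789: PAX keeps the subseconds
}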
10 vendor/github.com/containers/storage/pkg/archive/archive_19.go generated vendored Normal file
@@ -0,0 +1,10 @@
+// +build !go1.10
+
+package archive
+
+import (
+	"archive/tar"
+)
+
+func copyPassHeader(hdr *tar.Header) {
+}
118 vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go generated vendored
@@ -1,5 +1,5 @@
 // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
-// source: ./pkg/archive/archive.go
+// source: pkg/archive/archive.go
 
 package archive
 
@@ -491,6 +491,11 @@ func (j *TarOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
 	} else {
 		buf.WriteString(`,"InUserNS":false`)
 	}
+	if j.CopyPass {
+		buf.WriteString(`,"CopyPass":true`)
+	} else {
+		buf.WriteString(`,"CopyPass":false`)
+	}
 	buf.WriteByte('}')
 	return nil
 }
@@ -524,6 +529,8 @@ const (
 	ffjtTarOptionsRebaseNames
 
 	ffjtTarOptionsInUserNS
+
+	ffjtTarOptionsCopyPass
 )
 
 var ffjKeyTarOptionsIncludeFiles = []byte("IncludeFiles")
@@ -552,6 +559,8 @@ var ffjKeyTarOptionsRebaseNames = []byte("RebaseNames")
 
 var ffjKeyTarOptionsInUserNS = []byte("InUserNS")
 
+var ffjKeyTarOptionsCopyPass = []byte("CopyPass")
+
 // UnmarshalJSON umarshall json - template of ffjson
 func (j *TarOptions) UnmarshalJSON(input []byte) error {
 	fs := fflib.NewFFLexer(input)
@@ -624,6 +633,11 @@ mainparse:
 				currentKey = ffjtTarOptionsChownOpts
 				state = fflib.FFParse_want_colon
 				goto mainparse
+
+			} else if bytes.Equal(ffjKeyTarOptionsCopyPass, kn) {
+				currentKey = ffjtTarOptionsCopyPass
+				state = fflib.FFParse_want_colon
+				goto mainparse
 			}
 
 		case 'E':
@@ -704,6 +718,12 @@ mainparse:
 
 			}
 
+			if fflib.EqualFoldRight(ffjKeyTarOptionsCopyPass, kn) {
+				currentKey = ffjtTarOptionsCopyPass
+				state = fflib.FFParse_want_colon
+				goto mainparse
+			}
+
 			if fflib.EqualFoldRight(ffjKeyTarOptionsInUserNS, kn) {
 				currentKey = ffjtTarOptionsInUserNS
 				state = fflib.FFParse_want_colon
@@ -838,6 +858,9 @@ mainparse:
 		case ffjtTarOptionsInUserNS:
 			goto handle_InUserNS
 
+		case ffjtTarOptionsCopyPass:
+			goto handle_CopyPass
+
 		case ffjtTarOptionsnosuchkey:
 			err = fs.SkipField(tok)
 			if err != nil {
@@ -1481,6 +1504,41 @@ handle_InUserNS:
 	state = fflib.FFParse_after_value
 	goto mainparse
 
+handle_CopyPass:
+
+	/* handler: j.CopyPass type=bool kind=bool quoted=false*/
+
+	{
+		if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+			return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+		}
+	}
+
+	{
+		if tok == fflib.FFTok_null {
+
+		} else {
+			tmpb := fs.Output.Bytes()
+
+			if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+				j.CopyPass = true
+
+			} else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+				j.CopyPass = false
+
+			} else {
+				err = errors.New("unexpected bytes for true/false value")
+				return fs.WrapErr(err)
+			}
+
+		}
+	}
+
+	state = fflib.FFParse_after_value
+	goto mainparse
+
 wantedvalue:
 	return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
 wrongtokenerror:
@@ -1773,6 +1831,11 @@ func (j *tarAppender) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
 	if err != nil {
 		return err
 	}
+	if j.CopyPass {
+		buf.WriteString(`,"CopyPass":true`)
+	} else {
+		buf.WriteString(`,"CopyPass":false`)
+	}
 	buf.WriteByte('}')
 	return nil
 }
@@ -1792,6 +1855,8 @@ const (
 	ffjttarAppenderChownOpts
 
 	ffjttarAppenderWhiteoutConverter
+
+	ffjttarAppenderCopyPass
 )
 
 var ffjKeytarAppenderTarWriter = []byte("TarWriter")
@@ -1806,6 +1871,8 @@ var ffjKeytarAppenderChownOpts = []byte("ChownOpts")
 
 var ffjKeytarAppenderWhiteoutConverter = []byte("WhiteoutConverter")
 
+var ffjKeytarAppenderCopyPass = []byte("CopyPass")
+
 // UnmarshalJSON umarshall json - template of ffjson
 func (j *tarAppender) UnmarshalJSON(input []byte) error {
 	fs := fflib.NewFFLexer(input)
@@ -1881,6 +1948,11 @@ mainparse:
 				currentKey = ffjttarAppenderChownOpts
 				state = fflib.FFParse_want_colon
 				goto mainparse
+
+			} else if bytes.Equal(ffjKeytarAppenderCopyPass, kn) {
+				currentKey = ffjttarAppenderCopyPass
+				state = fflib.FFParse_want_colon
+				goto mainparse
 			}
 
 		case 'I':
@@ -1917,6 +1989,12 @@ mainparse:
 
 			}
 
+			if fflib.EqualFoldRight(ffjKeytarAppenderCopyPass, kn) {
+				currentKey = ffjttarAppenderCopyPass
+				state = fflib.FFParse_want_colon
+				goto mainparse
+			}
+
 			if fflib.SimpleLetterEqualFold(ffjKeytarAppenderWhiteoutConverter, kn) {
 				currentKey = ffjttarAppenderWhiteoutConverter
 				state = fflib.FFParse_want_colon
@@ -1988,6 +2066,9 @@ mainparse:
 		case ffjttarAppenderWhiteoutConverter:
 			goto handle_WhiteoutConverter
 
+		case ffjttarAppenderCopyPass:
+			goto handle_CopyPass
+
 		case ffjttarAppendernosuchkey:
 			err = fs.SkipField(tok)
 			if err != nil {
@@ -2211,6 +2292,41 @@ handle_WhiteoutConverter:
 	state = fflib.FFParse_after_value
 	goto mainparse
 
+handle_CopyPass:
+
+	/* handler: j.CopyPass type=bool kind=bool quoted=false*/
+
+	{
+		if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
+			return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
+		}
+	}
+
+	{
+		if tok == fflib.FFTok_null {
+
+		} else {
+			tmpb := fs.Output.Bytes()
+
+			if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+
+				j.CopyPass = true
+
+			} else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+
+				j.CopyPass = false
+
+			} else {
+				err = errors.New("unexpected bytes for true/false value")
+				return fs.WrapErr(err)
+			}
+
+		}
+	}
+
+	state = fflib.FFParse_after_value
+	goto mainparse
+
 wantedvalue:
 	return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
 wrongtokenerror:
97 vendor/github.com/containers/storage/pkg/archive/example_changes.go generated vendored Normal file
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	flDebug  = flag.Bool("D", false, "debugging output")
+	flNewDir = flag.String("newdir", "", "")
+	flOldDir = flag.String("olddir", "", "")
+	log      = logrus.New()
+)
+
+func main() {
+	flag.Usage = func() {
+		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+		flag.PrintDefaults()
+	}
+	flag.Parse()
+	log.Out = os.Stderr
+	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+		logrus.SetLevel(logrus.DebugLevel)
+	}
+	var newDir, oldDir string
+
+	if len(*flNewDir) == 0 {
+		var err error
+		newDir, err = ioutil.TempDir("", "storage-test-newDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(newDir)
+		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+			log.Fatal(err)
+		}
+	} else {
+		newDir = *flNewDir
+	}
+
+	if len(*flOldDir) == 0 {
+		oldDir, err := ioutil.TempDir("", "storage-test-oldDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(oldDir)
+	} else {
+		oldDir = *flOldDir
+	}
+
+	changes, err := archive.ChangesDirs(newDir, oldDir)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	a, err := archive.ExportChanges(newDir, changes)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer a.Close()
+
+	i, err := io.Copy(os.Stdout, a)
+	if err != nil && err != io.EOF {
+		log.Fatal(err)
+	}
+	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+		if makeLinks {
+			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+				return 0, err
+			}
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
12 vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go generated vendored
@@ -7,7 +7,7 @@ import (
 	"path/filepath"
 
 	"github.com/containers/storage/pkg/mount"
-	rsystem "github.com/opencontainers/runc/libcontainer/system"
+	"github.com/syndtr/gocapability/capability"
 	"golang.org/x/sys/unix"
 )
 
@@ -18,10 +18,16 @@
 // Old root is removed after the call to pivot_root so it is no longer available under the new root.
 // This is similar to how libcontainer sets up a container's rootfs
 func chroot(path string) (err error) {
-	// if the engine is running in a user namespace we need to use actual chroot
-	if rsystem.RunningInUserNS() {
+	caps, err := capability.NewPid(0)
+	if err != nil {
+		return err
+	}
+
+	// if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot
+	if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) {
 		return realChroot(path)
 	}
+
 	if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
 		return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
 	}
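
The logic above hinges on a capability probe: pivot_root needs CAP_SYS_ADMIN, so a process that only holds CAP_SYS_CHROOT must fall back to plain chroot(2). A standalone sketch of that probe, using the same gocapability package imported above:

package main

import (
	"fmt"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	caps, err := capability.NewPid(0) // 0 = the current process
	if err != nil {
		panic(err)
	}
	canAdmin := caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)
	canChroot := caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT)
	// Matches the condition in chroot(): without CAP_SYS_ADMIN,
	// pivot_root is unavailable, so plain chroot(2) is the fallback.
	fmt.Println("would fall back to chroot(2):", !canAdmin && canChroot)
}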
2 vendor/github.com/containers/storage/pkg/directory/directory_unix.go generated vendored
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris
+// +build linux darwin freebsd solaris
 
 package directory
 
4 vendor/github.com/containers/storage/pkg/idtools/idtools.go generated vendored
@@ -140,10 +140,10 @@ func NewIDMappings(username, groupname string) (*IDMappings, error) {
 		return nil, err
 	}
 	if len(subuidRanges) == 0 {
-		return nil, fmt.Errorf("No subuid ranges found for user %q", username)
+		return nil, fmt.Errorf("No subuid ranges found for user %q in %s", username, subuidFileName)
 	}
 	if len(subgidRanges) == 0 {
-		return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
+		return nil, fmt.Errorf("No subgid ranges found for group %q in %s", groupname, subgidFileName)
 	}
 
 	return &IDMappings{
6 vendor/github.com/containers/storage/pkg/mount/flags.go generated vendored
@@ -111,9 +111,9 @@ func MergeTmpfsOptions(options []string) ([]string, error) {
 	return newOptions, nil
 }
 
-// Parse fstab type mount options into mount() flags
+// ParseOptions parses fstab type mount options into mount() flags
 // and device specific data
-func parseOptions(options string) (int, string) {
+func ParseOptions(options string) (int, string) {
 	var (
 		flag int
 		data []string
@@ -138,7 +138,7 @@ func parseOptions(options string) (int, string) {
 
 // ParseTmpfsOptions parse fstab type mount options into flags and data
 func ParseTmpfsOptions(options string) (int, string, error) {
-	flags, data := parseOptions(options)
+	flags, data := ParseOptions(options)
 	for _, o := range strings.Split(data, ",") {
 		opt := strings.SplitN(o, "=", 2)
 		if !validFlags[opt[0]] {
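
With parseOptions now exported as ParseOptions, code outside the package can turn an fstab-style option string into syscall flag bits plus leftover driver-specific data. A small usage sketch; the option string is made up:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// "ro" and "noatime" become mount(2) flag bits; "size=64m" is not a
	// recognized flag, so it comes back in the data string.
	flags, data := mount.ParseOptions("ro,noatime,size=64m")
	fmt.Printf("flags=%#x data=%q\n", flags, data)
}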
4 vendor/github.com/containers/storage/pkg/mount/mount.go generated vendored
@@ -39,7 +39,7 @@ func Mounted(mountpoint string) (bool, error) {
 // specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
 // flags.go for supported option flags.
 func Mount(device, target, mType, options string) error {
-	flag, _ := parseOptions(options)
+	flag, _ := ParseOptions(options)
 	if flag&REMOUNT != REMOUNT {
 		if mounted, err := Mounted(target); err != nil || mounted {
 			return err
@@ -53,7 +53,7 @@ func Mount(device, target, mType, options string) error {
 // specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
 // flags.go for supported option flags.
 func ForceMount(device, target, mType, options string) error {
-	flag, data := parseOptions(options)
+	flag, data := ParseOptions(options)
 	return mount(device, target, mType, uintptr(flag), data)
 }
 
19 vendor/github.com/containers/storage/pkg/ostree/no_ostree.go generated vendored Normal file
@@ -0,0 +1,19 @@
+// +build !ostree
+
+package ostree
+
+func OstreeSupport() bool {
+	return false
+}
+
+func DeleteOSTree(repoLocation, id string) error {
+	return nil
+}
+
+func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error {
+	return nil
+}
+
+func ConvertToOSTree(repoLocation, root, id string) error {
+	return nil
+}
198 vendor/github.com/containers/storage/pkg/ostree/ostree.go generated vendored Normal file
@@ -0,0 +1,198 @@
+// +build ostree
+
+package ostree
+
+import (
+	"fmt"
+	"golang.org/x/sys/unix"
+	"os"
+	"path/filepath"
+	"runtime"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/system"
+	glib "github.com/ostreedev/ostree-go/pkg/glibobject"
+	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
+	"github.com/pkg/errors"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+import "C"
+
+func OstreeSupport() bool {
+	return true
+}
+
+func fixFiles(dir string, usermode bool) (bool, []string, error) {
+	var SkipOstree = errors.New("skip ostree deduplication")
+
+	var whiteouts []string
+
+	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+		if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
+			if !usermode {
+				stat, ok := info.Sys().(*syscall.Stat_t)
+				if !ok {
+					return errors.New("not syscall.Stat_t")
+				}
+
+				if stat.Rdev == 0 && (stat.Mode&unix.S_IFCHR) != 0 {
+					whiteouts = append(whiteouts, path)
+					return nil
+				}
+			}
+			// Skip the ostree deduplication if we encounter a file type that
+			// ostree does not manage.
+			return SkipOstree
+		}
+		if info.IsDir() {
+			if usermode {
+				if err := os.Chmod(path, info.Mode()|0700); err != nil {
+					return err
+				}
+			}
+		} else if usermode && (info.Mode().IsRegular()) {
+			if err := os.Chmod(path, info.Mode()|0600); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err == SkipOstree {
+		return true, nil, nil
+	}
+	if err != nil {
+		return false, nil, err
+	}
+	return false, whiteouts, nil
+}
+
+// Create prepares the filesystem for the OSTREE driver and copies the directory for the given id under the parent.
+func ConvertToOSTree(repoLocation, root, id string) error {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+	repo, err := otbuiltin.OpenRepo(repoLocation)
+	if err != nil {
+		return errors.Wrap(err, "could not open the OSTree repository")
+	}
+
+	skip, whiteouts, err := fixFiles(root, os.Getuid() != 0)
+	if err != nil {
+		return errors.Wrap(err, "could not prepare the OSTree directory")
+	}
+	if skip {
+		return nil
+	}
+
+	if _, err := repo.PrepareTransaction(); err != nil {
+		return errors.Wrap(err, "could not prepare the OSTree transaction")
+	}
+
+	if skip {
+		return nil
+	}
+
+	commitOpts := otbuiltin.NewCommitOptions()
+	commitOpts.Timestamp = time.Now()
+	commitOpts.LinkCheckoutSpeedup = true
+	commitOpts.Parent = "0000000000000000000000000000000000000000000000000000000000000000"
+	branch := fmt.Sprintf("containers-storage/%s", id)
+
+	for _, w := range whiteouts {
+		if err := os.Remove(w); err != nil {
+			return errors.Wrap(err, "could not delete whiteout file")
+		}
+	}
+
+	if _, err := repo.Commit(root, branch, commitOpts); err != nil {
+		return errors.Wrap(err, "could not commit the layer")
+	}
+
+	if _, err := repo.CommitTransaction(); err != nil {
+		return errors.Wrap(err, "could not complete the OSTree transaction")
+	}
+
+	if err := system.EnsureRemoveAll(root); err != nil {
+		return errors.Wrap(err, "could not delete layer")
+	}
+
+	checkoutOpts := otbuiltin.NewCheckoutOptions()
+	checkoutOpts.RequireHardlinks = true
+	checkoutOpts.Whiteouts = false
+	if err := otbuiltin.Checkout(repoLocation, root, branch, checkoutOpts); err != nil {
+		return errors.Wrap(err, "could not checkout from OSTree")
+	}
+
+	for _, w := range whiteouts {
+		if err := unix.Mknod(w, unix.S_IFCHR, 0); err != nil {
+			return errors.Wrap(err, "could not recreate whiteout file")
+		}
+	}
+	return nil
+}
+
+func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	_, err := os.Stat(repoLocation)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	} else if err != nil {
+		if err := idtools.MkdirAllAs(repoLocation, 0700, rootUID, rootGID); err != nil {
+			return errors.Wrap(err, "could not create OSTree repository directory: %v")
+		}
+
+		if _, err := otbuiltin.Init(repoLocation, otbuiltin.NewInitOptions()); err != nil {
+			return errors.Wrap(err, "could not create OSTree repository")
+		}
+	}
+	return nil
+}
+
+func openRepo(path string) (*C.struct_OstreeRepo, error) {
+	var cerr *C.GError
+	cpath := C.CString(path)
+	defer C.free(unsafe.Pointer(cpath))
+	pathc := C.g_file_new_for_path(cpath)
+	defer C.g_object_unref(C.gpointer(pathc))
+	repo := C.ostree_repo_new(pathc)
+	r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
+	if !r {
+		C.g_object_unref(C.gpointer(repo))
+		return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+	return repo, nil
+}
+
+func DeleteOSTree(repoLocation, id string) error {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	repo, err := openRepo(repoLocation)
+	if err != nil {
+		return err
+	}
+	defer C.g_object_unref(C.gpointer(repo))
+
+	branch := fmt.Sprintf("containers-storage/%s", id)
+
+	cbranch := C.CString(branch)
+	defer C.free(unsafe.Pointer(cbranch))
+
+	var cerr *C.GError
+	r := glib.GoBool(glib.GBoolean(C.ostree_repo_set_ref_immediate(repo, nil, cbranch, nil, nil, &cerr)))
+	if !r {
+		return glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+	}
+	return nil
+}
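
The two files above are selected by the ostree build tag: built without `-tags ostree`, every function is a no-op, so callers can invoke them unconditionally. A minimal caller sketch under that assumption (the repository path and layer id are made up):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/ostree"
)

func main() {
	// In a build without `-tags ostree`, OstreeSupport() reports false
	// and the calls below succeed as harmless no-ops.
	fmt.Println("ostree support:", ostree.OstreeSupport())

	if err := ostree.CreateOSTreeRepository("/var/lib/example-repo", 0, 0); err != nil {
		panic(err)
	}
	if err := ostree.ConvertToOSTree("/var/lib/example-repo", "/var/lib/layers/abc", "abc"); err != nil {
		panic(err)
	}
}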
1 vendor/github.com/containers/storage/pkg/stringutils/README.md generated vendored Normal file
@@ -0,0 +1 @@
+This package provides helper functions for dealing with strings
99 vendor/github.com/containers/storage/pkg/stringutils/stringutils.go generated vendored Normal file
@@ -0,0 +1,99 @@
+// Package stringutils provides helper functions for dealing with strings.
+package stringutils
+
+import (
+	"bytes"
+	"math/rand"
+	"strings"
+)
+
+// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n.
+func GenerateRandomAlphaOnlyString(n int) string {
+	// make a really long string
+	letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	b := make([]byte, n)
+	for i := range b {
+		b[i] = letters[rand.Intn(len(letters))]
+	}
+	return string(b)
+}
+
+// GenerateRandomASCIIString generates an ASCII random string with length n.
+func GenerateRandomASCIIString(n int) string {
+	chars := "abcdefghijklmnopqrstuvwxyz" +
+		"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+		"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
+	res := make([]byte, n)
+	for i := 0; i < n; i++ {
+		res[i] = chars[rand.Intn(len(chars))]
+	}
+	return string(res)
+}
+
+// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...).
+// For maxlen of 3 and lower, no ellipsis is appended.
+func Ellipsis(s string, maxlen int) string {
+	r := []rune(s)
+	if len(r) <= maxlen {
+		return s
+	}
+	if maxlen <= 3 {
+		return string(r[:maxlen])
+	}
+	return string(r[:maxlen-3]) + "..."
+}
+
+// Truncate truncates a string to maxlen.
+func Truncate(s string, maxlen int) string {
+	r := []rune(s)
+	if len(r) <= maxlen {
+		return s
+	}
+	return string(r[:maxlen])
+}
+
+// InSlice tests whether a string is contained in a slice of strings or not.
+// Comparison is case insensitive
+func InSlice(slice []string, s string) bool {
+	for _, ss := range slice {
+		if strings.ToLower(s) == strings.ToLower(ss) {
+			return true
+		}
+	}
+	return false
+}
+
+func quote(word string, buf *bytes.Buffer) {
+	// Bail out early for "simple" strings
+	if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
+		buf.WriteString(word)
+		return
+	}
+
+	buf.WriteString("'")
+
+	for i := 0; i < len(word); i++ {
+		b := word[i]
+		if b == '\'' {
+			// Replace literal ' with a close ', a \', and an open '
+			buf.WriteString("'\\''")
+		} else {
+			buf.WriteByte(b)
+		}
+	}
+
+	buf.WriteString("'")
+}
+
+// ShellQuoteArguments takes a list of strings and escapes them so they will be
+// handled right when passed as arguments to a program via a shell
+func ShellQuoteArguments(args []string) string {
+	var buf bytes.Buffer
+	for i, arg := range args {
+		if i != 0 {
+			buf.WriteByte(' ')
+		}
+		quote(arg, &buf)
+	}
+	return buf.String()
+}
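
A quick usage sketch of the helpers above, with the results noted in comments:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/stringutils"
)

func main() {
	fmt.Println(stringutils.Ellipsis("hello world", 8)) // "hello..."
	fmt.Println(stringutils.Truncate("hello world", 8)) // "hello wo"
	fmt.Println(stringutils.InSlice([]string{"A", "b"}, "a")) // true (case insensitive)
	// Each argument is quoted so a shell passes it through unchanged.
	fmt.Println(stringutils.ShellQuoteArguments([]string{"ls", "my file"})) // ls 'my file'
}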
14 vendor/github.com/containers/storage/pkg/system/stat_windows.go generated vendored
@@ -28,6 +28,20 @@ func (s StatT) Mtim() time.Time {
 	return time.Time(s.mtim)
 }
 
+// UID returns file's user id of owner.
+//
+// on windows this is always 0 because there is no concept of UID
+func (s StatT) UID() uint32 {
+	return 0
+}
+
+// GID returns file's group id of owner.
+//
+// on windows this is always 0 because there is no concept of GID
+func (s StatT) GID() uint32 {
+	return 0
+}
+
 // Stat takes a path to a file and returns
 // a system.StatT type pertaining to that file.
 //
Some files were not shown because too many files have changed in this diff.