Mirror of https://github.com/containers/skopeo.git (synced 2026-01-30 13:58:48 +00:00)

Compare commits (215 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e079f9d61b | |
| | ceabc0a404 | |
| | 523b8b44a2 | |
| | d2d1796eb5 | |
| | c67e5f7425 | |
| | 1b8686d044 | |
| | a4de1428f9 | |
| | 524f6c0682 | |
| | fa18fce7e8 | |
| | 96be1bb155 | |
| | 23c6b42b26 | |
| | 6307635b5f | |
| | 47e7cda4e9 | |
| | 5dd3b2bffd | |
| | 12f0e24519 | |
| | b137741385 | |
| | 233804fedc | |
| | 0c90e57eaf | |
| | 8fb4ab3d92 | |
| | 8c9e250801 | |
| | 04aee56a36 | |
| | 4f1fabc2a4 | |
| | 43bc356337 | |
| | 41991bab70 | |
| | 2b5086167f | |
| | b46d16f48c | |
| | 9fef0eb3f3 | |
| | 30b0a1741e | |
| | 945b9dc08f | |
| | 904b064da4 | |
| | 7ae62af073 | |
| | 7525a79c93 | |
| | 07287b5783 | |
| | 0a2a62ac20 | |
| | 5581c62a3a | |
| | 6b5bdb7563 | |
| | 2bdffc89c2 | |
| | 65e6449c95 | |
| | 2829f7da9e | |
| | ece44c2842 | |
| | 0fa335c149 | |
| | 5c0ad57c2c | |
| | b2934e7cf6 | |
| | 2af7114ea1 | |
| | 0e1cc9203e | |
| | e255ccc145 | |
| | 9447a55b61 | |
| | 9fdceeb2b2 | |
| | 18ee5f8119 | |
| | ab6a17059c | |
| | 81c5e94850 | |
| | 99dc83062a | |
| | 4d8ea6729f | |
| | ac85091ecd | |
| | ffa640c2b0 | |
| | c73bcba7e6 | |
| | 329e1cf61c | |
| | 854f766dc7 | |
| | 5c73fdbfdc | |
| | 097549748a | |
| | 032309941b | |
| | d93a581fb8 | |
| | 52075ab386 | |
| | d65ae4b1d7 | |
| | c32d27f59e | |
| | 883d65a54a | |
| | 94728fb73f | |
| | 520f0e5ddb | |
| | fa39b49a5c | |
| | 0490018903 | |
| | b434c8f424 | |
| | 79de2d9f09 | |
| | 2031e17b3c | |
| | 5a050c1383 | |
| | 404c5bd341 | |
| | 2134209960 | |
| | 1e8c029562 | |
| | 932b037d66 | |
| | 26a48586a0 | |
| | 683f4263ef | |
| | ebfa1e936b | |
| | 509782e78b | |
| | 776b408f76 | |
| | fee5981ebf | |
| | d9e9604979 | |
| | 3606380bdb | |
| | 640b967463 | |
| | b8b9913695 | |
| | 9e2720dfcc | |
| | b329dd0d4e | |
| | 1b10352591 | |
| | bba2874451 | |
| | 0322441640 | |
| | 8868d2ebe4 | |
| | f19acc1c90 | |
| | 47f24b4097 | |
| | c2597aab22 | |
| | 47065938da | |
| | 790620024e | |
| | 42b01df89e | |
| | aafae2bc50 | |
| | e5b9ea5ee6 | |
| | 1c2ff140cb | |
| | f7c608e65e | |
| | ec810c91fe | |
| | 17bea86e89 | |
| | 3e0026d907 | |
| | 3e98377bf2 | |
| | 0658bc80f3 | |
| | e96a9b0e1b | |
| | 08c30b8f06 | |
| | 05212df1c5 | |
| | 7ec68dd463 | |
| | 6eb5131b85 | |
| | 736cd7641d | |
| | 78bd5dd3df | |
| | ecd675e0a6 | |
| | 5675895460 | |
| | 0f8f870bd3 | |
| | a51e38e60d | |
| | 8fe1595f92 | |
| | 2497f500d5 | |
| | afa92d58f6 | |
| | 958cafb2c0 | |
| | 1d1bf0d393 | |
| | 3094320203 | |
| | 39de98777d | |
| | 8084f6f4e2 | |
| | 6ef45e5cf1 | |
| | 444b90a9cf | |
| | 72a3dc17ee | |
| | 88c748f47a | |
| | 7e8c89d619 | |
| | 694f915003 | |
| | a77b409619 | |
| | 1faff791ce | |
| | 8b8afe0fda | |
| | 09a120a59b | |
| | c769c7789e | |
| | 3ea3965e5e | |
| | ee8391db34 | |
| | e1cc97d9d7 | |
| | f30756a9bb | |
| | 33b474b224 | |
| | 485a7aa330 | |
| | 59117e6e3d | |
| | 8ee3ead743 | |
| | bc39e4f9b6 | |
| | 3017d87ade | |
| | d8f1d4572b | |
| | 41d8dd8b80 | |
| | bcf3dbbb93 | |
| | bfc0c5e531 | |
| | 013ebac8d8 | |
| | fbc2e4f70f | |
| | 72468d6817 | |
| | 5dec940523 | |
| | 761a6811c1 | |
| | b3a023f9dd | |
| | 5aa217fe0d | |
| | 737438d026 | |
| | 1715c90841 | |
| | 187299a20b | |
| | 89d8bddd9b | |
| | ba649c56bf | |
| | 3456577268 | |
| | b52e700666 | |
| | ee32f1f7aa | |
| | 5af0da9de6 | |
| | 879a6d793f | |
| | 2734f93e30 | |
| | 2b97124e4a | |
| | 7815a5801e | |
| | 501e1be3cf | |
| | fc386a6dca | |
| | 2a134a0ddd | |
| | 17250d7e8d | |
| | 65d28709c3 | |
| | d6c6c78d1b | |
| | 67ffa00b1d | |
| | a581847345 | |
| | bcd26a4ae4 | |
| | e38c345f23 | |
| | 0421fb04c2 | |
| | 82186b916f | |
| | 15eed5beda | |
| | 81837bd55b | |
| | 3dec6a1cdf | |
| | fe14427129 | |
| | be27588418 | |
| | fb84437cd1 | |
| | d9b495ca38 | |
| | 6b93d4794f | |
| | 5d3849a510 | |
| | fef142f811 | |
| | 2684e51aa5 | |
| | e814f9605a | |
| | 5d136a46ed | |
| | b0b750dfa1 | |
| | e3034e1d91 | |
| | 1a259b76da | |
| | ae64ff7084 | |
| | d67d3a4620 | |
| | 196bc48723 | |
| | 1c6c7bc481 | |
| | 6e23a32282 | |
| | f398c9c035 | |
| | 0144aa8dc5 | |
| | 0df5dcf09c | |
| | f9baaa6b87 | |
| | 67ff78925b | |
| | 5c611083f2 | |
| | 976d57ea45 | |
| | 63569fcd63 | |
| | 98b3a13b46 | |
.gitignore (vendored, 2 changes)

```diff
@@ -1,3 +1,3 @@
-/docs/skopeo.1
+*.1
 /layers-*
 /skopeo
```
@@ -1,3 +1,4 @@
|
||||
language: go
|
||||
|
||||
matrix:
|
||||
include:
|
||||
@@ -21,4 +22,4 @@ install:
|
||||
|
||||
script:
|
||||
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then hack/travis_osx.sh ; fi
|
||||
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make check ; fi
|
||||
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make vendor && ./hack/tree_status.sh && make check ; fi
|
||||
|
||||
CONTRIBUTING.md

```diff
@@ -15,7 +15,7 @@ that we follow.
 ## Reporting Issues
 
 Before reporting an issue, check our backlog of
-[open issues](https://github.com/projectatomic/skopeo/issues)
+[open issues](https://github.com/containers/skopeo/issues)
 to see if someone else has already reported it. If so, feel free to add
 your scenario, or additional information, to the discussion. Or simply
 "subscribe" to it to be notified when it is updated.
@@ -115,6 +115,35 @@ Use your real name (sorry, no pseudonyms or anonymous contributions.)
 If you set your `user.name` and `user.email` git configs, you can sign your
 commit automatically with `git commit -s`.
 
+### Dependencies management
+
+Make sure [`vndr`](https://github.com/LK4D4/vndr) is installed.
+
+To add a new dependency to this project:
+
+- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
+- run `make vendor`
+
+To update an existing dependency:
+
+- update the relevant dependency line in `vendor.conf`
+- run `make vendor`
+
+When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):
+
+- create a new branch in your `skopeo` checkout and switch to it
+- update `vendor.conf`: find the `containers/image` dependency and update it to vendor from your own branch of your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
+- run `make vendor`
+- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for the changed `containers/image` API)
+- optionally add new integration tests to the skopeo repo
+- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
+- iterate until tests pass and the PR is reviewed
+- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
+- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
+- run `make vendor`
+- update the skopeo PR with the result, and drop the “DO NOT MERGE” marking
+- after tests complete successfully again, merge the skopeo PR
+
 ## Communications
 
 For general questions, or discussions, please use the
@@ -122,9 +151,9 @@ IRC group on `irc.freenode.net` called `container-projects`
 that has been setup.
 
 For discussions around issues/bugs and features, you can use the github
-[issues](https://github.com/projectatomic/skopeo/issues)
+[issues](https://github.com/containers/skopeo/issues)
 and
-[PRs](https://github.com/projectatomic/skopeo/pulls)
+[PRs](https://github.com/containers/skopeo/pulls)
 tracking system.
 
 <!--
```
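The vendoring workflow added above reduces to a few commands. A minimal sketch, assuming a GOPATH checkout of skopeo; the `github.com/pkg/errors` line is just the example the text itself uses:

```sh
# Install the vendoring tool; the Makefile's .install.vndr target does the same.
go get -u github.com/LK4D4/vndr

# Add a dependency: one line in vendor.conf per vndr rules, then re-vendor.
echo 'github.com/pkg/errors master' >> vendor.conf
make vendor

# CI verifies that the vendored tree matches vendor.conf (see the .travis.yml change above).
./hack/tree_status.sh
```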
Dockerfile

```diff
@@ -10,6 +10,7 @@ RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-go-md
     gnupg \
     # OpenShift deps
     which tar wget hostname util-linux bsdtar socat ethtool device-mapper iptables tree findutils nmap-ncat e2fsprogs xfsprogs lsof docker iproute \
+    bats jq podman \
     && dnf clean all
 
 # Install two versions of the registry. The first is an older version that
@@ -42,8 +43,9 @@ RUN set -x \
 
 ENV GOPATH /usr/share/gocode:/go
 ENV PATH $GOPATH/bin:/usr/share/gocode/bin:$PATH
-RUN go get github.com/golang/lint/golint
-WORKDIR /go/src/github.com/projectatomic/skopeo
-COPY . /go/src/github.com/projectatomic/skopeo
+RUN go version
+RUN go get golang.org/x/lint/golint
+WORKDIR /go/src/github.com/containers/skopeo
+COPY . /go/src/github.com/containers/skopeo
 
 #ENTRYPOINT ["hack/dind"]
```
@@ -1,8 +1,8 @@
|
||||
FROM ubuntu:17.10
|
||||
FROM ubuntu:18.10
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
golang \
|
||||
btrfs-tools \
|
||||
libbtrfs-dev \
|
||||
git-core \
|
||||
libdevmapper-dev \
|
||||
libgpgme11-dev \
|
||||
@@ -11,4 +11,4 @@ RUN apt-get update && apt-get install -y \
|
||||
libostree-dev
|
||||
|
||||
ENV GOPATH=/
|
||||
WORKDIR /src/github.com/projectatomic/skopeo
|
||||
WORKDIR /src/github.com/containers/skopeo
|
||||
|
||||
Makefile (102 changes)

```diff
@@ -1,4 +1,4 @@
-.PHONY: all binary build-container build-local clean install install-binary install-completions shell test-integration
+.PHONY: all binary build-container docs docs-in-container build-local clean install install-binary install-completions shell test-integration .install.vndr vendor
 
 export GO15VENDOREXPERIMENT=1
 
@@ -22,58 +22,78 @@ CONTAINERSSYSCONFIGDIR=${DESTDIR}/etc/containers
 REGISTRIESDDIR=${CONTAINERSSYSCONFIGDIR}/registries.d
 SIGSTOREDIR=${DESTDIR}/var/lib/atomic/sigstore
 BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
-GO_MD2MAN ?= go-md2man
 GO ?= go
+CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
+GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')
 
 ifeq ($(DEBUG), 1)
   override GOGCFLAGS += -N -l
 endif
 
-ifeq ($(shell go env GOOS), linux)
+ifeq ($(shell $(GO) env GOOS), linux)
   GO_DYN_FLAGS="-buildmode=pie"
 endif
 
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-DOCKER_IMAGE := skopeo-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+IMAGE := skopeo-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 # set env like gobuildtag?
-DOCKER_FLAGS := docker run --rm -i #$(DOCKER_ENVS)
+CONTAINER_CMD := ${CONTAINER_RUNTIME} run --rm -i -e TESTFLAGS="$(TESTFLAGS)" #$(CONTAINER_ENVS)
 # if this session isn't interactive, then we don't want to allocate a
 # TTY, which would fail, but if it is interactive, we do want to attach
 # so that the user can send e.g. ^C through.
 INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0)
 ifeq ($(INTERACTIVE), 1)
-	DOCKER_FLAGS += -t
+	CONTAINER_CMD += -t
 endif
-DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
+CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"
 
 GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
 
+MANPAGES_MD = $(wildcard docs/*.md)
+MANPAGES ?= $(MANPAGES_MD:%.md=%)
+
-BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh)
+BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh) $(shell hack/btrfs_installed_tag.sh)
 LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
-LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(DARWIN_BUILD_TAG)
+OSTREE_BUILD_TAG = $(shell hack/ostree_tag.sh)
+LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(OSTREE_BUILD_TAG) $(DARWIN_BUILD_TAG)
 BUILDTAGS += $(LOCAL_BUILD_TAGS)
 
+ifeq ($(DISABLE_CGO), 1)
+	override BUILDTAGS = containers_image_ostree_stub exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
+endif
+
 # make all DEBUG=1
 #   Note: Uses the -N -l go compiler options to disable compiler optimizations
 #         and inlining. Using these build options allows you to subsequently
 #         use source debugging tools like delve.
-all: binary docs
+all: binary docs-in-container
 
-# Build a docker image (skopeobuild) that has everything we need to build.
+help:
+	@echo "Usage: make <target>"
+	@echo
+	@echo " * 'install' - Install binaries and documents to system locations"
+	@echo " * 'binary' - Build skopeo with a container"
+	@echo " * 'binary-local' - Build skopeo locally"
+	@echo " * 'test-unit' - Execute unit tests"
+	@echo " * 'test-integration' - Execute integration tests"
+	@echo " * 'validate' - Verify whether there is no conflict and all Go source files have been formatted, linted and vetted"
+	@echo " * 'check' - Including above validate, test-integration and test-unit"
+	@echo " * 'shell' - Run the built image and attach to a shell"
+	@echo " * 'clean' - Clean artifacts"
+
+# Build a container image (skopeobuild) that has everything we need to build.
 # Then do the build and the output (skopeo) should appear in current dir
 binary: cmd/skopeo
-	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
-	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
 		skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
 
 binary-static: cmd/skopeo
-	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
-	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
 		skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
 
-# Build w/o using Docker containers
+# Build w/o using containers
 binary-local:
 	$(GPGME_ENV) $(GO) build ${GO_DYN_FLAGS} -ldflags "-X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
 
@@ -81,13 +101,17 @@ binary-local-static:
 	$(GPGME_ENV) $(GO) build -ldflags "-extldflags \"-static\" -X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
 
 build-container:
-	docker build ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" .
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .
 
-docs/%.1: docs/%.1.md
-	$(GO_MD2MAN) -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@
+$(MANPAGES): %: %.md
+	@sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@
 
 .PHONY: docs
-docs: $(MANPAGES_MD:%.md=%)
+docs: $(MANPAGES)
+
+docs-in-container:
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
+		skopeobuildimage make docs $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
 
 clean:
 	rm -f skopeo docs/*.1
@@ -103,29 +127,40 @@ install-binary: ./skopeo
 	install -d -m 755 ${INSTALLDIR}
 	install -m 755 skopeo ${INSTALLDIR}/skopeo
 
-install-docs: docs/skopeo.1
+install-docs: docs
 	install -d -m 755 ${MANINSTALLDIR}/man1
-	install -m 644 docs/skopeo.1 ${MANINSTALLDIR}/man1/skopeo.1
+	install -m 644 docs/*.1 ${MANINSTALLDIR}/man1/
 
 install-completions:
 	install -m 755 -d ${BASHINSTALLDIR}
 	install -m 644 completions/bash/skopeo ${BASHINSTALLDIR}/skopeo
 
 shell: build-container
-	$(DOCKER_RUN_DOCKER) bash
+	$(CONTAINER_RUN) bash
 
-check: validate test-unit test-integration
+check: validate test-unit test-integration test-system
 
 # The tests can run out of entropy and block in containers, so replace /dev/random.
 test-integration: build-container
-	$(DOCKER_RUN_DOCKER) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'
+	$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'
+
+# complicated set of options needed to run podman-in-podman
+test-system: build-container
+	DTEMP=$(shell mktemp -d --tmpdir=/var/tmp podman-tmp.XXXXXX); \
+	$(CONTAINER_CMD) --privileged --net=host \
+		-v $$DTEMP:/var/lib/containers:Z \
+		"$(IMAGE)" \
+		bash -c 'BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-system'; \
+	rc=$$?; \
+	$(RM) -rf $$DTEMP; \
+	exit $$rc
 
 test-unit: build-container
 	# Just call (make test unit-local) here instead of worrying about environment differences, e.g. GO15VENDOREXPERIMENT.
-	$(DOCKER_RUN_DOCKER) make test-unit-local BUILDTAGS='$(BUILDTAGS)'
+	$(CONTAINER_RUN) make test-unit-local BUILDTAGS='$(BUILDTAGS)'
 
 validate: build-container
-	$(DOCKER_RUN_DOCKER) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
+	$(CONTAINER_RUN) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
 
 # This target is only intended for development, e.g. executing it from an IDE. Use (make test) for CI or pre-release testing.
 test-all-local: validate-local test-unit-local
@@ -134,7 +169,12 @@ validate-local:
 	hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
 
 test-unit-local:
-	$(GPGME_ENV) $(GO) test -tags "$(BUILDTAGS)" $$($(GO) list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/projectatomic/skopeo/\(integration\|vendor/.*\)$$')
+	$(GPGME_ENV) $(GO) test -tags "$(BUILDTAGS)" $$($(GO) list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')
 
-vendor: vendor.conf
-	vndr -whitelist '^github.com/containers/image/docs/.*'
+.install.vndr:
+	$(GO) get -u github.com/LK4D4/vndr
+
+vendor: vendor.conf .install.vndr
+	$(GOPATH)/bin/vndr \
+		-whitelist '^github.com/containers/image/docs/.*' \
+		-whitelist '^github.com/containers/image/registries.conf'
```
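For orientation, these are typical invocations of the targets touched above; podman is picked automatically when available, per the CONTAINER_RUNTIME probe:

```sh
# Containerized build (runs binary-local inside skopeobuildimage).
make binary

# Local build; build tags come from the hack/*_tag.sh probes.
make binary-local

# Pure-Go static binary with the cgo-dependent backends disabled.
make binary-static DISABLE_CGO=1

# Man pages, either locally (needs go-md2man) or inside the build image.
make docs
make docs-in-container

# validate + test-unit + test-integration + the new test-system target.
make check
```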
README.md (46 changes)

````diff
@@ -1,7 +1,7 @@
-skopeo [](https://travis-ci.org/projectatomic/skopeo)
+skopeo [](https://travis-ci.org/containers/skopeo)
 =
 
-<img src="https://cdn.rawgit.com/projectatomic/skopeo/master/docs/skopeo.svg" width="250">
+<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
 
 ----
 
@@ -12,7 +12,7 @@ … to also build documentation, see below.
 ```
 
+To build a pure-Go static binary (disables ostree, devicemapper, btrfs, and gpgme):
+
+```sh
+$ make binary-static DISABLE_CGO=1
+```
+
 ### Building documentation
 To build the manual you will need go-md2man.
 ```sh
@@ -222,34 +229,7 @@ NOT TODO
 CONTRIBUTING
 -
 
-### Dependencies management
-
-Make sure [`vndr`](https://github.com/LK4D4/vndr) is installed.
-
-In order to add a new dependency to this project:
-
-- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
-- run `make vendor`
-
-In order to update an existing dependency:
-
-- update the relevant dependency line in `vendor.conf`
-- run `make vendor`
-
-When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):
-
-- create out a new branch in your `skopeo` checkout and switch to it
-- update `vendor.conf`. Find out the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
-- run `make vendor`
-- make any other necessary changes in the skopeo repo (e.g. add other dependencies now requied by `containers/image`, or update skopeo for changed `containers/image` API)
-- optionally add new integration tests to the skopeo repo
-- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
-- iterate until tests pass and the PR is reviewed
-- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
-- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
-- run `make vendor`
-- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
-- after tests complete succcesfully again, merge the skopeo PR
+Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate in the project.
 
 License
 -
````
cmd/skopeo/copy.go

```diff
@@ -1,10 +1,9 @@
 package main
 
 import (
-	"context"
 	"errors"
 	"fmt"
-	"os"
+	"io"
 	"strings"
 
 	"github.com/containers/image/copy"
@@ -12,95 +11,35 @@ import (
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/transports"
 	"github.com/containers/image/transports/alltransports"
-	"github.com/containers/image/types"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/urfave/cli"
 )
 
-// contextsFromGlobalOptions returns source and destionation types.SystemContext depending on c.
-func contextsFromGlobalOptions(c *cli.Context) (*types.SystemContext, *types.SystemContext, error) {
-	sourceCtx, err := contextFromGlobalOptions(c, "src-")
-	if err != nil {
-		return nil, nil, err
-	}
-	destinationCtx, err := contextFromGlobalOptions(c, "dest-")
-	if err != nil {
-		return nil, nil, err
-	}
-	return sourceCtx, destinationCtx, nil
-}
-
-func copyHandler(c *cli.Context) error {
-	if len(c.Args()) != 2 {
-		return errors.New("Usage: copy source destination")
-	}
-
-	policyContext, err := getPolicyContext(c)
-	if err != nil {
-		return fmt.Errorf("Error loading trust policy: %v", err)
-	}
-	defer policyContext.Destroy()
-
-	srcRef, err := alltransports.ParseImageName(c.Args()[0])
-	if err != nil {
-		return fmt.Errorf("Invalid source name %s: %v", c.Args()[0], err)
-	}
-	destRef, err := alltransports.ParseImageName(c.Args()[1])
-	if err != nil {
-		return fmt.Errorf("Invalid destination name %s: %v", c.Args()[1], err)
-	}
-	signBy := c.String("sign-by")
-	removeSignatures := c.Bool("remove-signatures")
-
-	sourceCtx, destinationCtx, err := contextsFromGlobalOptions(c)
-	if err != nil {
-		return err
-	}
-
-	var manifestType string
-	if c.IsSet("format") {
-		switch c.String("format") {
-		case "oci":
-			manifestType = imgspecv1.MediaTypeImageManifest
-		case "v2s1":
-			manifestType = manifest.DockerV2Schema1SignedMediaType
-		case "v2s2":
-			manifestType = manifest.DockerV2Schema2MediaType
-		default:
-			return fmt.Errorf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", c.String("format"))
-		}
-	}
-
-	if c.IsSet("additional-tag") {
-		for _, image := range c.StringSlice("additional-tag") {
-			ref, err := reference.ParseNormalizedNamed(image)
-			if err != nil {
-				return fmt.Errorf("error parsing additional-tag '%s': %v", image, err)
-			}
-			namedTagged, isNamedTagged := ref.(reference.NamedTagged)
-			if !isNamedTagged {
-				return fmt.Errorf("additional-tag '%s' must be a tagged reference", image)
-			}
-			destinationCtx.DockerArchiveAdditionalTags = append(destinationCtx.DockerArchiveAdditionalTags, namedTagged)
-		}
-	}
-
-	return copy.Image(context.Background(), policyContext, destRef, srcRef, &copy.Options{
-		RemoveSignatures:      removeSignatures,
-		SignBy:                signBy,
-		ReportWriter:          os.Stdout,
-		SourceCtx:             sourceCtx,
-		DestinationCtx:        destinationCtx,
-		ForceManifestMIMEType: manifestType,
-	})
-}
-
-var copyCmd = cli.Command{
-	Name:  "copy",
-	Usage: "Copy an IMAGE-NAME from one location to another",
-	Description: fmt.Sprintf(`
+type copyOptions struct {
+	global            *globalOptions
+	srcImage          *imageOptions
+	destImage         *imageDestOptions
+	additionalTags    cli.StringSlice // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
+	removeSignatures  bool            // Do not copy signatures from the source image
+	signByFingerprint string          // Sign the image using a GPG key with the specified fingerprint
+	format            optionalString  // Force conversion of the image to a specified format
+	quiet             bool            // Suppress output information when copying images
+}
+
+func copyCmd(global *globalOptions) cli.Command {
+	sharedFlags, sharedOpts := sharedImageFlags()
+	srcFlags, srcOpts := imageFlags(global, sharedOpts, "src-", "screds")
+	destFlags, destOpts := imageDestFlags(global, sharedOpts, "dest-", "dcreds")
+	opts := copyOptions{global: global,
+		srcImage:  srcOpts,
+		destImage: destOpts,
+	}
+	return cli.Command{
+		Name:  "copy",
+		Usage: "Copy an IMAGE-NAME from one location to another",
+		Description: fmt.Sprintf(`
@@ -109,86 +48,112 @@ var copyCmd = cli.Command{
 
 Container "IMAGE-NAME" uses a "transport":"details" format.
 
 See skopeo(1) section "IMAGE NAMES" for the expected format
 `, strings.Join(transports.ListNames(), ", ")),
-	ArgsUsage: "SOURCE-IMAGE DESTINATION-IMAGE",
-	Action:    copyHandler,
-	// FIXME: Do we need to namespace the GPG aspect?
-	Flags: []cli.Flag{
-		cli.StringSliceFlag{
-			Name:  "additional-tag",
-			Usage: "additional tags (supports docker-archive)",
-		},
-		cli.StringFlag{
-			Name:  "authfile",
-			Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
-		},
-		cli.BoolFlag{
-			Name:  "remove-signatures",
-			Usage: "Do not copy signatures from SOURCE-IMAGE",
-		},
-		cli.StringFlag{
-			Name:  "sign-by",
-			Usage: "Sign the image using a GPG key with the specified `FINGERPRINT`",
-		},
-		cli.StringFlag{
-			Name:  "src-creds, screds",
-			Value: "",
-			Usage: "Use `USERNAME[:PASSWORD]` for accessing the source registry",
-		},
-		cli.StringFlag{
-			Name:  "dest-creds, dcreds",
-			Value: "",
-			Usage: "Use `USERNAME[:PASSWORD]` for accessing the destination registry",
-		},
-		cli.StringFlag{
-			Name:  "src-cert-dir",
-			Value: "",
-			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the source registry or daemon",
-		},
-		cli.BoolTFlag{
-			Name:  "src-tls-verify",
-			Usage: "require HTTPS and verify certificates when talking to the container source registry or daemon (defaults to true)",
-		},
-		cli.StringFlag{
-			Name:  "dest-cert-dir",
-			Value: "",
-			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the destination registry or daemon",
-		},
-		cli.BoolTFlag{
-			Name:  "dest-tls-verify",
-			Usage: "require HTTPS and verify certificates when talking to the container destination registry or daemon (defaults to true)",
-		},
-		cli.StringFlag{
-			Name:  "dest-ostree-tmp-dir",
-			Value: "",
-			Usage: "`DIRECTORY` to use for OSTree temporary files",
-		},
-		cli.StringFlag{
-			Name:  "src-shared-blob-dir",
-			Value: "",
-			Usage: "`DIRECTORY` to use to fetch retrieved blobs (OCI layout sources only)",
-		},
-		cli.StringFlag{
-			Name:  "dest-shared-blob-dir",
-			Value: "",
-			Usage: "`DIRECTORY` to use to store retrieved blobs (OCI layout destinations only)",
-		},
-		cli.StringFlag{
-			Name:  "format, f",
-			Usage: "`MANIFEST TYPE` (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)",
-		},
-		cli.BoolFlag{
-			Name:  "dest-compress",
-			Usage: "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)",
-		},
-		cli.StringFlag{
-			Name:  "src-daemon-host",
-			Value: "",
-			Usage: "use docker daemon host at `HOST` (docker-daemon sources only)",
-		},
-		cli.StringFlag{
-			Name:  "dest-daemon-host",
-			Value: "",
-			Usage: "use docker daemon host at `HOST` (docker-daemon destinations only)",
-		},
-	},
-}
+		ArgsUsage: "SOURCE-IMAGE DESTINATION-IMAGE",
+		Action:    commandAction(opts.run),
+		// FIXME: Do we need to namespace the GPG aspect?
+		Flags: append(append(append([]cli.Flag{
+			cli.StringSliceFlag{
+				Name:  "additional-tag",
+				Usage: "additional tags (supports docker-archive)",
+				Value: &opts.additionalTags, // Surprisingly StringSliceFlag does not support Destination:, but modifies Value: in place.
+			},
+			cli.BoolFlag{
+				Name:        "quiet, q",
+				Usage:       "Suppress output information when copying images",
+				Destination: &opts.quiet,
+			},
+			cli.BoolFlag{
+				Name:        "remove-signatures",
+				Usage:       "Do not copy signatures from SOURCE-IMAGE",
+				Destination: &opts.removeSignatures,
+			},
+			cli.StringFlag{
+				Name:        "sign-by",
+				Usage:       "Sign the image using a GPG key with the specified `FINGERPRINT`",
+				Destination: &opts.signByFingerprint,
+			},
+			cli.GenericFlag{
+				Name:  "format, f",
+				Usage: "`MANIFEST TYPE` (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)",
+				Value: newOptionalStringValue(&opts.format),
+			},
+		}, sharedFlags...), srcFlags...), destFlags...),
+	}
+}
+
+func (opts *copyOptions) run(args []string, stdout io.Writer) error {
+	if len(args) != 2 {
+		return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
+	}
+	imageNames := args
+
+	if err := reexecIfNecessaryForImages(imageNames...); err != nil {
+		return err
+	}
+
+	policyContext, err := opts.global.getPolicyContext()
+	if err != nil {
+		return fmt.Errorf("Error loading trust policy: %v", err)
+	}
+	defer policyContext.Destroy()
+
+	srcRef, err := alltransports.ParseImageName(imageNames[0])
+	if err != nil {
+		return fmt.Errorf("Invalid source name %s: %v", imageNames[0], err)
+	}
+	destRef, err := alltransports.ParseImageName(imageNames[1])
+	if err != nil {
+		return fmt.Errorf("Invalid destination name %s: %v", imageNames[1], err)
+	}
+
+	sourceCtx, err := opts.srcImage.newSystemContext()
+	if err != nil {
+		return err
+	}
+	destinationCtx, err := opts.destImage.newSystemContext()
+	if err != nil {
+		return err
+	}
+
+	var manifestType string
+	if opts.format.present {
+		switch opts.format.value {
+		case "oci":
+			manifestType = imgspecv1.MediaTypeImageManifest
+		case "v2s1":
+			manifestType = manifest.DockerV2Schema1SignedMediaType
+		case "v2s2":
+			manifestType = manifest.DockerV2Schema2MediaType
+		default:
+			return fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", opts.format.value)
+		}
+	}
+
+	for _, image := range opts.additionalTags {
+		ref, err := reference.ParseNormalizedNamed(image)
+		if err != nil {
+			return fmt.Errorf("error parsing additional-tag '%s': %v", image, err)
+		}
+		namedTagged, isNamedTagged := ref.(reference.NamedTagged)
+		if !isNamedTagged {
+			return fmt.Errorf("additional-tag '%s' must be a tagged reference", image)
+		}
+		destinationCtx.DockerArchiveAdditionalTags = append(destinationCtx.DockerArchiveAdditionalTags, namedTagged)
+	}
+
+	ctx, cancel := opts.global.commandTimeoutContext()
+	defer cancel()
+
+	if opts.quiet {
+		stdout = nil
+	}
+	_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
+		RemoveSignatures:      opts.removeSignatures,
+		SignBy:                opts.signByFingerprint,
+		ReportWriter:          stdout,
+		SourceCtx:             sourceCtx,
+		DestinationCtx:        destinationCtx,
+		ForceManifestMIMEType: manifestType,
+	})
+	return err
+}
```
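The restructured command keeps the CLI behaviour of the old copyHandler. A few illustrative invocations; the registry name and key fingerprint are placeholders:

```sh
# Force conversion to an OCI manifest while copying (ForceManifestMIMEType).
skopeo copy --format oci docker://busybox:latest oci:/tmp/busybox-layout:latest

# Sign during the copy; --quiet drops the progress output (ReportWriter becomes nil).
skopeo copy --quiet --sign-by <FINGERPRINT> \
    docker://registry.example.com/myimage:latest dir:/tmp/myimage

# For docker-archive destinations, record extra name:tag pairs in the archive.
skopeo copy --additional-tag myimage:extra \
    docker://registry.example.com/myimage:latest docker-archive:/tmp/myimage.tar
```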
cmd/skopeo/delete.go

```diff
@@ -1,9 +1,9 @@
 package main
 
 import (
-	"context"
 	"errors"
 	"fmt"
+	"io"
 	"strings"
 
 	"github.com/containers/image/transports"
@@ -11,27 +11,22 @@ import (
 	"github.com/urfave/cli"
 )
 
-func deleteHandler(c *cli.Context) error {
-	if len(c.Args()) != 1 {
-		return errors.New("Usage: delete imageReference")
-	}
-
-	ref, err := alltransports.ParseImageName(c.Args()[0])
-	if err != nil {
-		return fmt.Errorf("Invalid source name %s: %v", c.Args()[0], err)
-	}
-
-	sys, err := contextFromGlobalOptions(c, "")
-	if err != nil {
-		return err
-	}
-	return ref.DeleteImage(context.Background(), sys)
+type deleteOptions struct {
+	global *globalOptions
+	image  *imageOptions
 }
 
-var deleteCmd = cli.Command{
-	Name:  "delete",
-	Usage: "Delete image IMAGE-NAME",
-	Description: fmt.Sprintf(`
+func deleteCmd(global *globalOptions) cli.Command {
+	sharedFlags, sharedOpts := sharedImageFlags()
+	imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
+	opts := deleteOptions{
+		global: global,
+		image:  imageOpts,
+	}
+	return cli.Command{
+		Name:  "delete",
+		Usage: "Delete image IMAGE-NAME",
+		Description: fmt.Sprintf(`
 Delete an "IMAGE_NAME" from a transport
 
 Supported transports:
@@ -39,26 +34,33 @@ var deleteCmd = cli.Command{
 
 See skopeo(1) section "IMAGE NAMES" for the expected format
 `, strings.Join(transports.ListNames(), ", ")),
-	ArgsUsage: "IMAGE-NAME",
-	Action:    deleteHandler,
-	Flags: []cli.Flag{
-		cli.StringFlag{
-			Name:  "authfile",
-			Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
-		},
-		cli.StringFlag{
-			Name:  "creds",
-			Value: "",
-			Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
-		},
-		cli.StringFlag{
-			Name:  "cert-dir",
-			Value: "",
-			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
-		},
-		cli.BoolTFlag{
-			Name:  "tls-verify",
-			Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
-		},
-	},
-}
+		ArgsUsage: "IMAGE-NAME",
+		Action:    commandAction(opts.run),
+		Flags:     append(sharedFlags, imageFlags...),
+	}
+}
+
+func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
+	if len(args) != 1 {
+		return errors.New("Usage: delete imageReference")
+	}
+	imageName := args[0]
+
+	if err := reexecIfNecessaryForImages(imageName); err != nil {
+		return err
+	}
+
+	ref, err := alltransports.ParseImageName(imageName)
+	if err != nil {
+		return fmt.Errorf("Invalid source name %s: %v", imageName, err)
+	}
+
+	sys, err := opts.image.newSystemContext()
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := opts.global.commandTimeoutContext()
+	defer cancel()
+	return ref.DeleteImage(ctx, sys)
+}
```
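Usage is unchanged by the refactor; for example (placeholder registry, and the registry must permit deletion):

```sh
skopeo delete docker://registry.example.com/myimage:latest
```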
cmd/skopeo/flag.go (new file, 75 lines)

```diff
@@ -0,0 +1,75 @@
+package main
+
+import (
+	"strconv"
+
+	"github.com/urfave/cli"
+)
+
+// optionalBool is a boolean with a separate presence flag.
+type optionalBool struct {
+	present bool
+	value   bool
+}
+
+// optionalBoolValue is a cli.Generic == flag.Value implementation equivalent to
+// the one underlying flag.Bool, except that it records whether the flag has been set.
+// This is distinct from optionalBool to (pretend to) force callers to use
+// newOptionalBoolValue.
+type optionalBoolValue optionalBool
+
+func newOptionalBoolValue(p *optionalBool) cli.Generic {
+	p.present = false
+	return (*optionalBoolValue)(p)
+}
+
+func (ob *optionalBoolValue) Set(s string) error {
+	v, err := strconv.ParseBool(s)
+	if err != nil {
+		return err
+	}
+	ob.value = v
+	ob.present = true
+	return nil
+}
+
+func (ob *optionalBoolValue) String() string {
+	if !ob.present {
+		return "" // This is, sadly, not round-trip safe: --flag is interpreted as --flag=true
+	}
+	return strconv.FormatBool(ob.value)
+}
+
+func (ob *optionalBoolValue) IsBoolFlag() bool {
+	return true
+}
+
+// optionalString is a string with a separate presence flag.
+type optionalString struct {
+	present bool
+	value   string
+}
+
+// optionalStringValue is a cli.Generic == flag.Value implementation equivalent to
+// the one underlying flag.String, except that it records whether the flag has been set.
+// This is distinct from optionalString to (pretend to) force callers to use
+// newOptionalStringValue.
+type optionalStringValue optionalString
+
+func newOptionalStringValue(p *optionalString) cli.Generic {
+	p.present = false
+	return (*optionalStringValue)(p)
+}
+
+func (ob *optionalStringValue) Set(s string) error {
+	ob.value = s
+	ob.present = true
+	return nil
+}
+
+func (ob *optionalStringValue) String() string {
+	if !ob.present {
+		return "" // This is, sadly, not round-trip safe: --flag= is interpreted as {present:true, value:""}
+	}
+	return ob.value
+}
```
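The practical consequence of IsBoolFlag (exercised by the tests below) is that a value for an optional bool must be attached with `=`. Assuming the shared image flags wire --src-tls-verify through newOptionalBoolValue (that wiring is not shown in this compare view), the difference looks like:

```sh
# Parsed by optionalBoolValue.Set("false"): present=true, value=false.
skopeo copy --src-tls-verify=false docker://localhost:5000/myimage:latest dir:/tmp/out

# Here the bare flag alone means "true" and "false" is left as the next
# positional argument, so this does NOT disable TLS verification.
skopeo copy --src-tls-verify false docker://localhost:5000/myimage:latest dir:/tmp/out
```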
cmd/skopeo/flag_test.go (new file, 239 lines)

```diff
@@ -0,0 +1,239 @@
+package main
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/urfave/cli"
+)
+
+func TestOptionalBoolSet(t *testing.T) {
+	for _, c := range []struct {
+		input    string
+		accepted bool
+		value    bool
+	}{
+		// Valid inputs documented for strconv.ParseBool == flag.BoolVar
+		{"1", true, true},
+		{"t", true, true},
+		{"T", true, true},
+		{"TRUE", true, true},
+		{"true", true, true},
+		{"True", true, true},
+		{"0", true, false},
+		{"f", true, false},
+		{"F", true, false},
+		{"FALSE", true, false},
+		{"false", true, false},
+		{"False", true, false},
+		// A few invalid inputs
+		{"", false, false},
+		{"yes", false, false},
+		{"no", false, false},
+		{"2", false, false},
+	} {
+		var ob optionalBool
+		v := newOptionalBoolValue(&ob)
+		require.False(t, ob.present)
+		err := v.Set(c.input)
+		if c.accepted {
+			assert.NoError(t, err, c.input)
+			assert.Equal(t, c.value, ob.value)
+		} else {
+			assert.Error(t, err, c.input)
+			assert.False(t, ob.present) // Just to be extra paranoid.
+		}
+	}
+
+	// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
+	// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
+	// is not called in any possible situation).
+	var globalOB, commandOB optionalBool
+	actionRun := false
+	app := cli.NewApp()
+	app.EnableBashCompletion = true
+	app.Flags = []cli.Flag{
+		cli.GenericFlag{
+			Name:  "global-OB",
+			Value: newOptionalBoolValue(&globalOB),
+		},
+	}
+	app.Commands = []cli.Command{{
+		Name: "cmd",
+		Flags: []cli.Flag{
+			cli.GenericFlag{
+				Name:  "command-OB",
+				Value: newOptionalBoolValue(&commandOB),
+			},
+		},
+		Action: func(*cli.Context) error {
+			assert.False(t, globalOB.present)
+			assert.False(t, commandOB.present)
+			actionRun = true
+			return nil
+		},
+	}}
+	err := app.Run([]string{"app", "cmd"})
+	require.NoError(t, err)
+	assert.True(t, actionRun)
+}
+
+func TestOptionalBoolString(t *testing.T) {
+	for _, c := range []struct {
+		input    optionalBool
+		expected string
+	}{
+		{optionalBool{present: true, value: true}, "true"},
+		{optionalBool{present: true, value: false}, "false"},
+		{optionalBool{present: false, value: true}, ""},
+		{optionalBool{present: false, value: false}, ""},
+	} {
+		var ob optionalBool
+		v := newOptionalBoolValue(&ob)
+		ob = c.input
+		res := v.String()
+		assert.Equal(t, c.expected, res)
+	}
+}
+
+func TestOptionalBoolIsBoolFlag(t *testing.T) {
+	// IsBoolFlag means that the argument value must either be part of the same argument, with =;
+	// if there is no =, the value is set to true.
+	// This differs from other flags, where the argument is required and may be either separated with = or supplied in the next argument.
+	for _, c := range []struct {
+		input        []string
+		expectedOB   optionalBool
+		expectedArgs []string
+	}{
+		{[]string{"1", "2"}, optionalBool{present: false}, []string{"1", "2"}},                                       // Flag not present
+		{[]string{"--OB=true", "1", "2"}, optionalBool{present: true, value: true}, []string{"1", "2"}},              // --OB=true
+		{[]string{"--OB=false", "1", "2"}, optionalBool{present: true, value: false}, []string{"1", "2"}},            // --OB=false
+		{[]string{"--OB", "true", "1", "2"}, optionalBool{present: true, value: true}, []string{"true", "1", "2"}},   // --OB true
+		{[]string{"--OB", "false", "1", "2"}, optionalBool{present: true, value: true}, []string{"false", "1", "2"}}, // --OB false
+	} {
+		var ob optionalBool
+		actionRun := false
+		app := cli.NewApp()
+		app.Commands = []cli.Command{{
+			Name: "cmd",
+			Flags: []cli.Flag{
+				cli.GenericFlag{
+					Name:  "OB",
+					Value: newOptionalBoolValue(&ob),
+				},
+			},
+			Action: func(ctx *cli.Context) error {
+				assert.Equal(t, c.expectedOB, ob)
+				assert.Equal(t, c.expectedArgs, ([]string)(ctx.Args()))
+				actionRun = true
+				return nil
+			},
+		}}
+		err := app.Run(append([]string{"app", "cmd"}, c.input...))
+		require.NoError(t, err)
+		assert.True(t, actionRun)
+	}
+}
+
+func TestOptionalStringSet(t *testing.T) {
+	// Really just a smoke test, but differentiating between not present and empty.
+	for _, c := range []string{"", "hello"} {
+		var os optionalString
+		v := newOptionalStringValue(&os)
+		require.False(t, os.present)
+		err := v.Set(c)
+		assert.NoError(t, err, c)
+		assert.Equal(t, c, os.value)
+	}
+
+	// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
+	// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
+	// is not called in any possible situation).
+	var globalOS, commandOS optionalString
+	actionRun := false
+	app := cli.NewApp()
+	app.EnableBashCompletion = true
+	app.Flags = []cli.Flag{
+		cli.GenericFlag{
+			Name:  "global-OS",
+			Value: newOptionalStringValue(&globalOS),
+		},
+	}
+	app.Commands = []cli.Command{{
+		Name: "cmd",
+		Flags: []cli.Flag{
+			cli.GenericFlag{
+				Name:  "command-OS",
+				Value: newOptionalStringValue(&commandOS),
+			},
+		},
+		Action: func(*cli.Context) error {
+			assert.False(t, globalOS.present)
+			assert.False(t, commandOS.present)
+			actionRun = true
+			return nil
+		},
+	}}
+	err := app.Run([]string{"app", "cmd"})
+	require.NoError(t, err)
+	assert.True(t, actionRun)
+}
+
+func TestOptionalStringString(t *testing.T) {
+	for _, c := range []struct {
+		input    optionalString
+		expected string
+	}{
+		{optionalString{present: true, value: "hello"}, "hello"},
+		{optionalString{present: true, value: ""}, ""},
+		{optionalString{present: false, value: "hello"}, ""},
+		{optionalString{present: false, value: ""}, ""},
+	} {
+		var os optionalString
+		v := newOptionalStringValue(&os)
+		os = c.input
+		res := v.String()
+		assert.Equal(t, c.expected, res)
+	}
+}
+
+func TestOptionalStringIsBoolFlag(t *testing.T) {
+	// NOTE: optionalStringValue does not implement IsBoolFlag!
+	// IsBoolFlag means that the argument value must either be part of the same argument, with =;
+	// if there is no =, the value is set to true.
+	// This differs from other flags, where the argument is required and may be either separated with = or supplied in the next argument.
+	for _, c := range []struct {
+		input        []string
+		expectedOS   optionalString
+		expectedArgs []string
+	}{
+		{[]string{"1", "2"}, optionalString{present: false}, []string{"1", "2"}},                               // Flag not present
+		{[]string{"--OS=hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}},  // --OS=hello
+		{[]string{"--OS=", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}},            // --OS= (empty value)
+		{[]string{"--OS", "hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}}, // --OS hello
+		{[]string{"--OS", "", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}},         // --OS ""
+	} {
+		var os optionalString
+		actionRun := false
+		app := cli.NewApp()
+		app.Commands = []cli.Command{{
+			Name: "cmd",
+			Flags: []cli.Flag{
+				cli.GenericFlag{
+					Name:  "OS",
+					Value: newOptionalStringValue(&os),
+				},
+			},
+			Action: func(ctx *cli.Context) error {
+				assert.Equal(t, c.expectedOS, os)
+				assert.Equal(t, c.expectedArgs, ([]string)(ctx.Args()))
+				actionRun = true
+				return nil
+			},
+		}}
+		err := app.Run(append([]string{"app", "cmd"}, c.input...))
+		require.NoError(t, err)
+		assert.True(t, actionRun)
+	}
+}
```
cmd/skopeo/inspect.go

```diff
@@ -1,9 +1,9 @@
 package main
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
+	"io"
 	"strings"
 	"time"
 
@@ -30,10 +30,24 @@ type inspectOutput struct {
 	Layers        []string
 }
 
-var inspectCmd = cli.Command{
-	Name:  "inspect",
-	Usage: "Inspect image IMAGE-NAME",
-	Description: fmt.Sprintf(`
+type inspectOptions struct {
+	global *globalOptions
+	image  *imageOptions
+	raw    bool // Output the raw manifest instead of parsing information about the image
+	config bool // Output the raw config blob instead of parsing information about the image
+}
+
+func inspectCmd(global *globalOptions) cli.Command {
+	sharedFlags, sharedOpts := sharedImageFlags()
+	imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
+	opts := inspectOptions{
+		global: global,
+		image:  imageOpts,
+	}
+	return cli.Command{
+		Name:  "inspect",
+		Usage: "Inspect image IMAGE-NAME",
+		Description: fmt.Sprintf(`
 Return low-level information about "IMAGE-NAME" in a registry/transport
 
 Supported transports:
@@ -41,94 +55,121 @@ var inspectCmd = cli.Command{
 
 See skopeo(1) section "IMAGE NAMES" for the expected format
 `, strings.Join(transports.ListNames(), ", ")),
-	ArgsUsage: "IMAGE-NAME",
-	Flags: []cli.Flag{
-		cli.StringFlag{
-			Name:  "authfile",
-			Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
-		},
-		cli.StringFlag{
-			Name:  "cert-dir",
-			Value: "",
-			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
-		},
-		cli.BoolTFlag{
-			Name:  "tls-verify",
-			Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
-		},
-		cli.BoolFlag{
-			Name:  "raw",
-			Usage: "output raw manifest",
-		},
-		cli.StringFlag{
-			Name:  "creds",
-			Value: "",
-			Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
-		},
-	},
-	Action: func(c *cli.Context) (retErr error) {
-		ctx := context.Background()
-
-		img, err := parseImage(ctx, c)
-		if err != nil {
-			return err
-		}
-
-		defer func() {
-			if err := img.Close(); err != nil {
-				retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
-			}
-		}()
-
-		rawManifest, _, err := img.Manifest(ctx)
-		if err != nil {
-			return err
-		}
-		if c.Bool("raw") {
-			_, err := c.App.Writer.Write(rawManifest)
-			if err != nil {
-				return fmt.Errorf("Error writing manifest to standard output: %v", err)
-			}
-			return nil
-		}
-		imgInspect, err := img.Inspect(ctx)
-		if err != nil {
-			return err
-		}
-		outputData := inspectOutput{
-			Name: "", // Possibly overridden for a docker.Image.
-			Tag:  imgInspect.Tag,
-			// Digest is set below.
-			RepoTags:      []string{}, // Possibly overriden for a docker.Image.
-			Created:       imgInspect.Created,
-			DockerVersion: imgInspect.DockerVersion,
-			Labels:        imgInspect.Labels,
-			Architecture:  imgInspect.Architecture,
-			Os:            imgInspect.Os,
-			Layers:        imgInspect.Layers,
-		}
-		outputData.Digest, err = manifest.Digest(rawManifest)
-		if err != nil {
-			return fmt.Errorf("Error computing manifest digest: %v", err)
-		}
-		if dockerImg, ok := img.(*docker.Image); ok {
-			outputData.Name = dockerImg.SourceRefFullName()
-			outputData.RepoTags, err = dockerImg.GetRepositoryTags(ctx)
-			if err != nil {
-				// some registries may decide to block the "list all tags" endpoint
-				// gracefully allow the inspect to continue in this case. Currently
-				// the IBM Bluemix container registry has this restriction.
-				if !strings.Contains(err.Error(), "401") {
-					return fmt.Errorf("Error determining repository tags: %v", err)
-				}
-				logrus.Warnf("Registry disallows tag list retrieval; skipping")
-			}
-		}
-		out, err := json.MarshalIndent(outputData, "", "    ")
-		if err != nil {
-			return err
-		}
-		fmt.Fprintln(c.App.Writer, string(out))
-		return nil
-	},
-}
+		ArgsUsage: "IMAGE-NAME",
+		Flags: append(append([]cli.Flag{
+			cli.BoolFlag{
+				Name:        "raw",
+				Usage:       "output raw manifest or configuration",
+				Destination: &opts.raw,
+			},
+			cli.BoolFlag{
+				Name:        "config",
+				Usage:       "output configuration",
+				Destination: &opts.config,
+			},
+		}, sharedFlags...), imageFlags...),
+		Action: commandAction(opts.run),
+	}
+}
+
+func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error) {
+	ctx, cancel := opts.global.commandTimeoutContext()
+	defer cancel()
+
+	if len(args) != 1 {
+		return errors.New("Exactly one argument expected")
+	}
+	imageName := args[0]
+
+	if err := reexecIfNecessaryForImages(imageName); err != nil {
+		return err
+	}
+
+	img, err := parseImage(ctx, opts.image, imageName)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		if err := img.Close(); err != nil {
+			retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
+		}
+	}()
+
+	rawManifest, _, err := img.Manifest(ctx)
+	if err != nil {
+		return err
+	}
+	if opts.config && opts.raw {
+		configBlob, err := img.ConfigBlob(ctx)
+		if err != nil {
+			return fmt.Errorf("Error reading configuration blob: %v", err)
+		}
+		_, err = stdout.Write(configBlob)
+		if err != nil {
+			return fmt.Errorf("Error writing configuration blob to standard output: %v", err)
+		}
+		return nil
+	} else if opts.raw {
+		_, err := stdout.Write(rawManifest)
+		if err != nil {
+			return fmt.Errorf("Error writing manifest to standard output: %v", err)
+		}
+		return nil
+	} else if opts.config {
+		config, err := img.OCIConfig(ctx)
+		if err != nil {
+			return fmt.Errorf("Error reading OCI-formatted configuration data: %v", err)
+		}
+		err = json.NewEncoder(stdout).Encode(config)
+		if err != nil {
+			return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %v", err)
+		}
+		return nil
+	}
+	imgInspect, err := img.Inspect(ctx)
+	if err != nil {
+		return err
+	}
+	outputData := inspectOutput{
+		Name: "", // Set below if DockerReference() is known
+		Tag:  imgInspect.Tag,
+		// Digest is set below.
+		RepoTags:      []string{}, // Possibly overridden for docker.Transport.
+		Created:       imgInspect.Created,
+		DockerVersion: imgInspect.DockerVersion,
+		Labels:        imgInspect.Labels,
+		Architecture:  imgInspect.Architecture,
+		Os:            imgInspect.Os,
+		Layers:        imgInspect.Layers,
+	}
+	outputData.Digest, err = manifest.Digest(rawManifest)
+	if err != nil {
+		return fmt.Errorf("Error computing manifest digest: %v", err)
+	}
+	if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
+		outputData.Name = dockerRef.Name()
+	}
+	if img.Reference().Transport() == docker.Transport {
+		sys, err := opts.image.newSystemContext()
+		if err != nil {
+			return err
+		}
+		outputData.RepoTags, err = docker.GetRepositoryTags(ctx, sys, img.Reference())
+		if err != nil {
+			// some registries may decide to block the "list all tags" endpoint;
+			// gracefully allow the inspect to continue in this case. Currently
+			// the IBM Bluemix container registry has this restriction.
+			if !strings.Contains(err.Error(), "401") {
+				return fmt.Errorf("Error determining repository tags: %v", err)
+			}
+			logrus.Warnf("Registry disallows tag list retrieval; skipping")
+		}
+	}
+	out, err := json.MarshalIndent(outputData, "", "    ")
+	if err != nil {
+		return err
+	}
+	fmt.Fprintf(stdout, "%s\n", string(out))
+	return nil
+}
```
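The branches of the new run() map directly onto flag combinations:

```sh
# Parsed inspectOutput JSON (default).
skopeo inspect docker://busybox:latest

# Raw manifest bytes.
skopeo inspect --raw docker://busybox:latest

# Configuration in OCI format (img.OCIConfig).
skopeo inspect --config docker://busybox:latest

# Raw, unparsed config blob (img.ConfigBlob).
skopeo inspect --raw --config docker://busybox:latest
```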
cmd/skopeo/layers.go

```diff
@@ -1,126 +1,150 @@
 package main
 
 import (
 	"context"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"strings"
 
 	"github.com/containers/image/directory"
 	"github.com/containers/image/image"
+	"github.com/containers/image/pkg/blobinfocache"
 	"github.com/containers/image/types"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
 
-var layersCmd = cli.Command{
-	Name:      "layers",
-	Usage:     "Get layers of IMAGE-NAME",
-	ArgsUsage: "IMAGE-NAME [LAYER...]",
-	Hidden:    true,
-	Action: func(c *cli.Context) (retErr error) {
-		fmt.Fprintln(os.Stderr, `DEPRECATED: skopeo layers is deprecated in favor of skopeo copy`)
-		if c.NArg() == 0 {
-			return errors.New("Usage: layers imageReference [layer...]")
-		}
-
-		ctx := context.Background()
-
-		sys, err := contextFromGlobalOptions(c, "")
-		if err != nil {
-			return err
-		}
-		rawSource, err := parseImageSource(ctx, c, c.Args()[0])
+type layersOptions struct {
+	global *globalOptions
+	image  *imageOptions
+}
+
+func layersCmd(global *globalOptions) cli.Command {
+	sharedFlags, sharedOpts := sharedImageFlags()
+	imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
+	opts := layersOptions{
+		global: global,
+		image:  imageOpts,
+	}
+	return cli.Command{
+		Name:      "layers",
+		Usage:     "Get layers of IMAGE-NAME",
+		ArgsUsage: "IMAGE-NAME [LAYER...]",
+		Hidden:    true,
+		Action:    commandAction(opts.run),
+		Flags:     append(sharedFlags, imageFlags...),
+	}
+}
+
+func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
+	fmt.Fprintln(os.Stderr, `DEPRECATED: skopeo layers is deprecated in favor of skopeo copy`)
+	if len(args) == 0 {
+		return errors.New("Usage: layers imageReference [layer...]")
+	}
+	imageName := args[0]
+
+	if err := reexecIfNecessaryForImages(imageName); err != nil {
+		return err
+	}
+
+	ctx, cancel := opts.global.commandTimeoutContext()
+	defer cancel()
+
+	sys, err := opts.image.newSystemContext()
+	if err != nil {
+		return err
+	}
+	cache := blobinfocache.DefaultCache(sys)
+	rawSource, err := parseImageSource(ctx, opts.image, imageName)
+	if err != nil {
+		return err
+	}
+	src, err := image.FromSource(ctx, sys, rawSource)
+	if err != nil {
+		if closeErr := rawSource.Close(); closeErr != nil {
+			return errors.Wrapf(err, " (close error: %v)", closeErr)
+		}
+		return err
+	}
+	defer func() {
+		if err := src.Close(); err != nil {
+			retErr = errors.Wrapf(retErr, " (close error: %v)", err)
+		}
+	}()
+
+	type blobDigest struct {
+		digest   digest.Digest
+		isConfig bool
+	}
+	var blobDigests []blobDigest
+	for _, dString := range args[1:] {
+		if !strings.HasPrefix(dString, "sha256:") {
+			dString = "sha256:" + dString
+		}
+		d, err := digest.Parse(dString)
+		if err != nil {
+			return err
+		}
+		blobDigests = append(blobDigests, blobDigest{digest: d, isConfig: false})
+	}
+
+	if len(blobDigests) == 0 {
+		layers := src.LayerInfos()
+		seenLayers := map[digest.Digest]struct{}{}
+		for _, info := range layers {
+			if _, ok := seenLayers[info.Digest]; !ok {
+				blobDigests = append(blobDigests, blobDigest{digest: info.Digest, isConfig: false})
+				seenLayers[info.Digest] = struct{}{}
+			}
+		}
+		configInfo := src.ConfigInfo()
+		if configInfo.Digest != "" {
+			blobDigests = append(blobDigests, blobDigest{digest: configInfo.Digest, isConfig: true})
```
|
||||
}
|
||||
}
|
||||
|
||||
tmpDir, err := ioutil.TempDir(".", "layers-")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmpDirRef, err := directory.NewReference(tmpDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dest, err := tmpDirRef.NewImageDestination(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := dest.Close(); err != nil {
|
||||
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for _, bd := range blobDigests {
|
||||
r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
src, err := image.FromSource(ctx, sys, rawSource)
|
||||
if err != nil {
|
||||
if closeErr := rawSource.Close(); closeErr != nil {
|
||||
if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
|
||||
if closeErr := r.Close(); closeErr != nil {
|
||||
return errors.Wrapf(err, " (close error: %v)", closeErr)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := src.Close(); err != nil {
|
||||
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
type blobDigest struct {
|
||||
digest digest.Digest
|
||||
isConfig bool
|
||||
}
|
||||
var blobDigests []blobDigest
|
||||
for _, dString := range c.Args().Tail() {
|
||||
if !strings.HasPrefix(dString, "sha256:") {
|
||||
dString = "sha256:" + dString
|
||||
}
|
||||
d, err := digest.Parse(dString)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blobDigests = append(blobDigests, blobDigest{digest: d, isConfig: false})
|
||||
}
|
||||
manifest, _, err := src.Manifest(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := dest.PutManifest(ctx, manifest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(blobDigests) == 0 {
|
||||
layers := src.LayerInfos()
|
||||
seenLayers := map[digest.Digest]struct{}{}
|
||||
for _, info := range layers {
|
||||
if _, ok := seenLayers[info.Digest]; !ok {
|
||||
blobDigests = append(blobDigests, blobDigest{digest: info.Digest, isConfig: false})
|
||||
seenLayers[info.Digest] = struct{}{}
|
||||
}
|
||||
}
|
||||
configInfo := src.ConfigInfo()
|
||||
if configInfo.Digest != "" {
|
||||
blobDigests = append(blobDigests, blobDigest{digest: configInfo.Digest, isConfig: true})
|
||||
}
|
||||
}
|
||||
|
||||
tmpDir, err := ioutil.TempDir(".", "layers-")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmpDirRef, err := directory.NewReference(tmpDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dest, err := tmpDirRef.NewImageDestination(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := dest.Close(); err != nil {
|
||||
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for _, bd := range blobDigests {
|
||||
r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, bd.isConfig); err != nil {
|
||||
if closeErr := r.Close(); closeErr != nil {
|
||||
return errors.Wrapf(err, " (close error: %v)", closeErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
manifest, _, err := src.Manifest(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := dest.PutManifest(ctx, manifest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dest.Commit(ctx)
|
||||
},
|
||||
return dest.Commit(ctx)
|
||||
}
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/signature"
|
||||
"github.com/containers/skopeo/version"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
"github.com/projectatomic/skopeo/version"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
@@ -15,8 +17,22 @@ import (
|
||||
// and will be populated by the Makefile
|
||||
var gitCommit = ""
|
||||
|
||||
// createApp returns a cli.App to be run or tested.
|
||||
func createApp() *cli.App {
|
||||
type globalOptions struct {
|
||||
debug bool // Enable debug output
|
||||
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
policyPath string // Path to a signature verification policy file
|
||||
insecurePolicy bool // Use an "allow everything" signature verification policy
|
||||
registriesDirPath string // Path to a "registries.d" registry configuration directory
|
||||
overrideArch string // Architecture to use for choosing images, instead of the runtime one
|
||||
overrideOS string // OS to use for choosing images, instead of the runtime one
|
||||
commandTimeout time.Duration // Timeout for the command execution
|
||||
registriesConfPath string // Path to the "registries.conf" file
|
||||
}
|
||||
|
||||
// createApp returns a cli.App, and the underlying globalOptions object, to be run or tested.
|
||||
func createApp() (*cli.App, *globalOptions) {
|
||||
opts := globalOptions{}
|
||||
|
||||
app := cli.NewApp()
|
||||
app.EnableBashCompletion = true
|
||||
app.Name = "skopeo"
|
||||
@@ -28,85 +44,112 @@ func createApp() *cli.App {
|
||||
app.Usage = "Various operations with container images and container image registries"
|
||||
app.Flags = []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "enable debug output",
|
||||
Name: "debug",
|
||||
Usage: "enable debug output",
|
||||
Destination: &opts.debug,
|
||||
},
|
||||
cli.BoolTFlag{
|
||||
cli.GenericFlag{
|
||||
Name: "tls-verify",
|
||||
Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
|
||||
Hidden: true,
|
||||
Value: newOptionalBoolValue(&opts.tlsVerify),
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "policy",
|
||||
Value: "",
|
||||
Usage: "Path to a trust policy file",
|
||||
Name: "policy",
|
||||
Usage: "Path to a trust policy file",
|
||||
Destination: &opts.policyPath,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "insecure-policy",
|
||||
Usage: "run the tool without any policy check",
|
||||
Name: "insecure-policy",
|
||||
Usage: "run the tool without any policy check",
|
||||
Destination: &opts.insecurePolicy,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "registries.d",
|
||||
Value: "",
|
||||
Usage: "use registry configuration files in `DIR` (e.g. for container signature storage)",
|
||||
Name: "registries.d",
|
||||
Usage: "use registry configuration files in `DIR` (e.g. for container signature storage)",
|
||||
Destination: &opts.registriesDirPath,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "override-arch",
|
||||
Value: "",
|
||||
Usage: "use `ARCH` instead of the architecture of the machine for choosing images",
|
||||
Name: "override-arch",
|
||||
Usage: "use `ARCH` instead of the architecture of the machine for choosing images",
|
||||
Destination: &opts.overrideArch,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "override-os",
|
||||
Value: "",
|
||||
Usage: "use `OS` instead of the running OS for choosing images",
|
||||
Name: "override-os",
|
||||
Usage: "use `OS` instead of the running OS for choosing images",
|
||||
Destination: &opts.overrideOS,
|
||||
},
|
||||
cli.DurationFlag{
|
||||
Name: "command-timeout",
|
||||
Usage: "timeout for the command execution",
|
||||
Destination: &opts.commandTimeout,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "registries-conf",
|
||||
Usage: "path to the registries.conf file",
|
||||
Destination: &opts.registriesConfPath,
|
||||
Hidden: true,
|
||||
},
|
||||
}
|
||||
app.Before = func(c *cli.Context) error {
|
||||
if c.GlobalBool("debug") {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
if c.GlobalIsSet("tls-verify") {
|
||||
logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
app.Before = opts.before
|
||||
app.Commands = []cli.Command{
|
||||
copyCmd,
|
||||
inspectCmd,
|
||||
layersCmd,
|
||||
deleteCmd,
|
||||
manifestDigestCmd,
|
||||
standaloneSignCmd,
|
||||
standaloneVerifyCmd,
|
||||
untrustedSignatureDumpCmd,
|
||||
copyCmd(&opts),
|
||||
inspectCmd(&opts),
|
||||
layersCmd(&opts),
|
||||
deleteCmd(&opts),
|
||||
manifestDigestCmd(),
|
||||
standaloneSignCmd(),
|
||||
standaloneVerifyCmd(),
|
||||
untrustedSignatureDumpCmd(),
|
||||
}
|
||||
return app
|
||||
return app, &opts
|
||||
}
|
||||
|
||||
// before is run by the cli package for any command, before running the command-specific handler.
|
||||
func (opts *globalOptions) before(ctx *cli.Context) error {
|
||||
if opts.debug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
if opts.tlsVerify.present {
|
||||
logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
app := createApp()
|
||||
app, _ := createApp()
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// getPolicyContext handles the global "policy" flag.
|
||||
func getPolicyContext(c *cli.Context) (*signature.PolicyContext, error) {
|
||||
policyPath := c.GlobalString("policy")
|
||||
var policy *signature.Policy // This could be cached across calls, if we had an application context.
|
||||
// getPolicyContext returns a *signature.PolicyContext based on opts.
|
||||
func (opts *globalOptions) getPolicyContext() (*signature.PolicyContext, error) {
|
||||
var policy *signature.Policy // This could be cached across calls in opts.
|
||||
var err error
|
||||
if c.GlobalBool("insecure-policy") {
|
||||
if opts.insecurePolicy {
|
||||
policy = &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
|
||||
} else if policyPath == "" {
|
||||
} else if opts.policyPath == "" {
|
||||
policy, err = signature.DefaultPolicy(nil)
|
||||
} else {
|
||||
policy, err = signature.NewPolicyFromFile(policyPath)
|
||||
policy, err = signature.NewPolicyFromFile(opts.policyPath)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return signature.NewPolicyContext(policy)
|
||||
}
|
||||
|
||||
// commandTimeoutContext returns a context.Context and a cancellation callback based on opts.
|
||||
// The caller should usually "defer cancel()" immediately after calling this.
|
||||
func (opts *globalOptions) commandTimeoutContext() (context.Context, context.CancelFunc) {
|
||||
ctx := context.Background()
|
||||
var cancel context.CancelFunc = func() {}
|
||||
if opts.commandTimeout > 0 {
|
||||
ctx, cancel = context.WithTimeout(ctx, opts.commandTimeout)
|
||||
}
|
||||
return ctx, cancel
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import "bytes"
|
||||
// runSkopeo creates an app object and runs it with args, with an implied first "skopeo".
|
||||
// Returns output intended for stdout and the returned error, if any.
|
||||
func runSkopeo(args ...string) (string, error) {
|
||||
app := createApp()
|
||||
app, _ := createApp()
|
||||
stdout := bytes.Buffer{}
|
||||
app.Writer = &stdout
|
||||
args = append([]string{"skopeo"}, args...)
|
||||
|
||||
@@ -3,17 +3,31 @@ package main
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func manifestDigest(context *cli.Context) error {
|
||||
if len(context.Args()) != 1 {
|
||||
type manifestDigestOptions struct {
|
||||
}
|
||||
|
||||
func manifestDigestCmd() cli.Command {
|
||||
opts := manifestDigestOptions{}
|
||||
return cli.Command{
|
||||
Name: "manifest-digest",
|
||||
Usage: "Compute a manifest digest of a file",
|
||||
ArgsUsage: "MANIFEST",
|
||||
Action: commandAction(opts.run),
|
||||
}
|
||||
}
|
||||
|
||||
func (opts *manifestDigestOptions) run(args []string, stdout io.Writer) error {
|
||||
if len(args) != 1 {
|
||||
return errors.New("Usage: skopeo manifest-digest manifest")
|
||||
}
|
||||
manifestPath := context.Args()[0]
|
||||
manifestPath := args[0]
|
||||
|
||||
man, err := ioutil.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
@@ -23,13 +37,6 @@ func manifestDigest(context *cli.Context) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error computing digest: %v", err)
|
||||
}
|
||||
fmt.Fprintf(context.App.Writer, "%s\n", digest)
|
||||
fmt.Fprintf(stdout, "%s\n", digest)
|
||||
return nil
|
||||
}
|
||||
|
||||
var manifestDigestCmd = cli.Command{
|
||||
Name: "manifest-digest",
|
||||
Usage: "Compute a manifest digest of a file",
|
||||
ArgsUsage: "MANIFEST",
|
||||
Action: manifestDigest,
|
||||
}
|
||||
|
||||
@@ -4,20 +4,41 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/containers/image/signature"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func standaloneSign(c *cli.Context) error {
|
||||
outputFile := c.String("output")
|
||||
if len(c.Args()) != 3 || outputFile == "" {
|
||||
type standaloneSignOptions struct {
|
||||
output string // Output file path
|
||||
}
|
||||
|
||||
func standaloneSignCmd() cli.Command {
|
||||
opts := standaloneSignOptions{}
|
||||
return cli.Command{
|
||||
Name: "standalone-sign",
|
||||
Usage: "Create a signature using local files",
|
||||
ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
|
||||
Action: commandAction(opts.run),
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "output, o",
|
||||
Usage: "output the signature to `SIGNATURE`",
|
||||
Destination: &opts.output,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
|
||||
if len(args) != 3 || opts.output == "" {
|
||||
return errors.New("Usage: skopeo standalone-sign manifest docker-reference key-fingerprint -o signature")
|
||||
}
|
||||
manifestPath := c.Args()[0]
|
||||
dockerReference := c.Args()[1]
|
||||
fingerprint := c.Args()[2]
|
||||
manifestPath := args[0]
|
||||
dockerReference := args[1]
|
||||
fingerprint := args[2]
|
||||
|
||||
manifest, err := ioutil.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
@@ -34,33 +55,33 @@ func standaloneSign(c *cli.Context) error {
|
||||
return fmt.Errorf("Error creating signature: %v", err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(outputFile, signature, 0644); err != nil {
|
||||
return fmt.Errorf("Error writing signature to %s: %v", outputFile, err)
|
||||
if err := ioutil.WriteFile(opts.output, signature, 0644); err != nil {
|
||||
return fmt.Errorf("Error writing signature to %s: %v", opts.output, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var standaloneSignCmd = cli.Command{
|
||||
Name: "standalone-sign",
|
||||
Usage: "Create a signature using local files",
|
||||
ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
|
||||
Action: standaloneSign,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "output, o",
|
||||
Usage: "output the signature to `SIGNATURE`",
|
||||
},
|
||||
},
|
||||
type standaloneVerifyOptions struct {
|
||||
}
|
||||
|
||||
func standaloneVerify(c *cli.Context) error {
|
||||
if len(c.Args()) != 4 {
|
||||
func standaloneVerifyCmd() cli.Command {
|
||||
opts := standaloneVerifyOptions{}
|
||||
return cli.Command{
|
||||
Name: "standalone-verify",
|
||||
Usage: "Verify a signature using local files",
|
||||
ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
|
||||
Action: commandAction(opts.run),
|
||||
}
|
||||
}
|
||||
|
||||
func (opts *standaloneVerifyOptions) run(args []string, stdout io.Writer) error {
|
||||
if len(args) != 4 {
|
||||
return errors.New("Usage: skopeo standalone-verify manifest docker-reference key-fingerprint signature")
|
||||
}
|
||||
manifestPath := c.Args()[0]
|
||||
expectedDockerReference := c.Args()[1]
|
||||
expectedFingerprint := c.Args()[2]
|
||||
signaturePath := c.Args()[3]
|
||||
manifestPath := args[0]
|
||||
expectedDockerReference := args[1]
|
||||
expectedFingerprint := args[2]
|
||||
signaturePath := args[3]
|
||||
|
||||
unverifiedManifest, err := ioutil.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
@@ -81,22 +102,35 @@ func standaloneVerify(c *cli.Context) error {
|
||||
return fmt.Errorf("Error verifying signature: %v", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(c.App.Writer, "Signature verified, digest %s\n", sig.DockerManifestDigest)
|
||||
fmt.Fprintf(stdout, "Signature verified, digest %s\n", sig.DockerManifestDigest)
|
||||
return nil
|
||||
}
|
||||
|
||||
var standaloneVerifyCmd = cli.Command{
|
||||
Name: "standalone-verify",
|
||||
Usage: "Verify a signature using local files",
|
||||
ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
|
||||
Action: standaloneVerify,
|
||||
// WARNING: Do not use the contents of this for ANY security decisions,
|
||||
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
|
||||
// There is NO REASON to expect the values to be correct, or not intentionally misleading
|
||||
// (including things like “✅ Verified by $authority”)
|
||||
//
|
||||
// The subcommand is undocumented, and it may be renamed or entirely disappear in the future.
|
||||
type untrustedSignatureDumpOptions struct {
|
||||
}
|
||||
|
||||
func untrustedSignatureDump(c *cli.Context) error {
|
||||
if len(c.Args()) != 1 {
|
||||
func untrustedSignatureDumpCmd() cli.Command {
|
||||
opts := untrustedSignatureDumpOptions{}
|
||||
return cli.Command{
|
||||
Name: "untrusted-signature-dump-without-verification",
|
||||
Usage: "Dump contents of a signature WITHOUT VERIFYING IT",
|
||||
ArgsUsage: "SIGNATURE",
|
||||
Hidden: true,
|
||||
Action: commandAction(opts.run),
|
||||
}
|
||||
}
|
||||
|
||||
func (opts *untrustedSignatureDumpOptions) run(args []string, stdout io.Writer) error {
|
||||
if len(args) != 1 {
|
||||
return errors.New("Usage: skopeo untrusted-signature-dump-without-verification signature")
|
||||
}
|
||||
untrustedSignaturePath := c.Args()[0]
|
||||
untrustedSignaturePath := args[0]
|
||||
|
||||
untrustedSignature, err := ioutil.ReadFile(untrustedSignaturePath)
|
||||
if err != nil {
|
||||
@@ -111,20 +145,6 @@ func untrustedSignatureDump(c *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(c.App.Writer, string(untrustedOut))
|
||||
fmt.Fprintln(stdout, string(untrustedOut))
|
||||
return nil
|
||||
}
|
||||
|
||||
// WARNING: Do not use the contents of this for ANY security decisions,
|
||||
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
|
||||
// There is NO REASON to expect the values to be correct, or not intentionally misleading
|
||||
// (including things like “✅ Verified by $authority”)
|
||||
//
|
||||
// The subcommand is undocumented, and it may be renamed or entirely disappear in the future.
|
||||
var untrustedSignatureDumpCmd = cli.Command{
|
||||
Name: "untrusted-signature-dump-without-verification",
|
||||
Usage: "Dump contents of a signature WITHOUT VERIFYING IT",
|
||||
ArgsUsage: "SIGNATURE",
|
||||
Hidden: true,
|
||||
Action: untrustedSignatureDump,
|
||||
}
|
||||
|
||||
11
cmd/skopeo/unshare.go
Normal file
@@ -0,0 +1,11 @@
|
||||
// +build !linux
|
||||
|
||||
package main
|
||||
|
||||
func maybeReexec() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func reexecIfNecessaryForImages(inputImageNames ...string) error {
|
||||
return nil
|
||||
}
|
||||
47
cmd/skopeo/unshare_linux.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/containers/buildah/pkg/unshare"
|
||||
"github.com/containers/image/storage"
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
)
|
||||
|
||||
var neededCapabilities = []capability.Cap{
|
||||
capability.CAP_CHOWN,
|
||||
capability.CAP_DAC_OVERRIDE,
|
||||
capability.CAP_FOWNER,
|
||||
capability.CAP_FSETID,
|
||||
capability.CAP_MKNOD,
|
||||
capability.CAP_SETFCAP,
|
||||
}
|
||||
|
||||
func maybeReexec() error {
|
||||
// With Skopeo we need only the subset of the root capabilities necessary
|
||||
// for pulling an image into local storage. Do not attempt to create a namespace
|
||||
// if we already have the capabilities we need.
|
||||
capabilities, err := capability.NewPid(0)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading the current capabilities sets")
|
||||
}
|
||||
for _, cap := range neededCapabilities {
|
||||
if !capabilities.Get(capability.EFFECTIVE, cap) {
|
||||
// We lack a capability we need, so create a user namespace
|
||||
unshare.MaybeReexecUsingUserNamespace(true)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func reexecIfNecessaryForImages(imageNames ...string) error {
|
||||
// Check whether containers-storage is used before doing unshare
|
||||
for _, imageName := range imageNames {
|
||||
transport := alltransports.TransportFromImageName(imageName)
|
||||
if transport != nil && transport.Name() == storage.Transport.Name() {
|
||||
return maybeReexec()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
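In practice, the re-exec path above only engages when a containers-storage image reference is involved. A hypothetical invocation that would trigger it (image names are placeholders):

```sh
# Copying into local container storage may need a user namespace,
# so skopeo re-execs itself before touching the store.
$ skopeo copy docker://docker.io/library/busybox:latest containers-storage:busybox:latest
```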
@@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
@@ -10,36 +11,184 @@ import (
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func contextFromGlobalOptions(c *cli.Context, flagPrefix string) (*types.SystemContext, error) {
|
||||
// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that cli.ShowSubcommandHelp should be called.
|
||||
type errorShouldDisplayUsage struct {
|
||||
error
|
||||
}
|
||||
|
||||
// commandAction intermediates between the cli.ActionFunc interface and the real handler,
|
||||
// primarily to ensure that cli.Context is not available to the handler, which in turn
|
||||
// makes sure that the cli.String() etc. flag access functions are not used,
|
||||
// and everything is done using the *Options structures and the Destination: members of cli.Flag.
|
||||
// handler may return errorShouldDisplayUsage to cause cli.ShowSubcommandHelp to be called.
|
||||
func commandAction(handler func(args []string, stdout io.Writer) error) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
err := handler(([]string)(c.Args()), c.App.Writer)
|
||||
if _, ok := err.(errorShouldDisplayUsage); ok {
|
||||
cli.ShowSubcommandHelp(c)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// sharedImageOptions collects CLI flags which are image-related, but do not change across images.
|
||||
// This really should be a part of globalOptions, but that would break existing users of (skopeo copy --authfile=).
|
||||
type sharedImageOptions struct {
|
||||
authFilePath string // Path to a */containers/auth.json
|
||||
}
|
||||
|
||||
// sharedImageFlags prepares a collection of CLI flags writing into sharedImageOptions, and the managed sharedImageOptions structure.
|
||||
func sharedImageFlags() ([]cli.Flag, *sharedImageOptions) {
|
||||
opts := sharedImageOptions{}
|
||||
return []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "authfile",
|
||||
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
|
||||
Destination: &opts.authFilePath,
|
||||
},
|
||||
}, &opts
|
||||
}
|
||||
|
||||
// imageOptions collects CLI flags which are the same across subcommands, but may be different for each image
|
||||
// (e.g. may differ between the source and destination of a copy)
|
||||
type imageOptions struct {
|
||||
global *globalOptions // May be shared across several imageOptions instances.
|
||||
shared *sharedImageOptions // May be shared across several imageOptions instances.
|
||||
credsOption optionalString // username[:password] for accessing a registry
|
||||
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
|
||||
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
sharedBlobDir string // A directory to use for OCI blobs, shared across repositories
|
||||
dockerDaemonHost string // docker-daemon: host to connect to
|
||||
noCreds bool // Access the registry anonymously
|
||||
}
|
||||
|
||||
// imageFlags prepares a collection of CLI flags writing into imageOptions, and the managed imageOptions structure.
|
||||
func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) ([]cli.Flag, *imageOptions) {
|
||||
opts := imageOptions{
|
||||
global: global,
|
||||
shared: shared,
|
||||
}
|
||||
|
||||
// This is horribly ugly, but we need to support the old option forms of (skopeo copy) for compatibility.
|
||||
// Don't add any more cases like this.
|
||||
credsOptionExtra := ""
|
||||
if credsOptionAlias != "" {
|
||||
credsOptionExtra += "," + credsOptionAlias
|
||||
}
|
||||
|
||||
return []cli.Flag{
|
||||
cli.GenericFlag{
|
||||
Name: flagPrefix + "creds" + credsOptionExtra,
|
||||
Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
|
||||
Value: newOptionalStringValue(&opts.credsOption),
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "cert-dir",
|
||||
Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry or daemon",
|
||||
Destination: &opts.dockerCertPath,
|
||||
},
|
||||
cli.GenericFlag{
|
||||
Name: flagPrefix + "tls-verify",
|
||||
Usage: "require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)",
|
||||
Value: newOptionalBoolValue(&opts.tlsVerify),
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "shared-blob-dir",
|
||||
Usage: "`DIRECTORY` to use to share blobs across OCI repositories",
|
||||
Destination: &opts.sharedBlobDir,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "daemon-host",
|
||||
Usage: "use docker daemon host at `HOST` (docker-daemon: only)",
|
||||
Destination: &opts.dockerDaemonHost,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: flagPrefix + "no-creds",
|
||||
Usage: "Access the registry anonymously",
|
||||
Destination: &opts.noCreds,
|
||||
},
|
||||
}, &opts
|
||||
}
|
||||
|
||||
// newSystemContext returns a *types.SystemContext corresponding to opts.
|
||||
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
|
||||
func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
ctx := &types.SystemContext{
|
||||
RegistriesDirPath: c.GlobalString("registries.d"),
|
||||
ArchitectureChoice: c.GlobalString("override-arch"),
|
||||
OSChoice: c.GlobalString("override-os"),
|
||||
DockerCertPath: c.String(flagPrefix + "cert-dir"),
|
||||
// DEPRECATED: keep this here for backward compatibility, but override
|
||||
// them if per subcommand flags are provided (see below).
|
||||
DockerInsecureSkipTLSVerify: !c.GlobalBoolT("tls-verify"),
|
||||
OSTreeTmpDirPath: c.String(flagPrefix + "ostree-tmp-dir"),
|
||||
OCISharedBlobDirPath: c.String(flagPrefix + "shared-blob-dir"),
|
||||
DirForceCompress: c.Bool(flagPrefix + "compress"),
|
||||
AuthFilePath: c.String("authfile"),
|
||||
DockerDaemonHost: c.String(flagPrefix + "daemon-host"),
|
||||
DockerDaemonCertPath: c.String(flagPrefix + "cert-dir"),
|
||||
DockerDaemonInsecureSkipTLSVerify: !c.BoolT(flagPrefix + "tls-verify"),
|
||||
RegistriesDirPath: opts.global.registriesDirPath,
|
||||
ArchitectureChoice: opts.global.overrideArch,
|
||||
OSChoice: opts.global.overrideOS,
|
||||
DockerCertPath: opts.dockerCertPath,
|
||||
OCISharedBlobDirPath: opts.sharedBlobDir,
|
||||
AuthFilePath: opts.shared.authFilePath,
|
||||
DockerDaemonHost: opts.dockerDaemonHost,
|
||||
DockerDaemonCertPath: opts.dockerCertPath,
|
||||
SystemRegistriesConfPath: opts.global.registriesConfPath,
|
||||
}
|
||||
if c.IsSet(flagPrefix + "tls-verify") {
|
||||
ctx.DockerInsecureSkipTLSVerify = !c.BoolT(flagPrefix + "tls-verify")
|
||||
if opts.tlsVerify.present {
|
||||
ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value
|
||||
}
|
||||
if c.IsSet(flagPrefix + "creds") {
|
||||
// DEPRECATED: We support this for backward compatibility, but override it if a per-image flag is provided.
|
||||
if opts.global.tlsVerify.present {
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.global.tlsVerify.value)
|
||||
}
|
||||
if opts.tlsVerify.present {
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
|
||||
}
|
||||
if opts.credsOption.present && opts.noCreds {
|
||||
return nil, errors.New("creds and no-creds cannot be specified at the same time")
|
||||
}
|
||||
if opts.credsOption.present {
|
||||
var err error
|
||||
ctx.DockerAuthConfig, err = getDockerAuth(c.String(flagPrefix + "creds"))
|
||||
ctx.DockerAuthConfig, err = getDockerAuth(opts.credsOption.value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if opts.noCreds {
|
||||
ctx.DockerAuthConfig = &types.DockerAuthConfig{}
|
||||
}
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
// imageDestOptions is a superset of imageOptions specialized for image destinations.
|
||||
type imageDestOptions struct {
|
||||
*imageOptions
|
||||
osTreeTmpDir string // A directory to use for OSTree temporary files
|
||||
dirForceCompression bool // Compress layers when saving to the dir: transport
|
||||
}
|
||||
|
||||
// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
|
||||
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) ([]cli.Flag, *imageDestOptions) {
|
||||
genericFlags, genericOptions := imageFlags(global, shared, flagPrefix, credsOptionAlias)
|
||||
opts := imageDestOptions{imageOptions: genericOptions}
|
||||
|
||||
return append(genericFlags, []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "ostree-tmp-dir",
|
||||
Usage: "`DIRECTORY` to use for OSTree temporary files",
|
||||
Destination: &opts.osTreeTmpDir,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: flagPrefix + "compress",
|
||||
Usage: "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)",
|
||||
Destination: &opts.dirForceCompression,
|
||||
},
|
||||
}...), &opts
|
||||
}
|
||||
|
||||
// newSystemContext returns a *types.SystemContext corresponding to opts.
|
||||
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
|
||||
func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
ctx, err := opts.imageOptions.newSystemContext()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx.OSTreeTmpDirPath = opts.osTreeTmpDir
|
||||
ctx.DirForceCompress = opts.dirForceCompression
|
||||
return ctx, err
|
||||
}
|
||||
|
||||
func parseCreds(creds string) (string, string, error) {
|
||||
if creds == "" {
|
||||
return "", "", errors.New("credentials can't be empty")
|
||||
@@ -67,13 +216,12 @@ func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
|
||||
|
||||
// parseImage converts image URL-like string to an initialized handler for that image.
|
||||
// The caller must call .Close() on the returned ImageCloser.
|
||||
func parseImage(ctx context.Context, c *cli.Context) (types.ImageCloser, error) {
|
||||
imgName := c.Args().First()
|
||||
ref, err := alltransports.ParseImageName(imgName)
|
||||
func parseImage(ctx context.Context, opts *imageOptions, name string) (types.ImageCloser, error) {
|
||||
ref, err := alltransports.ParseImageName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sys, err := contextFromGlobalOptions(c, "")
|
||||
sys, err := opts.newSystemContext()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -82,12 +230,12 @@ func parseImage(ctx context.Context, c *cli.Context) (types.ImageCloser, error)
|
||||
|
||||
// parseImageSource converts image URL-like string to an ImageSource.
|
||||
// The caller must call .Close() on the returned ImageSource.
|
||||
func parseImageSource(ctx context.Context, c *cli.Context, name string) (types.ImageSource, error) {
|
||||
func parseImageSource(ctx context.Context, opts *imageOptions, name string) (types.ImageSource, error) {
|
||||
ref, err := alltransports.ParseImageName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sys, err := contextFromGlobalOptions(c, "")
|
||||
sys, err := opts.newSystemContext()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
184
cmd/skopeo/utils_test.go
Normal file
@@ -0,0 +1,184 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/image/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// fakeGlobalOptions creates globalOptions and sets it according to flags.
|
||||
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
|
||||
func fakeGlobalOptions(t *testing.T, flags []string) *globalOptions {
|
||||
app, opts := createApp()
|
||||
|
||||
flagSet := flag.NewFlagSet(app.Name, flag.ContinueOnError)
|
||||
for _, f := range app.Flags {
|
||||
f.Apply(flagSet)
|
||||
}
|
||||
err := flagSet.Parse(flags)
|
||||
require.NoError(t, err)
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
// fakeImageOptions creates imageOptions and sets it according to globalFlags/cmdFlags.
|
||||
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
|
||||
func fakeImageOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageOptions {
|
||||
globalOpts := fakeGlobalOptions(t, globalFlags)
|
||||
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := imageFlags(globalOpts, sharedOpts, flagPrefix, "")
|
||||
flagSet := flag.NewFlagSet("fakeImageOptions", flag.ContinueOnError)
|
||||
for _, f := range append(sharedFlags, imageFlags...) {
|
||||
f.Apply(flagSet)
|
||||
}
|
||||
err := flagSet.Parse(cmdFlags)
|
||||
require.NoError(t, err)
|
||||
return imageOpts
|
||||
}
|
||||
|
||||
func TestImageOptionsNewSystemContext(t *testing.T) {
|
||||
// Default state
|
||||
opts := fakeImageOptions(t, "dest-", []string{}, []string{})
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{}, res)
|
||||
|
||||
// Set everything to non-default values.
|
||||
opts = fakeImageOptions(t, "dest-", []string{
|
||||
"--registries.d", "/srv/registries.d",
|
||||
"--override-arch", "overridden-arch",
|
||||
"--override-os", "overridden-os",
|
||||
}, []string{
|
||||
"--authfile", "/srv/authfile",
|
||||
"--dest-cert-dir", "/srv/cert-dir",
|
||||
"--dest-shared-blob-dir", "/srv/shared-blob-dir",
|
||||
"--dest-daemon-host", "daemon-host.example.com",
|
||||
"--dest-tls-verify=false",
|
||||
"--dest-creds", "creds-user:creds-password",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
AuthFilePath: "/srv/authfile",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
OCISharedBlobDirPath: "/srv/shared-blob-dir",
|
||||
DockerCertPath: "/srv/cert-dir",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
}, res)
|
||||
|
||||
// Global/per-command tlsVerify behavior
|
||||
for _, c := range []struct {
|
||||
global, cmd string
|
||||
expectedDocker types.OptionalBool
|
||||
expectedDockerDaemon bool
|
||||
}{
|
||||
{"", "", types.OptionalBoolUndefined, false},
|
||||
{"", "false", types.OptionalBoolTrue, true},
|
||||
{"", "true", types.OptionalBoolFalse, false},
|
||||
{"false", "", types.OptionalBoolTrue, false},
|
||||
{"false", "false", types.OptionalBoolTrue, true},
|
||||
{"false", "true", types.OptionalBoolFalse, false},
|
||||
{"true", "", types.OptionalBoolFalse, false},
|
||||
{"true", "false", types.OptionalBoolTrue, true},
|
||||
{"true", "true", types.OptionalBoolFalse, false},
|
||||
} {
|
||||
globalFlags := []string{}
|
||||
if c.global != "" {
|
||||
globalFlags = append(globalFlags, "--tls-verify="+c.global)
|
||||
}
|
||||
cmdFlags := []string{}
|
||||
if c.cmd != "" {
|
||||
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
|
||||
}
|
||||
opts := fakeImageOptions(t, "dest-", globalFlags, cmdFlags)
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
|
||||
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
|
||||
}
|
||||
|
||||
// Invalid option values
|
||||
opts = fakeImageOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
|
||||
_, err = opts.newSystemContext()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
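Stated in CLI terms, the precedence the table above encodes means a per-command --tls-verify always overrides the deprecated global flag. A hypothetical pair of invocations against a local registry (host and port are placeholders):

```sh
# Only the deprecated global flag is set: TLS verification is skipped.
$ skopeo --tls-verify=false inspect docker://localhost:5000/busybox
# The per-command flag wins: TLS verification stays enabled despite the global flag.
$ skopeo --tls-verify=false inspect --tls-verify=true docker://localhost:5000/busybox
```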
|
||||
// fakeImageDestOptions creates imageDestOptions and sets it according to globalFlags/cmdFlags.
|
||||
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
|
||||
func fakeImageDestOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageDestOptions {
|
||||
globalOpts := fakeGlobalOptions(t, globalFlags)
|
||||
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := imageDestFlags(globalOpts, sharedOpts, flagPrefix, "")
|
||||
flagSet := flag.NewFlagSet("fakeImageDestOptions", flag.ContinueOnError)
|
||||
for _, f := range append(sharedFlags, imageFlags...) {
|
||||
f.Apply(flagSet)
|
||||
}
|
||||
err := flagSet.Parse(cmdFlags)
|
||||
require.NoError(t, err)
|
||||
return imageOpts
|
||||
}
|
||||
|
||||
func TestImageDestOptionsNewSystemContext(t *testing.T) {
|
||||
// Default state
|
||||
opts := fakeImageDestOptions(t, "dest-", []string{}, []string{})
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{}, res)
|
||||
|
||||
// Explicitly set everything to default, except for when the default is “not present”
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{
|
||||
"--dest-compress=false",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{}, res)
|
||||
|
||||
// Set everything to non-default values.
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{
|
||||
"--registries.d", "/srv/registries.d",
|
||||
"--override-arch", "overridden-arch",
|
||||
"--override-os", "overridden-os",
|
||||
}, []string{
|
||||
"--authfile", "/srv/authfile",
|
||||
"--dest-cert-dir", "/srv/cert-dir",
|
||||
"--dest-ostree-tmp-dir", "/srv/ostree-tmp-dir",
|
||||
"--dest-shared-blob-dir", "/srv/shared-blob-dir",
|
||||
"--dest-compress=true",
|
||||
"--dest-daemon-host", "daemon-host.example.com",
|
||||
"--dest-tls-verify=false",
|
||||
"--dest-creds", "creds-user:creds-password",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
AuthFilePath: "/srv/authfile",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
OCISharedBlobDirPath: "/srv/shared-blob-dir",
|
||||
DockerCertPath: "/srv/cert-dir",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
|
||||
OSTreeTmpDirPath: "/srv/ostree-tmp-dir",
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
DirForceCompress: true,
|
||||
}, res)
|
||||
|
||||
// Invalid option values in imageOptions
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
|
||||
_, err = opts.newSystemContext()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
@@ -5,20 +5,37 @@
|
||||
_complete_() {
|
||||
local options_with_args=$1
|
||||
local boolean_options="$2 -h --help"
|
||||
local transports=$3
|
||||
|
||||
case "$prev" in
|
||||
$options_with_args)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
local option_with_args
|
||||
for option_with_args in $options_with_args $transports
|
||||
do
|
||||
if [ "$option_with_args" == "$prev" -o "$option_with_args" == "$cur" ]
|
||||
then
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
|
||||
;;
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
if [ -n "$transports" ]
|
||||
then
|
||||
compopt -o nospace
|
||||
COMPREPLY=( $( compgen -W "$transports" -- "$cur" ) )
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_skopeo_supported_transports() {
|
||||
local subcommand=$1
|
||||
|
||||
${PROG} $subcommand --help | grep "Supported transports" -A 1 | tail -n 1 | sed -e 's/,/:/g' -e 's/$/:/'
|
||||
}
|
||||
|
||||
_skopeo_copy() {
|
||||
local options_with_args="
|
||||
--authfile
|
||||
@@ -38,9 +55,15 @@ _skopeo_copy() {
|
||||
local boolean_options="
|
||||
--dest-compress
|
||||
--remove-signatures
|
||||
--src-no-creds
|
||||
--dest-no-creds
|
||||
"
|
||||
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
local transports="
|
||||
$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
|
||||
"
|
||||
|
||||
_complete_ "$options_with_args" "$boolean_options" "$transports"
|
||||
}
|
||||
|
||||
_skopeo_inspect() {
|
||||
@@ -50,15 +73,22 @@ _skopeo_inspect() {
|
||||
--cert-dir
|
||||
"
|
||||
local boolean_options="
|
||||
--config
|
||||
--raw
|
||||
--tls-verify
|
||||
--no-creds
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
|
||||
local transports="
|
||||
$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
|
||||
"
|
||||
|
||||
_complete_ "$options_with_args" "$boolean_options" "$transports"
|
||||
}
|
||||
|
||||
_skopeo_standalone_sign() {
|
||||
local options_with_args="
|
||||
-o --output
|
||||
-o --output
|
||||
"
|
||||
local boolean_options="
|
||||
"
|
||||
@@ -89,49 +119,56 @@ _skopeo_delete() {
|
||||
"
|
||||
local boolean_options="
|
||||
--tls-verify
|
||||
--no-creds
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
|
||||
local transports="
|
||||
$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
|
||||
"
|
||||
|
||||
_complete_ "$options_with_args" "$boolean_options" "$transports"
|
||||
}
|
||||
|
||||
_skopeo_layers() {
|
||||
local options_with_args="
|
||||
--creds
|
||||
--cert-dir
|
||||
--creds
|
||||
--cert-dir
|
||||
"
|
||||
local boolean_options="
|
||||
--tls-verify
|
||||
--tls-verify
|
||||
"
|
||||
_complete_ "$options_with_args" "$boolean_options"
|
||||
}
|
||||
|
||||
_skopeo_skopeo() {
|
||||
local options_with_args="
|
||||
--policy
|
||||
--registries.d
|
||||
--policy
|
||||
--registries.d
|
||||
--override-arch
|
||||
--override-os
|
||||
--command-timeout
|
||||
"
|
||||
local boolean_options="
|
||||
--insecure-policy
|
||||
--debug
|
||||
--version -v
|
||||
--help -h
|
||||
--insecure-policy
|
||||
--debug
|
||||
--version -v
|
||||
--help -h
|
||||
"
|
||||
commands=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
|
||||
|
||||
case "$prev" in
|
||||
$main_options_with_args_glob )
|
||||
return
|
||||
;;
|
||||
$main_options_with_args_glob )
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
|
||||
;;
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
@@ -149,15 +186,17 @@ _cli_bash_autocomplete() {
|
||||
local counter=1
|
||||
counter=1
|
||||
while [ $counter -lt $cword ]; do
|
||||
case "!${words[$counter]}" in
|
||||
*)
|
||||
command=$(echo "${words[$counter]}" | sed 's/-/_/g')
|
||||
cpos=$counter
|
||||
(( cpos++ ))
|
||||
break
|
||||
;;
|
||||
esac
|
||||
(( counter++ ))
|
||||
case "${words[$counter]}" in
|
||||
-*)
|
||||
;;
|
||||
*)
|
||||
command=$(echo "${words[$counter]}" | sed 's/-/_/g')
|
||||
cpos=$counter
|
||||
(( cpos++ ))
|
||||
break
|
||||
;;
|
||||
esac
|
||||
(( counter++ ))
|
||||
done
|
||||
|
||||
local completions_func=_skopeo_${command}
|
||||
|
||||
83
docs/skopeo-copy.1.md
Normal file
@@ -0,0 +1,83 @@
|
||||
% skopeo-copy(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-copy - Copy an image (manifest, filesystem layers, signatures) from one location to another.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_
|
||||
|
||||
## DESCRIPTION
|
||||
Copy an image (manifest, filesystem layers, signatures) from one location to another.
|
||||
|
||||
Uses the system's trust policy to validate images, rejects images not trusted by the policy.
|
||||
|
||||
_source-image_ use the "image name" format described above
|
||||
|
||||
_destination-image_ use the "image name" format described above
|
||||
|
||||
## OPTIONS
|
||||
|
||||
**--authfile** _path_
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)
|
||||
|
||||
**--quiet, -q** suppress output information when copying images
|
||||
|
||||
**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.
|
||||
|
||||
**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_
|
||||
|
||||
**--src-creds** _username[:password]_ for accessing the source registry
|
||||
|
||||
**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)
|
||||
|
||||
**--dest-creds** _username[:password]_ for accessing the destination registry
|
||||
|
||||
**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon
|
||||
|
||||
**--src-no-creds** _bool-value_ Access the registry anonymously.
|
||||
|
||||
**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true)
|
||||
|
||||
**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon
|
||||
|
||||
**--dest-no-creds** _bool-value_ Access the registry anonymously.
|
||||
|
||||
**--dest-ostree-tmp-dir** _path_ Directory to use for OSTree temporary files.
|
||||
|
||||
**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true)
|
||||
|
||||
**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
|
||||
|
||||
**--dest-daemon-host** _host_ Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
|
||||
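For instance, copying an image from a registry into a remote Docker daemon over plain HTTP (host and port are placeholders):

```sh
$ skopeo copy --dest-daemon-host http://daemon-host.example.com:2375 \
    docker://docker.io/library/busybox:latest docker-daemon:busybox:latest
```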
|
||||
Existing signatures, if any, are preserved as well.
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
To copy the layers of the docker.io busybox image to a local directory:
|
||||
```sh
|
||||
$ mkdir -p /var/lib/images/busybox
|
||||
$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
|
||||
$ ls /var/lib/images/busybox/*
|
||||
/var/lib/images/busybox/2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749.tar
|
||||
/var/lib/images/busybox/manifest.json
|
||||
/var/lib/images/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
|
||||
```
|
||||
|
||||
To copy and sign an image:
|
||||
|
||||
```sh
|
||||
$ skopeo copy --sign-by dev@example.com atomic:example/busybox:streaming atomic:example/busybox:gold
|
||||
```
|
||||
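To copy into a local test registry that serves plain HTTP, TLS verification for the destination can be disabled explicitly (the `localhost:5000` registry is a placeholder):

```sh
$ skopeo copy --dest-tls-verify=false \
    docker://docker.io/library/busybox:latest docker://localhost:5000/busybox:latest
```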
|
||||
## SEE ALSO
|
||||
skopeo(1), podman-login(1), docker-login(1)
|
||||
|
||||
## AUTHORS
|
||||
|
||||
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
|
||||
|
||||
52
docs/skopeo-delete.1.md
Normal file
@@ -0,0 +1,52 @@
|
||||
% skopeo-delete(1)
|
||||
|
||||
## NAME
|
||||
skopeo\-delete - Mark _image-name_ for deletion.
|
||||
|
||||
## SYNOPSIS
|
||||
**skopeo delete** _image-name_
|
||||
|
||||
Mark _image-name_ for deletion. To release the allocated disk space, you must log in to the container registry server and execute the container registry garbage collector. E.g.,
|
||||
|
||||
```
|
||||
/usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
|
||||
|
||||
Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml
|
||||
|
||||
If you are running the container registry inside of a container you would execute something like:
|
||||
|
||||
$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
|
||||
|
||||
```
|
||||
|
||||
**--authfile** _path_
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--creds** _username[:password]_ for accessing the registry
|
||||
|
||||
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry
|
||||
|
||||
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
|
||||
|
||||
**--no-creds** _bool-value_ Access the registry anonymously.
|
||||
|
||||
Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
Mark image example/pause for deletion from the registry.example.com registry:
|
||||
```sh
|
||||
$ skopeo delete docker://registry.example.com/example/pause:latest
|
||||
```
|
||||
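When the registry requires authentication, credentials can be supplied inline with `--creds` (the username and password are placeholders):

```sh
$ skopeo delete --creds testuser:testpassword docker://registry.example.com/example/pause:latest
```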
See above for additional details on using the command **delete**.
|
||||
|
||||
|
||||
## SEE ALSO
|
||||
skopeo(1), podman-login(1), docker-login(1)
|
||||
|
||||
## AUTHORS
|
||||
|
||||
Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
|
||||
|
||||
71
docs/skopeo-inspect.1.md
Normal file
@@ -0,0 +1,71 @@
% skopeo-inspect(1)

## NAME
skopeo\-inspect - Return low-level information about _image-name_ in a registry

## SYNOPSIS
**skopeo inspect** [**--raw**] [**--config**] _image-name_

## DESCRIPTION
Return low-level information about _image-name_ in a registry

**--raw** output the raw manifest; the default is to format it as JSON

_image-name_ name of image to retrieve information about

**--config** output the configuration in OCI format; the default is to format it as JSON

_image-name_ name of image to retrieve configuration for

**--config** **--raw** output the configuration in raw format

_image-name_ name of image to retrieve configuration for

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

**--no-creds** _bool-value_ Access the registry anonymously.

## EXAMPLES

To review information for the image fedora from the docker.io registry:
```sh
$ skopeo inspect docker://docker.io/fedora
{
    "Name": "docker.io/library/fedora",
    "Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
    "RepoTags": [
        "20",
        "21",
        "22",
        "23",
        "24",
        "heisenbug",
        "latest",
        "rawhide"
    ],
    "Created": "2016-06-20T19:33:43.220526898Z",
    "DockerVersion": "1.10.3",
    "Labels": {},
    "Architecture": "amd64",
    "Os": "linux",
    "Layers": [
        "sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
    ]
}
```

## SEE ALSO
skopeo(1), podman-login(1), docker-login(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
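As a small supplement to the flag descriptions above, the two --config variants side by side (reusing the fedora image from the example):

```sh
# Image configuration in OCI format, pretty-printed as JSON
$ skopeo inspect --config docker://docker.io/fedora

# The same configuration as a raw, unformatted blob
$ skopeo inspect --config --raw docker://docker.io/fedora
```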
docs/skopeo-manifest-digest.1.md (Normal file, 26 lines)
@@ -0,0 +1,26 @@
% skopeo-manifest-digest(1)

## NAME
skopeo\-manifest\-digest - Compute a manifest digest of _manifest-file_ and write it to standard output.

## SYNOPSIS
**skopeo manifest-digest** _manifest-file_

## DESCRIPTION

Compute a manifest digest of _manifest-file_ and write it to standard output.

## EXAMPLES

```sh
$ skopeo manifest-digest manifest.json
sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
```

## SEE ALSO
skopeo(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
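One natural pairing, sketched here, is to feed manifest-digest a manifest fetched with `skopeo inspect --raw`; busybox is an arbitrary public image chosen for illustration:

```sh
# Fetch a manifest, then compute its digest locally
$ skopeo inspect --raw docker://busybox:latest > busybox-manifest.json
$ skopeo manifest-digest busybox-manifest.json
sha256:...
```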
docs/skopeo-standalone-sign.1.md (Normal file, 34 lines)
@@ -0,0 +1,34 @@
% skopeo-standalone-sign(1)

## NAME
skopeo\-standalone\-sign - Sign an image using local files

## SYNOPSIS
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_

## DESCRIPTION
This is primarily a debugging tool, or useful for special cases, and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference to identify the image with

_key-fingerprint_ Key identity to use for signing

**--output**|**-o** output file

## EXAMPLES

```sh
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
$
```

## SEE ALSO
skopeo(1), skopeo-copy(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
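The _key-fingerprint_ argument must name a key in your GPG keyring; a sketch of discovering one (standard gpg invocation, not part of this man page):

```sh
# Print the fingerprint of the first secret key in the local keyring
$ gpg --list-secret-keys --with-colons --fingerprint \
      | awk -F: '/^fpr:/ {print $10; exit}'
```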
docs/skopeo-standalone-verify.1.md (Normal file, 36 lines)
@@ -0,0 +1,36 @@
% skopeo-standalone-verify(1)

## NAME
skopeo\-standalone\-verify - Verify an image signature

## SYNOPSIS
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_

## DESCRIPTION

Verify a signature using local files; the digest is printed on success.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference expected to identify the image in the signature

_key-fingerprint_ Expected identity of the signing key

_signature_ Path to signature file

**Note:** If you do use this, make sure that the image cannot be changed at the source location between the times of its verification and use.

## EXAMPLES

```sh
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```

## SEE ALSO
skopeo(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
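Putting the two standalone commands together, a sketch of a local sign-then-verify round trip (the image name and fingerprint are placeholders, as in the examples above):

```sh
# Grab a manifest to sign; any accessible image will do
$ skopeo inspect --raw docker://busybox:latest > busybox-manifest.json

# Sign with a key from the local GPG keyring, then verify the result
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox \
      1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox \
      1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
```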
docs/skopeo.1.md (245 lines)
@@ -1,11 +1,13 @@
% SKOPEO(1) Skopeo Man Pages
% Jhon Honce
% August 2016
# NAME
## NAME
skopeo -- Command line utility used to interact with local and remote container images and container image registries
# SYNOPSIS

## SYNOPSIS
**skopeo** [_global options_] _command_ [_command options_]
# DESCRIPTION

## DESCRIPTION
`skopeo` is a command line utility providing various operations with container images and container image registries.

`skopeo` can copy container images between various container image stores, converting them as necessary. For example, you can use `skopeo` to copy container images from one container registry to another.

@@ -31,7 +33,7 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

**docker://**_docker-reference_
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(kpod login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(podman login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.

**docker-archive:**_path_[**:**_docker-reference_]
An image is stored in a `docker save`-formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.
@@ -45,7 +47,7 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
**ostree:**_image_[**@**_/absolute/repo/path_]
An image in a local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_.

# OPTIONS
## OPTIONS

**--debug** enable debug output

@@ -59,234 +61,35 @@ Most commands refer to container images, using a _transport_`:`_details_ format.

**--override-os** _OS_ Use _OS_ instead of the running OS for choosing images.

**--command-timeout** _duration_ Timeout for the command execution.

**--help**|**-h** Show help

**--version**|**-v** print the version number

# COMMANDS
## COMMANDS

## skopeo copy
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_
| Command | Description |
| ------- | ----------- |
| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark image-name for deletion. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest of manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Sign an image. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md) | Verify an image. |

Copy an image (manifest, filesystem layers, signatures) from one location to another.

Uses the system's trust policy to validate images, rejects images not trusted by the policy.

_source-image_ use the "image name" format described above

_destination-image_ use the "image name" format described above

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json, which is set using `kpod login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)

**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.

**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_

**--src-creds** _username[:password]_ for accessing the source registry

**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)

**--dest-creds** _username[:password]_ for accessing the destination registry

**--src-cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the source registry or daemon

**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to the container source registry or daemon (defaults to true)

**--dest-cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the destination registry or daemon

**--dest-ostree-tmp-dir** _path_ Directory to use for OSTree temporary files.

**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to the container destination registry or daemon (defaults to true)

**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

**--dest-daemon-host** _host_ Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

Existing signatures, if any, are preserved as well.

## skopeo delete
**skopeo delete** _image-name_

Mark _image-name_ for deletion. To release the allocated disk space, you must log in to the container registry server and execute the container registry garbage collector. E.g.,

```
/usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
```

Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml

If you are running the container registry inside of a container you would execute something like:

```
$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
```

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json, which is set using `kpod login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.

## skopeo inspect
**skopeo inspect** [**--raw**] _image-name_

Return low-level information about _image-name_ in a registry

**--raw** output raw manifest, default is to format in JSON

_image-name_ name of image to retrieve information about

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json, which is set using `kpod login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

## skopeo manifest-digest
**skopeo manifest-digest** _manifest-file_

Compute a manifest digest of _manifest-file_ and write it to standard output.

## skopeo standalone-sign
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_

This is primarily a debugging tool, or useful for special cases, and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference to identify the image with

_key-fingerprint_ Key identity to use for signing

**--output**|**-o** output file

## skopeo standalone-verify
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_

Verify a signature using local files, digest will be printed on success.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference expected to identify the image in the signature

_key-fingerprint_ Expected identity of the signing key

_signature_ Path to signature file

**Note:** If you do use this, make sure that the image cannot be changed at the source location between the times of its verification and use.

## skopeo help
show help for `skopeo`

# FILES
## FILES
**/etc/containers/policy.json**
Default trust policy file, if **--policy** is not specified.
The policy format is documented in https://github.com/containers/image/blob/master/docs/policy.json.md .
The policy format is documented in https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md .

**/etc/containers/registries.d**
Default directory containing registry configuration, if **--registries.d** is not specified.
The contents of this directory are documented in https://github.com/containers/image/blob/master/docs/registries.d.md .
The contents of this directory are documented in https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md .

# EXAMPLES
## SEE ALSO
podman-login(1), docker-login(1)

## skopeo copy
To copy the layers of the docker.io busybox image to a local directory:
```sh
$ mkdir -p /var/lib/images/busybox
$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
$ ls /var/lib/images/busybox/*
/var/lib/images/busybox/2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749.tar
/var/lib/images/busybox/manifest.json
/var/lib/images/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
```

To copy and sign an image:

```sh
$ skopeo copy --sign-by dev@example.com atomic:example/busybox:streaming atomic:example/busybox:gold
```
## skopeo delete
Mark the image example/pause for deletion from the registry.example.com registry:
```sh
$ skopeo delete docker://registry.example.com/example/pause:latest
```
See above for additional details on using the command **delete**.

## skopeo inspect
To review information for the image fedora from the docker.io registry:
```sh
$ skopeo inspect docker://docker.io/fedora
{
    "Name": "docker.io/library/fedora",
    "Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
    "RepoTags": [
        "20",
        "21",
        "22",
        "23",
        "24",
        "heisenbug",
        "latest",
        "rawhide"
    ],
    "Created": "2016-06-20T19:33:43.220526898Z",
    "DockerVersion": "1.10.3",
    "Labels": {},
    "Architecture": "amd64",
    "Os": "linux",
    "Layers": [
        "sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
    ]
}
```
## skopeo layers
Another method to retrieve the layers for the busybox image from the docker.io registry:
```sh
$ skopeo layers docker://busybox
$ ls layers-500650331/
8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
manifest.json
a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4.tar
```
## skopeo manifest-digest
```sh
$ skopeo manifest-digest manifest.json
sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
```
## skopeo standalone-sign
```sh
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
$
```

See `skopeo copy` above for the preferred method of signing images.
## skopeo standalone-verify
```sh
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```

# SEE ALSO
kpod-login(1), docker-login(1)

# AUTHORS
## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
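The FILES section above only points at external documentation for policy.json; for orientation, here is a minimal permissive policy written as a sketch (production setups would normally use `signedBy` entries instead, as in the signing tests later in this changeset):

```sh
# Write a minimal trust policy that accepts any image
$ cat > /etc/containers/policy.json <<'EOF'
{
    "default": [
        { "type": "insecureAcceptAnything" }
    ]
}
EOF
```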
hack/btrfs_installed_tag.sh (Executable file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/bash
cc -E - > /dev/null 2> /dev/null << EOF
#include <btrfs/ioctl.h>
EOF
if test $? -ne 0 ; then
    echo exclude_graphdriver_btrfs
fi
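Presumably this probe feeds conditional Go build tags; a sketch of how such helpers are typically consumed (the exact Makefile wiring is an assumption):

```sh
# Collect conditional build tags, then hand them to go build
$ BUILDTAGS="$(hack/btrfs_installed_tag.sh) $(hack/ostree_tag.sh)"
$ go build -tags "$BUILDTAGS" ./cmd/skopeo
```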
@@ -6,7 +6,7 @@ set -e
#
# Requirements:
# - The current directory should be a checkout of the skopeo source code
#   (https://github.com/projectatomic/skopeo). Whatever version is checked out
#   (https://github.com/containers/skopeo). Whatever version is checked out
#   will be built.
# - The script is intended to be run inside the docker container specified
#   in the Dockerfile at the root of the source. In other words:
@@ -19,7 +19,7 @@ set -e

set -o pipefail

export SKOPEO_PKG='github.com/projectatomic/skopeo'
export SKOPEO_PKG='github.com/containers/skopeo'
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export MAKEDIR="$SCRIPTDIR/make"
@@ -4,7 +4,7 @@ if [ -z "$VALIDATE_UPSTREAM" ]; then
|
||||
# this is kind of an expensive check, so let's not do this twice if we
|
||||
# are running more than one validate bundlescript
|
||||
|
||||
VALIDATE_REPO='https://github.com/projectatomic/skopeo.git'
|
||||
VALIDATE_REPO='https://github.com/containers/skopeo.git'
|
||||
VALIDATE_BRANCH='master'
|
||||
|
||||
if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
|
||||
|
||||
hack/make/test-system (Executable file, 18 lines)
@@ -0,0 +1,18 @@
#!/bin/bash
set -e

# Before running podman for the first time, make sure
# to set storage to vfs (not overlay): podman-in-podman
# doesn't work with overlay. And, disable mountopt,
# which causes an error with vfs.
sed -i \
    -e 's/^driver\s*=.*/driver = "vfs"/' \
    -e 's/^mountopt/#mountopt/' \
    /etc/containers/storage.conf

# Build skopeo, install into /usr/bin
make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make install

# Run tests
SKOPEO_BINARY=/usr/bin/skopeo bats --tap systemtest
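Note that the script rewrites /etc/containers/storage.conf and installs into /usr/bin, so it is meant for a disposable environment; a sketch of a direct invocation (CI normally drives it through the hack/make machinery):

```sh
# Run inside a throwaway container holding a skopeo source checkout
$ BUILDTAGS="$(hack/btrfs_installed_tag.sh) $(hack/ostree_tag.sh)" hack/make/test-system
```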
hack/ostree_tag.sh (Executable file, 6 lines)
@@ -0,0 +1,6 @@
#!/bin/bash
if pkg-config ostree-1 2> /dev/null ; then
    echo ostree
else
    echo containers_image_ostree_stub
fi
@@ -4,13 +4,14 @@ set -e
export GOPATH=$(pwd)/_gopath
export PATH=$GOPATH/bin:$PATH

_projectatomic="${GOPATH}/src/github.com/projectatomic"
mkdir -vp ${_projectatomic}
ln -vsf $(pwd) ${_projectatomic}/skopeo
_containers="${GOPATH}/src/github.com/containers"
mkdir -vp ${_containers}
ln -vsf $(pwd) ${_containers}/skopeo

go get -u github.com/cpuguy83/go-md2man github.com/golang/lint/golint
go version
go get -u github.com/cpuguy83/go-md2man golang.org/x/lint/golint

cd ${_projectatomic}/skopeo
cd ${_containers}/skopeo
make validate-local test-unit-local binary-local
sudo make install
skopeo -v
hack/tree_status.sh (Executable file, 13 lines)
@@ -0,0 +1,13 @@
#!/bin/bash
set -e

STATUS=$(git status --porcelain)
if [[ -z $STATUS ]]
then
    echo "tree is clean"
else
    echo "tree is dirty, please commit all changes and sync the vendor.conf"
    echo ""
    echo "$STATUS"
    exit 1
fi
@@ -5,8 +5,8 @@ import (
	"os/exec"
	"testing"

	"github.com/containers/skopeo/version"
	"github.com/go-check/check"
	"github.com/projectatomic/skopeo/version"
)

const (
@@ -87,3 +87,7 @@ func (s *SkopeoSuite) TestNoNeedAuthToPrivateRegistryV2ImageNotFound(c *check.C)
	wanted = ".*unauthorized: authentication required.*"
	c.Assert(string(out), check.Not(check.Matches), "(?s)"+wanted) // (?s) : '.' will also match newlines
}

func (s *SkopeoSuite) TestInspectFailsWhenReferenceIsInvalid(c *check.C) {
	assertSkopeoFails(c, `.*Invalid image name.*`, "inspect", "unknown")
}

@@ -14,7 +14,7 @@ import (
	"github.com/containers/image/manifest"
	"github.com/containers/image/signature"
	"github.com/go-check/check"
	"github.com/opencontainers/go-digest"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/opencontainers/image-tools/image"
)
@@ -64,7 +64,7 @@ func (s *CopySuite) SetUpSuite(c *check.C) {
	os.Setenv("GNUPGHOME", s.gpgHome)

	for _, key := range []string{"personal", "official"} {
		batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%commit\n",
		batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%no-protection\n%%commit\n",
			key, key)
		runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")

@@ -662,3 +662,41 @@ func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
	mimeType := manifest.GuessMIMEType(manifestBlob)
	c.Assert(mimeType, check.Equals, expectedMIMEType)
}

const regConfFixture = "./fixtures/registries.conf"

func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) {
	dir, err := ioutil.TempDir("", "copy-mirror")
	c.Assert(err, check.IsNil)

	assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
		"docker://mirror.invalid/busybox", "dir:"+dir)
}

func (s *SkopeoSuite) TestFailureCopySrcWithMirrorsUnavailable(c *check.C) {
	dir, err := ioutil.TempDir("", "copy-mirror")
	c.Assert(err, check.IsNil)

	assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
		"docker://invalid.invalid/busybox", "dir:"+dir)
}

func (s *SkopeoSuite) TestSuccessCopySrcWithMirrorAndPrefix(c *check.C) {
	dir, err := ioutil.TempDir("", "copy-mirror")
	c.Assert(err, check.IsNil)

	assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
		"docker://gcr.invalid/foo/bar/busybox", "dir:"+dir)
}

func (s *SkopeoSuite) TestFailureCopySrcWithMirrorAndPrefixUnavailable(c *check.C) {
	dir, err := ioutil.TempDir("", "copy-mirror")
	c.Assert(err, check.IsNil)

	assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
		"docker://gcr.invalid/wrong/prefix/busybox", "dir:"+dir)
}

func (s *CopySuite) TestCopyFailsWhenReferenceIsInvalid(c *check.C) {
	assertSkopeoFails(c, `.*Invalid image name.*`, "copy", "unknown:transport", "unknown:test")
}
integration/fixtures/registries.conf (Normal file, 28 lines)
@@ -0,0 +1,28 @@
[[registry]]
location = "mirror.invalid"
mirror = [
  { location = "mirror-0.invalid" },
  { location = "mirror-1.invalid" },
  { location = "gcr.io/google-containers" },
]

# This entry is currently unused and exists only to ensure
# that the mirror.invalid/busybox is not rewritten twice.
[[registry]]
location = "gcr.io"
prefix = "gcr.io/google-containers"

[[registry]]
location = "invalid.invalid"
mirror = [
  { location = "invalid-mirror-0.invalid" },
  { location = "invalid-mirror-1.invalid" },
]

[[registry]]
location = "gcr.invalid"
prefix = "gcr.invalid/foo/bar"
mirror = [
  { location = "wrong-mirror-0.invalid" },
  { location = "gcr.io/google-containers" },
]
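A sketch of exercising this fixture by hand, outside the Go tests (the path is relative to the integration directory; mirror.invalid does not resolve, so the copy succeeds only via the configured mirrors):

```sh
# Resolve mirror.invalid/busybox through the mirror list in the fixture
$ skopeo --registries-conf=fixtures/registries.conf \
      copy docker://mirror.invalid/busybox dir:/tmp/busybox-from-mirror
```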
@@ -44,7 +44,7 @@ func (s *SigningSuite) SetUpSuite(c *check.C) {
	c.Assert(err, check.IsNil)
	os.Setenv("GNUPGHOME", s.gpgHome)

	runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%commit\n", gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")
	runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")

	lines, err := exec.Command(gpgBinary, "--homedir", s.gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
	c.Assert(err, check.IsNil)
systemtest/001-basic.bats (Normal file, 19 lines)
@@ -0,0 +1,19 @@
#!/usr/bin/env bats
#
# Simplest set of skopeo tests. If any of these fail, we have serious problems.
#

load helpers

# Override standard setup! We don't yet trust anything
function setup() {
    :
}

@test "skopeo version emits reasonable output" {
    run_skopeo --version

    expect_output --substring "skopeo version [0-9.]+"
}

# vim: filetype=sh
systemtest/010-inspect.bats (Normal file, 67 lines)
@@ -0,0 +1,67 @@
#!/usr/bin/env bats
#
# Simplest test for skopeo inspect
#

load helpers

@test "inspect: basic" {
    workdir=$TESTDIR/inspect

    remote_image=docker://quay.io/libpod/alpine_labels:latest
    # Inspect remote source, then pull it. There's a small race condition
    # in which the remote image can get updated between the inspect and
    # the copy; let's just not worry about it.
    run_skopeo inspect $remote_image
    inspect_remote=$output

    # Now pull it into a directory
    run_skopeo copy $remote_image dir:$workdir
    expect_output --substring "Getting image source signatures"
    expect_output --substring "Writing manifest to image destination"

    # Unpacked contents must include a manifest and version
    [ -e $workdir/manifest.json ]
    [ -e $workdir/version ]

    # Now run inspect locally
    run_skopeo inspect dir:$workdir
    inspect_local=$output

    # Each SHA-named file must be listed in the output of 'inspect'
    for sha in $(find $workdir -type f | xargs -l1 basename | egrep '^[0-9a-f]{64}$'); do
        expect_output --from="$inspect_local" --substring "sha256:$sha" \
                      "Locally-extracted SHA file is present in 'inspect'"
    done

    # Simple sanity check on 'inspect' output.
    # For each of the given keys (LHS of the table below):
    #    1) Get local and remote values
    #    2) Sanity-check local value using simple expression
    #    3) Confirm that local and remote values match.
    #
    # The reason for (2) is to make sure that we don't compare bad results
    #
    # The reason for a hardcoded list, instead of 'jq keys', is that RepoTags
    # is always empty locally, but a list remotely.
    while read key expect; do
        local=$(echo "$inspect_local" | jq -r ".$key")
        remote=$(echo "$inspect_remote" | jq -r ".$key")

        expect_output --from="$local" --substring "$expect" \
                      "local $key is sane"

        expect_output --from="$remote" "$local" \
                      "local $key matches remote"
    done <<END_EXPECT
Architecture   amd64
Created        [0-9-]+T[0-9:]+\.[0-9]+Z
Digest         sha256:[0-9a-f]{64}
DockerVersion  [0-9]+\.[0-9][0-9.-]+
Labels         \\\{.*PODMAN.*podman.*\\\}
Layers         \\\[.*sha256:.*\\\]
Os             linux
END_EXPECT
}

# vim: filetype=sh
systemtest/020-copy.bats (Normal file, 79 lines)
@@ -0,0 +1,79 @@
#!/usr/bin/env bats
#
# Copy tests
#

load helpers

function setup() {
    standard_setup

    start_registry reg
}

# From remote, to dir1, to local, to dir2;
# compare dir1 and dir2, expect no changes
@test "copy: dir, round trip" {
    local remote_image=docker://busybox:latest
    local localimg=docker://localhost:5000/busybox:unsigned

    local dir1=$TESTDIR/dir1
    local dir2=$TESTDIR/dir2

    run_skopeo copy $remote_image dir:$dir1
    run_skopeo copy --dest-tls-verify=false dir:$dir1 $localimg
    run_skopeo copy --src-tls-verify=false $localimg dir:$dir2

    # Both extracted copies must be identical
    diff -urN $dir1 $dir2
}

# Same as above, but using 'oci:' instead of 'dir:' and with a :latest tag
@test "copy: oci, round trip" {
    local remote_image=docker://busybox:latest
    local localimg=docker://localhost:5000/busybox:unsigned

    local dir1=$TESTDIR/oci1
    local dir2=$TESTDIR/oci2

    run_skopeo copy $remote_image oci:$dir1:latest
    run_skopeo copy --dest-tls-verify=false oci:$dir1:latest $localimg
    run_skopeo copy --src-tls-verify=false $localimg oci:$dir2:latest

    # Both extracted copies must be identical
    diff -urN $dir1 $dir2
}

# Same image, extracted once with :tag and once without
@test "copy: oci w/ and w/o tags" {
    local remote_image=docker://busybox:latest

    local dir1=$TESTDIR/dir1
    local dir2=$TESTDIR/dir2

    run_skopeo copy $remote_image oci:$dir1
    run_skopeo copy $remote_image oci:$dir2:withtag

    # Both extracted copies must be identical, except for index.json
    diff -urN --exclude=index.json $dir1 $dir2

    # ...which should differ only in the tag. (But that's too hard to check)
    grep '"org.opencontainers.image.ref.name":"withtag"' $dir2/index.json
}

# This one seems unlikely to get fixed
@test "copy: bug 651" {
    skip "Enable this once skopeo issue #651 has been fixed"

    run_skopeo copy --dest-tls-verify=false \
               docker://quay.io/libpod/alpine_labels:latest \
               docker://localhost:5000/foo
}

teardown() {
    podman rm -f reg

    standard_teardown
}

# vim: filetype=sh
systemtest/030-local-registry-tls.bats (Normal file, 32 lines)
@@ -0,0 +1,32 @@
#!/usr/bin/env bats
#
# Confirm that skopeo will push to and pull from a local
# registry with locally-created TLS certificates.
#
load helpers

function setup() {
    standard_setup

    start_registry --with-cert reg
}

@test "local registry, with cert" {
    # Push to local registry...
    run_skopeo copy --dest-cert-dir=$TESTDIR/client-auth \
               docker://busybox:latest \
               docker://localhost:5000/busybox:unsigned

    # ...and pull it back out
    run_skopeo copy --src-cert-dir=$TESTDIR/client-auth \
               docker://localhost:5000/busybox:unsigned \
               dir:$TESTDIR/extracted
}

teardown() {
    podman rm -f reg

    standard_teardown
}

# vim: filetype=sh
systemtest/040-local-registry-auth.bats (Normal file, 78 lines)
@@ -0,0 +1,78 @@
#!/usr/bin/env bats
#
# Tests with a local registry with auth
#

load helpers

function setup() {
    standard_setup

    # Remove old/stale cred file
    _cred_dir=$TESTDIR/credentials
    export XDG_RUNTIME_DIR=$_cred_dir
    mkdir -p $_cred_dir/containers
    rm -f $_cred_dir/containers/auth.json

    # Start authenticated registry with random password
    testuser=testuser
    testpassword=$(random_string 15)

    start_registry --testuser=testuser --testpassword=$testpassword reg
}

@test "auth: credentials on command line" {
    # No creds
    run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/nonesuch
    expect_output --substring "unauthorized: authentication required"

    # Wrong user
    run_skopeo 1 inspect --tls-verify=false --creds=baduser:badpassword \
               docker://localhost:5000/nonesuch
    expect_output --substring "unauthorized: authentication required"

    # Wrong password
    run_skopeo 1 inspect --tls-verify=false --creds=$testuser:badpassword \
               docker://localhost:5000/nonesuch
    expect_output --substring "unauthorized: authentication required"

    # Correct creds, but no such image
    run_skopeo 1 inspect --tls-verify=false --creds=$testuser:$testpassword \
               docker://localhost:5000/nonesuch
    expect_output --substring "manifest unknown: manifest unknown"

    # These should pass
    run_skopeo copy --dest-tls-verify=false --dcreds=$testuser:$testpassword \
               docker://busybox:latest docker://localhost:5000/busybox:mine
    run_skopeo inspect --tls-verify=false --creds=$testuser:$testpassword \
               docker://localhost:5000/busybox:mine
    expect_output --substring "localhost:5000/busybox"
}

@test "auth: credentials via podman login" {
    # Logged in: skopeo should work
    podman login --tls-verify=false -u $testuser -p $testpassword localhost:5000

    run_skopeo copy --dest-tls-verify=false \
               docker://busybox:latest docker://localhost:5000/busybox:mine
    run_skopeo inspect --tls-verify=false docker://localhost:5000/busybox:mine
    expect_output --substring "localhost:5000/busybox"

    # Logged out: should fail
    podman logout localhost:5000

    run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/busybox:mine
    expect_output --substring "unauthorized: authentication required"
}

teardown() {
    podman rm -f reg

    if [[ -n $_cred_dir ]]; then
        rm -rf $_cred_dir
    fi

    standard_teardown
}

# vim: filetype=sh
systemtest/050-signing.bats (Normal file, 151 lines)
@@ -0,0 +1,151 @@
#!/usr/bin/env bats
#
# Tests with gpg signing
#

load helpers

function setup() {
    standard_setup

    # Create dummy gpg keys
    export GNUPGHOME=$TESTDIR/skopeo-gpg
    mkdir --mode=0700 $GNUPGHOME

    # gpg on f30 needs this, otherwise:
    #    gpg: agent_genkey failed: Inappropriate ioctl for device
    # ...but gpg on f29 (and, probably, Ubuntu) doesn't grok this
    GPGOPTS='--pinentry-mode loopback'
    if gpg --pinentry-mode asdf 2>&1 | grep -qi 'Invalid option'; then
        GPGOPTS=
    fi

    for k in alice bob; do
        gpg --batch $GPGOPTS --gen-key --passphrase '' <<END_GPG
Key-Type: RSA
Name-Real: Test key - $k
Name-email: $k@test.redhat.com
%commit
END_GPG

        gpg --armor --export $k@test.redhat.com >$GNUPGHOME/pubkey-$k.gpg
    done

    # Registries. The important part here seems to be sigstore,
    # because (I guess?) the registry itself has no mechanism
    # for storing or validating signatures.
    REGISTRIES_D=$TESTDIR/registries.d
    mkdir $REGISTRIES_D $TESTDIR/sigstore
    cat >$REGISTRIES_D/registries.yaml <<EOF
docker:
    localhost:5000:
        sigstore: file://$TESTDIR/sigstore
EOF

    # Policy file. Basically, require /myns/alice and /myns/bob
    # to be signed; allow /open; and reject anything else.
    POLICY_JSON=$TESTDIR/policy.json
    cat >$POLICY_JSON <<END_POLICY_JSON
{
    "default": [
        {
            "type": "reject"
        }
    ],
    "transports": {
        "docker": {
            "localhost:5000/myns/alice": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "$GNUPGHOME/pubkey-alice.gpg"
                }
            ],
            "localhost:5000/myns/bob": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "$GNUPGHOME/pubkey-bob.gpg"
                }
            ],
            "localhost:5000/open": [
                {
                    "type": "insecureAcceptAnything"
                }
            ]
        }
    }
}
END_POLICY_JSON

    start_registry reg
}

@test "signing" {
    run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null
    if [[ "$output" =~ 'signing is not supported' ]]; then
        skip "skopeo built without support for creating signatures"
        return 1
    fi
    if [ "$status" -ne 0 ]; then
        die "exit code is $status; expected $expected_rc"
    fi

    # Cache local copy
    run_skopeo copy docker://busybox:latest dir:$TESTDIR/busybox

    # Push a bunch of images. Do so *without* --policy flag; this lets us
    # sign or not, creating images that will or won't conform to policy.
    while read path sig comments; do
        local sign_opt=
        if [[ $sig != '-' ]]; then
            sign_opt="--sign-by=${sig}@test.redhat.com"
        fi
        run_skopeo --registries.d $REGISTRIES_D \
                   copy --dest-tls-verify=false \
                   $sign_opt \
                   dir:$TESTDIR/busybox \
                   docker://localhost:5000$path
    done <<END_PUSH
/myns/alice:signed        alice   # Properly-signed image
/myns/alice:unsigned      -       # Unsigned image to path that requires signature
/myns/bob:signedbyalice   alice   # Bad signature: image under /bob
/myns/carol:latest        -       # No signature
/open/forall:latest       -       # No signature, but none needed
END_PUSH

    # Done pushing. Now try to fetch. From here on we use the --policy option.
    # The table below lists the paths to fetch, and the expected errors (or
    # none, if we expect them to pass).
    while read path expected_error; do
        expected_rc=
        if [[ -n $expected_error ]]; then
            expected_rc=1
        fi

        rm -rf $TESTDIR/d
        run_skopeo $expected_rc \
                   --registries.d $REGISTRIES_D \
                   --policy $POLICY_JSON \
                   copy --src-tls-verify=false \
                   docker://localhost:5000$path \
                   dir:$TESTDIR/d
        if [[ -n $expected_error ]]; then
            expect_output --substring "Source image rejected: $expected_error"
        fi
    done <<END_TESTS
/myns/alice:signed
/myns/bob:signedbyalice    Invalid GPG signature
/myns/alice:unsigned       Signature for identity localhost:5000/myns/alice:signed is not accepted
/myns/carol:latest         Running image docker://localhost:5000/myns/carol:latest is rejected by policy.
/open/forall:latest
END_TESTS
}

teardown() {
    podman rm -f reg

    standard_teardown
}

# vim: filetype=sh
systemtest/helpers.bash (Normal file, 344 lines)
@@ -0,0 +1,344 @@
#!/bin/bash

SKOPEO_BINARY=${SKOPEO_BINARY:-$(dirname ${BASH_SOURCE})/../skopeo}

# Default timeout for a skopeo command.
SKOPEO_TIMEOUT=${SKOPEO_TIMEOUT:-300}

###############################################################################
# BEGIN setup/teardown

# Provide common setup and teardown functions, but do not name them such!
# That way individual tests can override with their own setup/teardown,
# while retaining the ability to include these if they so desire.

function standard_setup() {
    # Argh. Although BATS provides $BATS_TMPDIR, it's just /tmp!
    # That's bloody worthless. Let's make our own, in which subtests
    # can write whatever they like and trust that it'll be deleted
    # on cleanup.
    TESTDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-/tmp} skopeo_bats.XXXXXX)
}

function standard_teardown() {
    if [[ -n $TESTDIR ]]; then
        rm -rf $TESTDIR
    fi
}

# Individual .bats files may override or extend these
function setup() {
    standard_setup
}

function teardown() {
    standard_teardown
}

# END setup/teardown
###############################################################################
# BEGIN standard helpers for running skopeo and testing results

#################
#  run_skopeo  #  Invoke skopeo, with timeout, using BATS 'run'
#################
#
# This is the preferred mechanism for invoking skopeo:
#
#   * we use 'timeout' to abort (with a diagnostic) if something
#     takes too long; this is preferable to a CI hang.
#   * we log the command run and its output. This doesn't normally
#     appear in BATS output, but it will if there's an error.
#   * we check exit status. Since the normal desired code is 0,
#     that's the default; but the first argument can override:
#
#     run_skopeo 125 nonexistent-subcommand
#     run_skopeo '?' some-other-command  # let our caller check status
#
# Since we use the BATS 'run' mechanism, $output and $status will be
# defined for our caller.
#
function run_skopeo() {
    # Number as first argument = expected exit code; default 0
    expected_rc=0
    case "$1" in
        [0-9])           expected_rc=$1; shift;;
        [1-9][0-9])      expected_rc=$1; shift;;
        [12][0-9][0-9])  expected_rc=$1; shift;;
        '?')             expected_rc=  ; shift;;  # ignore exit code
    esac

    # Remember command args, for possible use in later diagnostic messages
    MOST_RECENT_SKOPEO_COMMAND="skopeo $*"

    # stdout is only emitted upon error; this echo is to help a debugger
    echo "\$ $SKOPEO_BINARY $*"
    run timeout --foreground --kill=10 $SKOPEO_TIMEOUT ${SKOPEO_BINARY} "$@"
    # without "quotes", multiple lines are glommed together into one
    if [ -n "$output" ]; then
        echo "$output"
    fi
    if [ "$status" -ne 0 ]; then
        echo -n "[ rc=$status ";
        if [ -n "$expected_rc" ]; then
            if [ "$status" -eq "$expected_rc" ]; then
                echo -n "(expected) ";
            else
                echo -n "(** EXPECTED $expected_rc **) ";
            fi
        fi
        echo "]"
    fi

    if [ "$status" -eq 124 -o "$status" -eq 137 ]; then
        # FIXME: 'timeout -v' requires coreutils-8.29; travis seems to have
        # an older version. If/when travis updates, please add -v
        # to the 'timeout' command above, and un-comment this out:
        # if expr "$output" : ".*timeout: sending" >/dev/null; then
        echo "*** TIMED OUT ***"
        false
    fi

    if [ -n "$expected_rc" ]; then
        if [ "$status" -ne "$expected_rc" ]; then
            die "exit code is $status; expected $expected_rc"
        fi
    fi
}

#########
#  die  #  Abort with helpful message
#########
function die() {
    echo "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv" >&2
    echo "#| FAIL: $*" >&2
    echo "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^" >&2
    false
}

###################
#  expect_output  #  Compare actual vs expected string; fail if mismatch
###################
#
# Compares $output against the given string argument. Optional second
# argument is descriptive text to show as the error message (default:
# the command most recently run by 'run_skopeo'). This text can be
# useful to isolate a failure when there are multiple identical
# run_skopeo invocations, and the difference is solely in the
# config or setup; see, e.g., run.bats:run-cmd().
#
# By default we run an exact string comparison; use --substring to
# look for the given string anywhere in $output.
#
# By default we look in "$output", which is set in run_skopeo().
# To override, use --from="some-other-string" (e.g. "${lines[0]}")
#
# Examples:
#
#   expect_output "this is exactly what we expect"
#   expect_output "foo=bar" "description of this particular test"
#   expect_output --from="${lines[0]}" "expected first line"
#
function expect_output() {
    # By default we examine $output, the result of run_skopeo
    local actual="$output"
    local check_substring=

    # option processing: recognize --from="...", --substring
    local opt
    for opt; do
        local value=$(expr "$opt" : '[^=]*=\(.*\)')
        case "$opt" in
            --from=*)    actual="$value"; shift;;
            --substring) check_substring=1; shift;;
            --)          shift; break;;
            -*)          die "Invalid option '$opt'" ;;
            *)           break;;
        esac
    done

    local expect="$1"
    local testname="${2:-${MOST_RECENT_SKOPEO_COMMAND:-[no test name given]}}"

    if [ -z "$expect" ]; then
        if [ -z "$actual" ]; then
            return
        fi
        expect='[no output]'
    elif [ "$actual" = "$expect" ]; then
        return
    elif [ -n "$check_substring" ]; then
        if [[ "$actual" =~ $expect ]]; then
            return
        fi
    fi

    # This is a multi-line message, which may in turn contain multi-line
    # output, so let's format it ourself, readably
    local -a actual_split
    readarray -t actual_split <<<"$actual"
    printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2
    printf "#|     FAIL: $testname\n" >&2
    printf "#| expected: '%s'\n" "$expect" >&2
    printf "#|   actual: '%s'\n" "${actual_split[0]}" >&2
    local line
    for line in "${actual_split[@]:1}"; do
        printf "#|         > '%s'\n" "$line" >&2
    done
    printf "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" >&2
    false
}

#######################
#  expect_line_count  #  Check the expected number of output lines
#######################
#
# ...from the most recent run_skopeo command
#
function expect_line_count() {
    local expect="$1"
    local testname="${2:-${MOST_RECENT_SKOPEO_COMMAND:-[no test name given]}}"

    local actual="${#lines[@]}"
    if [ "$actual" -eq "$expect" ]; then
        return
    fi

    printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2
    printf "#| FAIL: $testname\n" >&2
    printf "#| Expected %d lines of output, got %d\n" $expect $actual >&2
    printf "#| Output was:\n" >&2
    local line
    for line in "${lines[@]}"; do
        printf "#| >%s\n" "$line" >&2
    done
    printf "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" >&2
    false
}

# END standard helpers for running skopeo and testing results
###############################################################################
# BEGIN helpers for starting/stopping registries

####################
#  start_registry  #  Run a local registry container
####################
#
# Usage:  start_registry [OPTIONS] NAME
#
# OPTIONS
#   --port=NNNN          Port to listen on (default: 5000)
#   --testuser=XXX       Require authentication; this is the username
#   --testpassword=XXX   ...and the password (these two go together)
#   --with-cert          Create a cert for running with TLS (not working)
#
# NAME is the container name to assign.
#
start_registry() {
    local port=5000
    local testuser=
    local testpassword=
    local create_cert=

    # option processing: recognize options for running the registry
    # in different modes.
    local opt
    for opt; do
        local value=$(expr "$opt" : '[^=]*=\(.*\)')
        case "$opt" in
            --port=*)         port="$value"; shift;;
            --testuser=*)     testuser="$value"; shift;;
            --testpassword=*) testpassword="$value"; shift;;
            --with-cert)      create_cert=1; shift;;
            -*)               die "Invalid option '$opt'" ;;
            *)                break;;
        esac
    done

    local name=${1?start_registry() invoked without a NAME}

    # Temp directory must be defined and must exist
    [[ -n $TESTDIR && -d $TESTDIR ]]

    AUTHDIR=$TESTDIR/auth
    mkdir -p $AUTHDIR

    local -a reg_args=(-v $AUTHDIR:/auth:Z -p $port:5000)

    # cgroup option necessary under podman-in-podman (CI tests),
    # and doesn't seem to do any harm otherwise.
    PODMAN="podman --cgroup-manager=cgroupfs"

    # Called with --testuser? Create an htpasswd file
    if [[ -n $testuser ]]; then
        if [[ -z $testpassword ]]; then
            die "start_registry() invoked with testuser but no testpassword"
        fi

        if ! egrep -q "^$testuser:" $AUTHDIR/htpasswd; then
            $PODMAN run --rm --entrypoint htpasswd registry:2 \
                -Bbn $testuser $testpassword >> $AUTHDIR/htpasswd
        fi

        reg_args+=(
            -e REGISTRY_AUTH=htpasswd
            -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd
            -e REGISTRY_AUTH_HTPASSWD_REALM="Registry Realm"
        )
    fi

    # Called with --with-cert? Create certificates.
    if [[ -n $create_cert ]]; then
        CERT=$AUTHDIR/domain.crt
        if [ ! -e $CERT ]; then
            openssl req -newkey rsa:4096 -nodes -sha256 \
                -keyout $AUTHDIR/domain.key -x509 -days 2 \
                -out $CERT \
                -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
        fi

        reg_args+=(
            -e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt
            -e REGISTRY_HTTP_TLS_KEY=/auth/domain.key
        )

        # Copy .crt file to a directory *without* the .key one, so we can
        # test the client. (If client sees a matching .key file, it fails)
        # Thanks to Miloslav Trmac for this hint.
        mkdir -p $TESTDIR/client-auth
        cp $CERT $TESTDIR/client-auth/
    fi

    $PODMAN run -d --name $name "${reg_args[@]}" registry:2

    # Wait for registry to actually come up
    timeout=10
    while [[ $timeout -ge 1 ]]; do
        if curl localhost:$port/; then
            return
        fi

        timeout=$(expr $timeout - 1)
        sleep 1
    done
    die "Timed out waiting for registry container to respond on :$port"
}

# END helpers for starting/stopping registries
###############################################################################
# BEGIN miscellaneous tools

###################
#  random_string  #  Returns a pseudorandom human-readable string
###################
#
# Numeric argument, if present, is desired length of string
#
function random_string() {
    local length=${1:-10}

    head /dev/urandom | tr -dc a-zA-Z0-9 | head -c$length
}

# END miscellaneous tools
###############################################################################
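A sketch of how a new .bats file would lean on these helpers; the test body is illustrative, not part of the suite:

```sh
#!/usr/bin/env bats
# Hypothetical test demonstrating the helper idioms
load helpers

@test "inspect: invalid reference fails cleanly" {
    # First argument 1 = expected exit code; output lands in $output
    run_skopeo 1 inspect unknown
    expect_output --substring "Invalid image name"
}
```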
systemtest/run-tests (Executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/bin/bash
#
# run-tests - simple wrapper allowing shortcuts on invocation
#

TEST_DIR=$(dirname $0)
TESTS=$TEST_DIR

for i; do
    case "$i" in
        *.bats) TESTS=$i ;;
        *)      TESTS=$(echo $TEST_DIR/*$i*.bats) ;;
    esac
done

bats $TESTS
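Usage follows directly from the case statement: a bare substring expands to the matching systemtest/*substring*.bats files.

```sh
# Run the whole suite
$ systemtest/run-tests

# Run only the copy tests (expands to systemtest/*copy*.bats)
$ systemtest/run-tests copy
```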
vendor.conf (77 lines)
@@ -1,62 +1,67 @@
github.com/urfave/cli v1.17.0
github.com/containers/image master
github.com/opencontainers/go-digest master
gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
github.com/containers/storage master

github.com/urfave/cli v1.20.0
github.com/kr/pretty v0.1.0
github.com/kr/text v0.1.0
github.com/containers/image v2.0.0
github.com/containers/buildah v1.8.4
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/containers/storage v1.12.10
github.com/sirupsen/logrus v1.0.0
github.com/go-check/check v1
github.com/stretchr/testify v1.1.3
github.com/davecgh/go-spew master
github.com/pmezard/go-difflib master
github.com/pkg/errors master
golang.org/x/crypto master
github.com/davecgh/go-spew v1.1.1
github.com/pmezard/go-difflib 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
github.com/pkg/errors v0.8.1
golang.org/x/crypto a4c6cb3142f211c99e4bf4cd769535b29a9b616f
github.com/ulikunitz/xz v0.5.4
github.com/etcd-io/bbolt v1.3.2
# docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
github.com/docker/docker 30eb4d8cdc422b023d5f11f29a82ecb73554183b
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341
github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
github.com/vbatts/tar-split v0.10.2
github.com/gorilla/context 14f550f51a
github.com/gorilla/mux e444e69cbd
github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97
golang.org/x/net master
golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
# end docker deps
golang.org/x/text master
github.com/docker/distribution master
golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
# docker/distributions dependencies
github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564
github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
# end of docker/distribution dependencies
github.com/docker/libtrust master
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/opencontainers/runc master
github.com/opencontainers/image-spec 149252121d044fddff670adcdc67f33148e16226
github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/image-spec 7b1e489870acb042978a3935d2fb76f8a79aff81
# -- start OCI image validation requirements.
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/image-tools 6d941547fa1df31900990b3fb47ec2468c9c6469
github.com/xeipuuv/gojsonschema master
github.com/xeipuuv/gojsonreference master
github.com/xeipuuv/gojsonpointer master
go4.org master https://github.com/camlistore/go4
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema v1.1.0
go4.org ce4c26f7be8eb27dc77f996b08d286dd80bc4a01 https://github.com/camlistore/go4
github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce
# -- end OCI image validation requirements
github.com/mtrmac/gpgme master
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
# openshift/origin' k8s dependencies as of OpenShift v1.1.5
github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
k8s.io/client-go master
k8s.io/client-go kubernetes-1.10.13-beta.0
github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4
github.com/imdario/mergo 6633656539c1639d9d78127b7d47c622b5d7b6dc
# containers/storage's dependencies that aren't already being pulled in
|
||||
github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
|
||||
github.com/pborman/uuid v1.0
|
||||
github.com/opencontainers/selinux master
|
||||
golang.org/x/sys master
|
||||
github.com/opencontainers/selinux v1.1
|
||||
golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
|
||||
github.com/tchap/go-patricia v2.2.6
|
||||
github.com/BurntSushi/toml master
|
||||
github.com/BurntSushi/toml v0.3.1
|
||||
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
|
||||
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
|
||||
github.com/klauspost/pgzip v1.2.1
|
||||
github.com/klauspost/compress v1.4.1
|
||||
github.com/klauspost/cpuid v1.2.0
|
||||
|
||||
21	vendor/github.com/VividCortex/ewma/LICENSE	generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License

Copyright (c) 2013 VividCortex

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
140	vendor/github.com/VividCortex/ewma/README.md	generated vendored Normal file
@@ -0,0 +1,140 @@
# EWMA [GoDoc](https://godoc.org/github.com/VividCortex/ewma)

This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our
Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/).

### Exponentially Weighted Moving Average

An exponentially weighted moving average is a way to continuously compute a type of
average for a series of numbers, as the numbers arrive. After a value in the series is
added to the average, its weight in the average decreases exponentially over time. This
biases the average towards more recent data. EWMAs are useful for several reasons, chiefly
their inexpensive computational and memory cost, as well as the fact that they represent
the recent central tendency of the series of values.

The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average
is biased towards recent history. The alpha must be between 0 and 1, and is typically
a fairly small number, such as 0.04. We will discuss the choice of alpha later.

The algorithm works thus, in pseudocode:

1. Multiply the next number in the series by alpha.
2. Multiply the current value of the average by 1 minus alpha.
3. Add the result of steps 1 and 2, and store it as the new current value of the average.
4. Repeat for each number in the series.

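As a minimal sketch (not part of the vendored package; the sample values are invented for illustration), the four steps above translate directly to Go:

```go
package main

import "fmt"

func main() {
	const alpha = 0.5 // decay factor; 0.50 matches the simple example below
	series := []float64{300, 295, 310, 305, 290}

	avg := series[0] // one initialization strategy: seed with the first value
	for _, v := range series[1:] {
		// steps 1-3: weight the new sample by alpha, the old average by 1-alpha
		avg = v*alpha + avg*(1-alpha)
	}
	fmt.Printf("EWMA: %.2f\n", avg)
}
```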
There are special-case behaviors for how to initialize the current value, and these vary
between implementations. One approach is to start with the first value in the series;
another is to average the first 10 or so values in the series using an arithmetic average,
and then begin the incremental updating of the average. Each method has pros and cons.

It may help to look at it pictorially. Suppose the series has five numbers, and we choose
alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300.

*(illustration: the raw series)*

Now let's take the moving average of those numbers. First we set the average to the value
of the first number.

*(illustration: the average seeded with the first value)*

Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add
them to generate a new value.

*(illustration: the second value blended into the average)*

This continues until we are done.

*(illustration: the final smoothed average)*

Notice how each of the values in the series decays by half each time a new value
is added, and the top of the bars in the lower portion of the image represents the
size of the moving average. It is a smoothed, or low-pass, average of the original
series.

For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia.

### Choosing Alpha

Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average)
that averages over the previous N samples. What is the average age of each sample? It is N/2.

Now suppose that you wish to construct an EWMA whose samples have the same average age. The formula
to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book
"Production and Operations Analysis" by Steven Nahmias.

So, for example, if you have a time-series with samples once per second, and you want to get the
moving average over the previous minute, you should use an alpha of .032786885. This, by the way,
is the constant alpha used for this repository's SimpleEWMA.

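A quick sketch of that conversion (the helper name is ours, not the package's):

```go
// alphaForWindow converts a sliding-window size N into the equivalent EWMA
// decay factor via alpha = 2/(N+1).
func alphaForWindow(n float64) float64 {
	return 2 / (n + 1)
}

// alphaForWindow(60) == 0.03278688524590164, the constant quoted above.
```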

### Implementations

This repository contains two implementations of the EWMA algorithm, with different properties.

The implementations all conform to the MovingAverage interface, and the constructor returns
that type.

Current implementations assume an implicit time interval of 1.0 between every sample added.
That is, the passage of time is treated as though it's the same as the arrival of samples.
If you need time-based decay when samples are not arriving precisely at set intervals, then
this package will not support your needs at present.

#### SimpleEWMA

A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA
for multiple reasons. It has no warm-up period and it uses a constant
decay. These properties let it use less memory. It will also behave
differently when it's equal to zero, which is assumed to mean
uninitialized, so if a value is likely to actually become zero over time,
then any non-zero value will cause a sharp jump instead of a small change.

#### VariableEWMA

Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory.
It also has a "warmup" time when you start adding values to it. It will report a value of 0.0
until you have added the required number of samples to it. It uses some memory to store the
number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA.

## Usage

### API Documentation

View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma).

```go
package main

import "github.com/VividCortex/ewma"

func main() {
	samples := [100]float64{
		4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
	}

	e := ewma.NewMovingAverage()  //=> Returns a SimpleEWMA if called without params
	a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)

	for _, f := range samples {
		e.Add(f)
		a.Add(f)
	}

	e.Value() //=> 13.577404704631077
	a.Value() //=> 1.5806140565521463e-12
}
```

## Contributing

We only accept pull requests for minor fixes or improvements. This includes:

* Small bug fixes
* Typos
* Documentation or comments

Please open issues to discuss new features. Pull requests for new features will be rejected,
so we recommend forking the repository and making changes in your fork for your use case.

## License

This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved.
It is licensed under the MIT license. Please see the LICENSE file for applicable license terms.
126	vendor/github.com/VividCortex/ewma/ewma.go	generated vendored Normal file
@@ -0,0 +1,126 @@
// Package ewma implements exponentially weighted moving averages.
package ewma

// Copyright (c) 2013 VividCortex, Inc. All rights reserved.
// Please see the LICENSE file for applicable license terms.

const (
	// By default, we average over a one-minute period, which means the average
	// age of the metrics in the period is 30 seconds.
	AVG_METRIC_AGE float64 = 30.0

	// The formula for computing the decay factor from the average age comes
	// from "Production and Operations Analysis" by Steven Nahmias.
	DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1)

	// For best results, the moving average should not be initialized to the
	// samples it sees immediately. The book "Production and Operations
	// Analysis" by Steven Nahmias suggests initializing the moving average to
	// the mean of the first 10 samples. Until the VariableEWMA has seen this
	// many samples, it is not "ready" to be queried for the value of the
	// moving average. This adds some memory cost.
	WARMUP_SAMPLES uint8 = 10
)

// MovingAverage is the interface that computes a moving average over a time-
// series stream of numbers. The average may be over a window or exponentially
// decaying.
type MovingAverage interface {
	Add(float64)
	Value() float64
	Set(float64)
}

// NewMovingAverage constructs a MovingAverage that computes an average with the
// desired characteristics in the moving window or exponential decay. If no
// age is given, it constructs a default exponentially weighted implementation
// that consumes minimal memory. The age is related to the decay factor alpha
// by the formula given for the DECAY constant. It signifies the average age
// of the samples as time goes to infinity.
func NewMovingAverage(age ...float64) MovingAverage {
	if len(age) == 0 || age[0] == AVG_METRIC_AGE {
		return new(SimpleEWMA)
	}
	return &VariableEWMA{
		decay: 2 / (age[0] + 1),
	}
}

// A SimpleEWMA represents the exponentially weighted moving average of a
// series of numbers. It WILL have different behavior than the VariableEWMA
// for multiple reasons. It has no warm-up period and it uses a constant
// decay. These properties let it use less memory. It will also behave
// differently when it's equal to zero, which is assumed to mean
// uninitialized, so if a value is likely to actually become zero over time,
// then any non-zero value will cause a sharp jump instead of a small change.
// However, note that this takes a long time, and the value may just decay
// to a stable value that's close to zero, but which won't be mistaken
// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example.
type SimpleEWMA struct {
	// The current value of the average. After adding with Add(), this is
	// updated to reflect the average of all values seen thus far.
	value float64
}

// Add adds a value to the series and updates the moving average.
func (e *SimpleEWMA) Add(value float64) {
	if e.value == 0 { // this is a proxy for "uninitialized"
		e.value = value
	} else {
		e.value = (value * DECAY) + (e.value * (1 - DECAY))
	}
}

// Value returns the current value of the moving average.
func (e *SimpleEWMA) Value() float64 {
	return e.value
}

// Set sets the EWMA's value.
func (e *SimpleEWMA) Set(value float64) {
	e.value = value
}

// VariableEWMA represents the exponentially weighted moving average of a series of
// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory.
type VariableEWMA struct {
	// The multiplier factor by which the previous samples decay.
	decay float64
	// The current value of the average.
	value float64
	// The number of samples added to this instance.
	count uint8
}

// Add adds a value to the series and updates the moving average.
func (e *VariableEWMA) Add(value float64) {
	switch {
	case e.count < WARMUP_SAMPLES:
		e.count++
		e.value += value
	case e.count == WARMUP_SAMPLES:
		e.count++
		e.value = e.value / float64(WARMUP_SAMPLES)
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	default:
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	}
}

// Value returns the current value of the average, or 0.0 if the series hasn't
// warmed up yet.
func (e *VariableEWMA) Value() float64 {
	if e.count <= WARMUP_SAMPLES {
		return 0.0
	}

	return e.value
}

// Set sets the EWMA's value.
func (e *VariableEWMA) Set(value float64) {
	e.value = value
	if e.count <= WARMUP_SAMPLES {
		e.count = WARMUP_SAMPLES + 1
	}
}
20	vendor/github.com/beorn7/perks/LICENSE	generated vendored
@@ -1,20 +0,0 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
292	vendor/github.com/beorn7/perks/quantile/stream.go	generated vendored
@@ -1,292 +0,0 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for quantile, epsilon := range targets {
			if quantile*s.n <= r {
				f = (2 * epsilon * r) / quantile
			} else {
				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}
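// Hedged usage sketch (not part of this vendored file): a targeted stream is
// fed observations and then queried for the preconfigured quantiles, e.g.:
//
//	q := NewTargeted(map[float64]float64{0.50: 0.005, 0.99: 0.001})
//	for i := 1; i <= 1000; i++ {
//		q.Insert(float64(i))
//	}
//	median, p99 := q.Query(0.50), q.Query(0.99) // approximately 500 and 990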

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

func (s *stream) count() int {
	return int(s.n)
}

func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
@@ -178,7 +178,7 @@
   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
@@ -186,7 +186,7 @@
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]
   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
@@ -199,3 +199,4 @@
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

74	vendor/github.com/containerd/continuity/README.md	generated vendored Normal file
@@ -0,0 +1,74 @@
# continuity

[GoDoc](https://godoc.org/github.com/containerd/continuity)
[Build Status](https://travis-ci.org/containerd/continuity)

A transport-agnostic, filesystem metadata manifest system

This project is a staging area for experiments in providing transport agnostic
metadata storage.

Please see https://github.com/opencontainers/specs/issues/11 for more details.

## Manifest Format

A continuity manifest encodes filesystem metadata in Protocol Buffers.
Please refer to [proto/manifest.proto](proto/manifest.proto).

## Usage

Build:

```console
$ make
```

Create a manifest (of this repo itself):

```console
$ ./bin/continuity build . > /tmp/a.pb
```

Dump a manifest:

```console
$ ./bin/continuity ls /tmp/a.pb
...
-rw-rw-r--      270 B   /.gitignore
-rw-rw-r--       88 B   /.mailmap
-rw-rw-r--      187 B   /.travis.yml
-rw-rw-r--      359 B   /AUTHORS
-rw-rw-r--       11 kB  /LICENSE
-rw-rw-r--      1.5 kB  /Makefile
...
-rw-rw-r--      986 B   /testutil_test.go
drwxrwxr-x        0 B   /version
-rw-rw-r--      478 B   /version/version.go
```

Verify a manifest:

```console
$ ./bin/continuity verify . /tmp/a.pb
```

Break the directory and restore using the manifest:
```console
$ chmod 777 Makefile
$ ./bin/continuity verify . /tmp/a.pb
2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r--
$ ./bin/continuity apply . /tmp/a.pb
$ stat -c %a Makefile
664
$ ./bin/continuity verify . /tmp/a.pb
```


## Contribution Guide
### Building Proto Package

If you change the proto file you will need to rebuild the generated Go with `go generate`.

```console
$ go generate ./proto
```
85	vendor/github.com/containerd/continuity/pathdriver/path_driver.go	generated vendored Normal file
@@ -0,0 +1,85 @@
package pathdriver

import (
	"path/filepath"
)

// PathDriver provides all of the path manipulation functions in a common
// interface. The context should call these and never use the `filepath`
// package or any other package to manipulate paths.
type PathDriver interface {
	Join(paths ...string) string
	IsAbs(path string) bool
	Rel(base, target string) (string, error)
	Base(path string) string
	Dir(path string) string
	Clean(path string) string
	Split(path string) (dir, file string)
	Separator() byte
	Abs(path string) (string, error)
	Walk(string, filepath.WalkFunc) error
	FromSlash(path string) string
	ToSlash(path string) string
	Match(pattern, name string) (matched bool, err error)
}

// pathDriver is a simple default implementation that calls the filepath package.
type pathDriver struct{}

// LocalPathDriver is the exported pathDriver struct for convenience.
var LocalPathDriver PathDriver = &pathDriver{}

func (*pathDriver) Join(paths ...string) string {
	return filepath.Join(paths...)
}

func (*pathDriver) IsAbs(path string) bool {
	return filepath.IsAbs(path)
}

func (*pathDriver) Rel(base, target string) (string, error) {
	return filepath.Rel(base, target)
}

func (*pathDriver) Base(path string) string {
	return filepath.Base(path)
}

func (*pathDriver) Dir(path string) string {
	return filepath.Dir(path)
}

func (*pathDriver) Clean(path string) string {
	return filepath.Clean(path)
}

func (*pathDriver) Split(path string) (dir, file string) {
	return filepath.Split(path)
}

func (*pathDriver) Separator() byte {
	return filepath.Separator
}

func (*pathDriver) Abs(path string) (string, error) {
	return filepath.Abs(path)
}

// Note that filepath.Walk calls os.Stat, so if the context wants to
// call Driver.Stat() for Walk, they need to create a new struct that
// overrides this method.
func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
	return filepath.Walk(root, walkFn)
}

func (*pathDriver) FromSlash(path string) string {
	return filepath.FromSlash(path)
}

func (*pathDriver) ToSlash(path string) string {
	return filepath.ToSlash(path)
}

func (*pathDriver) Match(pattern, name string) (bool, error) {
	return filepath.Match(pattern, name)
}
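// Hedged usage sketch (not part of this vendored file): callers hold the
// exported LocalPathDriver and get filepath-equivalent behavior, e.g.:
//
//	d := pathdriver.LocalPathDriver
//	p := d.Join("var", "lib", "containers")
//	abs := d.IsAbs(p) // false on Unix-like systems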
13	vendor/github.com/containerd/continuity/vendor.conf	generated vendored Normal file
@@ -0,0 +1,13 @@
bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748
github.com/dustin/go-humanize bb3d318650d48840a39aa21a027c6630e198e626
github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265
github.com/sirupsen/logrus 89742aefa4b206dcf400792f3bd35b542998eb3b
github.com/spf13/cobra 2da4a54c5ceefcee7ca5dd0eea1e18a3b6366489
github.com/spf13/pflag 4c012f6dcd9546820e378d0bdda4d8fc772cdfea
golang.org/x/crypto 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94
golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
golang.org/x/sys 665f6529cca930e27b831a0d1dafffbe1c172924
129	vendor/github.com/containers/buildah/README.md	generated vendored Normal file
@@ -0,0 +1,129 @@
*(buildah logo)*

# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images

[Go Report Card](https://goreportcard.com/report/github.com/containers/buildah)
[Build Status](https://travis-ci.org/containers/buildah)

The Buildah package provides a command line tool that can be used to
* create a working container, either from scratch or using an image as a starting point
* create an image, either from a working container or via the instructions in a Dockerfile
* images can be built in either the OCI image format or the traditional upstream docker image format
* mount a working container's root filesystem for manipulation
* unmount a working container's root filesystem
* use the updated contents of a container's root filesystem as a filesystem layer to create a new image
* delete a working container or an image
* rename a local container

## Buildah Information for Developers

For blogs, release announcements and more, please check out the [buildah.io](https://buildah.io) website!

**[Buildah Demos](demos)**

**[Changelog](CHANGELOG.md)**

**[Contributing](CONTRIBUTING.md)**

**[Development Plan](developmentplan.md)**

**[Installation notes](install.md)**

**[Troubleshooting Guide](troubleshooting.md)**

**[Tutorials](docs/tutorials)**

## Buildah and Podman relationship

Buildah and Podman are two complementary open-source projects that are
available on most Linux platforms and both projects reside at
[GitHub.com](https://github.com) with Buildah
[here](https://github.com/containers/buildah) and Podman
[here](https://github.com/containers/libpod). Both Buildah and Podman are
command line tools that work on Open Container Initiative (OCI) images and
containers. The two projects differ in their specialization.

Buildah specializes in building OCI images. Buildah's commands replicate all
of the commands that are found in a Dockerfile. This allows building images
with and without Dockerfiles while not requiring any root privileges.
Buildah’s ultimate goal is to provide a lower-level coreutils interface to
build images. The flexibility of building images without Dockerfiles allows
for the integration of other scripting languages into the build process.
Buildah follows a simple fork-exec model and does not run as a daemon,
but it is based on a comprehensive API in golang, which can be vendored
into other tools.

Podman specializes in all of the commands and functions that help you to maintain and modify
OCI images, such as pulling and tagging. It also allows you to create, run, and maintain
containers created from those images.

A major difference between Podman and Buildah is their concept of a container. Podman
allows users to create "traditional containers" that are intended to be long-lived,
while Buildah containers are really just created to allow content
to be added back to the container image. An easy way to think of it is that the
`buildah run` command emulates the RUN command in a Dockerfile while the `podman run`
command emulates the `docker run` command in functionality. Because of this and their underlying
storage differences, you cannot see Podman containers from within Buildah or vice versa.

In short, Buildah is an efficient way to create OCI images while Podman allows
you to manage and maintain those images and containers in a production environment using
familiar container CLI commands. For more details, see the
[Container Tools Guide](https://github.com/containers/buildah/tree/master/docs/containertools).

## Example

From [`./examples/lighttpd.sh`](examples/lighttpd.sh):

```bash
$ cat > lighttpd.sh <<"EOF"
#!/bin/bash -x

ctr1=$(buildah from "${1:-fedora}")

## Get all updates and install our minimal httpd server
buildah run "$ctr1" -- dnf update -y
buildah run "$ctr1" -- dnf install -y lighttpd

## Include some buildtime annotations
buildah config --annotation "com.example.build.host=$(uname -n)" "$ctr1"

## Run our server and expose the port
buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" "$ctr1"
buildah config --port 80 "$ctr1"

## Commit this container to an image name
buildah commit "$ctr1" "${2:-$USER/lighttpd}"
EOF

$ chmod +x lighttpd.sh
$ sudo ./lighttpd.sh
```

## Commands
| Command | Description |
| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
| [buildah-add(1)](/docs/buildah-add.md) | Add the contents of a file, URL, or a directory to the container. |
| [buildah-bud(1)](/docs/buildah-bud.md) | Build an image using instructions from Dockerfiles. |
| [buildah-commit(1)](/docs/buildah-commit.md) | Create an image from a working container. |
| [buildah-config(1)](/docs/buildah-config.md) | Update image configuration settings. |
| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images. |
| [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
| [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
| [buildah-info(1)](/docs/buildah-info.md) | Display Buildah system information. |
| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. |
| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. |
| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. |
| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. |
| [buildah-rmi(1)](/docs/buildah-rmi.md) | Removes one or more images. |
| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. |
| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. |
| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. |
| [buildah-unshare(1)](/docs/buildah-unshare.md) | Launch a command in a user namespace with modified ID mappings. |
| [buildah-version(1)](/docs/buildah-version.md) | Display the Buildah version information. |

**Future goals include:**
* more CI tests
* additional CLI commands (?)
287	vendor/github.com/containers/buildah/pkg/unshare/unshare.c	generated vendored Normal file
@@ -0,0 +1,287 @@
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <grp.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <errno.h>
#include <unistd.h>

/* Open Source projects like conda-forge want to package podman and are based
   off of centos:6. conda-forge has minimal libc requirements and lacks the
   memfd.h header, so we use mman.h instead.
*/
#ifndef MFD_ALLOW_SEALING
#define MFD_ALLOW_SEALING 2U
#endif
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 1U
#endif

#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif
#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
#endif
#ifndef F_SEAL_SEAL
#define F_SEAL_SEAL 0x0001LU
#endif
#ifndef F_SEAL_SHRINK
#define F_SEAL_SHRINK 0x0002LU
#endif
#ifndef F_SEAL_GROW
#define F_SEAL_GROW 0x0004LU
#endif
#ifndef F_SEAL_WRITE
#define F_SEAL_WRITE 0x0008LU
#endif

#define BUFSTEP 1024

static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone";

static int _containers_unshare_parse_envint(const char *envname) {
	char *p, *q;
	long l;

	p = getenv(envname);
	if (p == NULL) {
		return -1;
	}
	q = NULL;
	l = strtol(p, &q, 10);
	if ((q == NULL) || (*q != '\0')) {
		fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
		_exit(1);
	}
	unsetenv(envname);
	return l;
}

static void _check_proc_sys_file(const char *path)
{
	FILE *fp;
	char buf[32];
	size_t n_read;
	long r;

	fp = fopen(path, "r");
	if (fp == NULL) {
		if (errno != ENOENT)
			fprintf(stderr, "Error reading %s: %m\n", path);
	} else {
		memset(buf, 0, sizeof(buf));
		n_read = fread(buf, 1, sizeof(buf) - 1, fp);
		if (n_read > 0) {
			r = atoi(buf);
			if (r == 0) {
				fprintf(stderr, "User namespaces are not enabled in %s.\n", path);
			}
		} else {
			fprintf(stderr, "Error reading %s: no contents, should contain a number greater than 0.\n", path);
		}
		fclose(fp);
	}
}

static char **parse_proc_stringlist(const char *list) {
	int fd, n, i, n_strings;
	char *buf, *new_buf, **ret;
	size_t size, new_size, used;

	fd = open(list, O_RDONLY);
	if (fd == -1) {
		return NULL;
	}
	buf = NULL;
	size = 0;
	used = 0;
	for (;;) {
		new_size = used + BUFSTEP;
		new_buf = realloc(buf, new_size);
		if (new_buf == NULL) {
			free(buf);
			fprintf(stderr, "realloc(%ld): out of memory\n", (long)(size + BUFSTEP));
			return NULL;
		}
		buf = new_buf;
		size = new_size;
		memset(buf + used, '\0', size - used);
		n = read(fd, buf + used, size - used - 1);
		if (n < 0) {
			fprintf(stderr, "read(): %m\n");
			return NULL;
		}
		if (n == 0) {
			break;
		}
		used += n;
	}
	close(fd);
	n_strings = 0;
	for (n = 0; n < used; n++) {
		if ((n == 0) || (buf[n-1] == '\0')) {
			n_strings++;
		}
	}
	ret = calloc(n_strings + 1, sizeof(char *));
	if (ret == NULL) {
		fprintf(stderr, "calloc(): out of memory\n");
		return NULL;
	}
	i = 0;
	for (n = 0; n < used; n++) {
		if ((n == 0) || (buf[n-1] == '\0')) {
			ret[i++] = &buf[n];
		}
	}
	ret[i] = NULL;
	return ret;
}

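/* Re-execute the current binary from a sealed in-memory copy: read
 * /proc/self/exe into an anonymous memfd, seal the descriptor against any
 * further modification, and fexecve() it with the original argv (recovered
 * from /proc/self/cmdline) and environment. */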
static int containers_reexec(void) {
	char **argv, *exename;
	int fd, mmfd, n_read, n_written;
	struct stat st;
	char buf[2048];

	argv = parse_proc_stringlist("/proc/self/cmdline");
	if (argv == NULL) {
		return -1;
	}
	fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC);
	if (fd == -1) {
		fprintf(stderr, "open(\"/proc/self/exe\"): %m\n");
		return -1;
	}
	if (fstat(fd, &st) == -1) {
		fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n");
		return -1;
	}
	exename = basename(argv[0]);
	mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC);
	if (mmfd == -1) {
		fprintf(stderr, "memfd_create(): %m\n");
		return -1;
	}
	for (;;) {
		n_read = read(fd, buf, sizeof(buf));
		if (n_read < 0) {
			fprintf(stderr, "read(\"/proc/self/exe\"): %m\n");
			return -1;
		}
		if (n_read == 0) {
			break;
		}
		n_written = write(mmfd, buf, n_read);
		if (n_written < 0) {
			fprintf(stderr, "write(anonfd): %m\n");
			return -1;
		}
		if (n_written != n_read) {
			fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read);
			return -1;
		}
	}
	close(fd);
	if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) {
		close(mmfd);
		fprintf(stderr, "Error sealing memfd copy: %m\n");
		return -1;
	}
	if (fexecve(mmfd, argv, environ) == -1) {
		close(mmfd);
		fprintf(stderr, "Error during reexec(...): %m\n");
		return -1;
	}
	return 0;
}

void _containers_unshare(void)
{
	int flags, pidfd, continuefd, n, pgrp, sid, ctty;
	char buf[2048];

	flags = _containers_unshare_parse_envint("_Containers-unshare");
	if (flags == -1) {
		return;
	}
	if ((flags & CLONE_NEWUSER) != 0) {
		if (unshare(CLONE_NEWUSER) == -1) {
			fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n");
			_check_proc_sys_file (_max_user_namespaces);
			_check_proc_sys_file (_unprivileged_user_namespaces);
			_exit(1);
		}
	}
	pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe");
	if (pidfd != -1) {
		snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
		size_t size = write(pidfd, buf, strlen(buf));
		if (size != strlen(buf)) {
			fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
			_exit(1);
		}
		close(pidfd);
	}
	continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe");
	if (continuefd != -1) {
		n = read(continuefd, buf, sizeof(buf));
		if (n > 0) {
			fprintf(stderr, "Error: %.*s\n", n, buf);
			_exit(1);
		}
		close(continuefd);
	}
	sid = _containers_unshare_parse_envint("_Containers-setsid");
	if (sid == 1) {
		if (setsid() == -1) {
			fprintf(stderr, "Error during setsid: %m\n");
			_exit(1);
		}
	}
	pgrp = _containers_unshare_parse_envint("_Containers-setpgrp");
	if (pgrp == 1) {
		if (setpgrp() == -1) {
			fprintf(stderr, "Error during setpgrp: %m\n");
			_exit(1);
		}
	}
	ctty = _containers_unshare_parse_envint("_Containers-ctty");
	if (ctty != -1) {
		if (ioctl(ctty, TIOCSCTTY, 0) == -1) {
			fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty);
			_exit(1);
		}
	}
	if ((flags & CLONE_NEWUSER) != 0) {
		if (setresgid(0, 0, 0) != 0) {
			fprintf(stderr, "Error during setresgid(0): %m\n");
			_exit(1);
		}
		if (setresuid(0, 0, 0) != 0) {
			fprintf(stderr, "Error during setresuid(0): %m\n");
			_exit(1);
		}
	}
	if ((flags & ~CLONE_NEWUSER) != 0) {
		if (unshare(flags & ~CLONE_NEWUSER) == -1) {
			fprintf(stderr, "Error during unshare(...): %m\n");
			_exit(1);
		}
	}
	if (containers_reexec() != 0) {
		_exit(1);
	}
	return;
}
572	vendor/github.com/containers/buildah/pkg/unshare/unshare.go	generated vendored Normal file
@@ -0,0 +1,572 @@
// +build linux

package unshare

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
	"os/user"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"

	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/pkg/reexec"
	"github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/syndtr/gocapability/capability"
)

// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and
// handles setting ID maps and other related settings by triggering
// initialization code in the child.
type Cmd struct {
	*exec.Cmd
	UnshareFlags               int
	UseNewuidmap               bool
	UidMappings                []specs.LinuxIDMapping
	UseNewgidmap               bool
	GidMappings                []specs.LinuxIDMapping
	GidMappingsEnableSetgroups bool
	Setsid                     bool
	Setpgrp                    bool
	Ctty                       *os.File
	OOMScoreAdj                *int
	Hook                       func(pid int) error
}

// Command creates a new Cmd which can be customized.
func Command(args ...string) *Cmd {
	cmd := reexec.Command(args...)
	return &Cmd{
		Cmd: cmd,
	}
}

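// Start launches the child via reexec and then completes a two-pipe
// handshake: the child reports its PID over the pid pipe so the parent can
// write the setgroups/uid_map/gid_map entries, and the parent writes to the
// continue pipe only on error, telling the child to print the message and
// exit instead of proceeding.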
func (c *Cmd) Start() error {
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
// Set an environment variable to tell the child to synchronize its startup.
|
||||
if c.Env == nil {
|
||||
c.Env = os.Environ()
|
||||
}
|
||||
c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags))
|
||||
|
||||
// Please the libpod "rootless" package to find the expected env variables.
|
||||
if os.Geteuid() != 0 {
|
||||
c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done")
|
||||
c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", os.Geteuid()))
|
||||
c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_GID=%d", os.Getegid()))
|
||||
}
|
||||
|
||||
// Create the pipe for reading the child's PID.
|
||||
pidRead, pidWrite, err := os.Pipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating pid pipe")
|
||||
}
|
||||
c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
|
||||
c.ExtraFiles = append(c.ExtraFiles, pidWrite)
|
||||
|
||||
// Create the pipe for letting the child know to proceed.
|
||||
continueRead, continueWrite, err := os.Pipe()
|
||||
if err != nil {
|
||||
pidRead.Close()
|
||||
pidWrite.Close()
|
||||
return errors.Wrapf(err, "error creating pid pipe")
|
||||
}
    c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
    c.ExtraFiles = append(c.ExtraFiles, continueRead)

    // Pass along other instructions.
    if c.Setsid {
        c.Env = append(c.Env, "_Containers-setsid=1")
    }
    if c.Setpgrp {
        c.Env = append(c.Env, "_Containers-setpgrp=1")
    }
    if c.Ctty != nil {
        c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3))
        c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
    }

    // Make sure we clean up our pipes.
    defer func() {
        if pidRead != nil {
            pidRead.Close()
        }
        if pidWrite != nil {
            pidWrite.Close()
        }
        if continueRead != nil {
            continueRead.Close()
        }
        if continueWrite != nil {
            continueWrite.Close()
        }
    }()

    // Start the new process.
    err = c.Cmd.Start()
    if err != nil {
        return err
    }

    // Close the ends of the pipes that the parent doesn't need.
    continueRead.Close()
    continueRead = nil
    pidWrite.Close()
    pidWrite = nil

    // Read the child's PID from the pipe.
    pidString := ""
    b := new(bytes.Buffer)
    io.Copy(b, pidRead)
    pidString = b.String()
    pid, err := strconv.Atoi(pidString)
    if err != nil {
        fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
        return errors.Wrapf(err, "error parsing PID %q", pidString)
    }
    pidString = fmt.Sprintf("%d", pid)

    // If we created a new user namespace, set any specified mappings.
    if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 {
        // Always set "setgroups".
        setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
        if err != nil {
            fmt.Fprintf(continueWrite, "error opening setgroups: %v", err)
            return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
        }
        defer setgroups.Close()
        if c.GidMappingsEnableSetgroups {
            if _, err := fmt.Fprintf(setgroups, "allow"); err != nil {
fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err)
|
||||
return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString)
|
||||
}
        } else {
            if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
                fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err)
                return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString)
            }
        }

        if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
            uidmap, gidmap, err := GetHostIDMappings("")
            if err != nil {
                fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
                return errors.Wrapf(err, "error reading ID mappings in parent")
            }
            if len(c.UidMappings) == 0 {
                c.UidMappings = uidmap
                for i := range c.UidMappings {
                    c.UidMappings[i].HostID = c.UidMappings[i].ContainerID
                }
            }
            if len(c.GidMappings) == 0 {
                c.GidMappings = gidmap
                for i := range c.GidMappings {
                    c.GidMappings[i].HostID = c.GidMappings[i].ContainerID
                }
            }
        }

        if len(c.GidMappings) > 0 {
            // Build the GID map, since writing to the proc file has to be done all at once.
            g := new(bytes.Buffer)
            for _, m := range c.GidMappings {
                fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
            }
            gidmapSet := false
            // Set the GID map.
            if c.UseNewgidmap {
                cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
                g.Reset()
                cmd.Stdout = g
                cmd.Stderr = g
                err := cmd.Run()
                if err == nil {
                    gidmapSet = true
                } else {
                    logrus.Warnf("error running newgidmap: %v: %s", err, g.String())
                    logrus.Warnf("falling back to single mapping")
                    g.Reset()
                    g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
                }
            }
            if !gidmapSet {
                if c.UseNewgidmap {
                    setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
                    if err != nil {
                        fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err)
                        return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
                    }
                    defer setgroups.Close()
                    if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
                        fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err)
                        return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString)
                    }
                }
                gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
                if err != nil {
                    fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
                    return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
                }
                defer gidmap.Close()
                if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
                    fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
                    return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString)
                }
            }
        }

        if len(c.UidMappings) > 0 {
            // Build the UID map, since writing to the proc file has to be done all at once.
            u := new(bytes.Buffer)
            for _, m := range c.UidMappings {
                fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
            }
            uidmapSet := false
            // Set the UID map.
            if c.UseNewuidmap {
                cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
                u.Reset()
                cmd.Stdout = u
                cmd.Stderr = u
                err := cmd.Run()
                if err == nil {
                    uidmapSet = true
                } else {
                    logrus.Warnf("error running newuidmap: %v: %s", err, u.String())
                    logrus.Warnf("falling back to single mapping")
                    u.Reset()
                    u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid())))
                }
            }
            if !uidmapSet {
                uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
                if err != nil {
                    fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
                    return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
                }
                defer uidmap.Close()
                if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
                    fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err)
                    return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString)
                }
            }
        }
    }

    if c.OOMScoreAdj != nil {
        oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
        if err != nil {
            fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
            return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
        }
        defer oomScoreAdj.Close()
        if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil {
            fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", *c.OOMScoreAdj, err)
            return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", *c.OOMScoreAdj, pidString)
        }
    }
    // Run any additional setup that we want to do before the child starts running proper.
    if c.Hook != nil {
        if err = c.Hook(pid); err != nil {
            fmt.Fprintf(continueWrite, "hook error: %v", err)
            return err
        }
    }

    return nil
}

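// Run starts the command and waits for it to complete, like exec.Cmd.Run.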
func (c *Cmd) Run() error {
    if err := c.Start(); err != nil {
        return err
    }
    return c.Wait()
}

func (c *Cmd) CombinedOutput() ([]byte, error) {
    return nil, errors.New("unshare: CombinedOutput() not implemented")
}

func (c *Cmd) Output() ([]byte, error) {
    return nil, errors.New("unshare: Output() not implemented")
}

var (
    isRootlessOnce sync.Once
    isRootless     bool
)

const (
    // UsernsEnvName is the environment variable which, if set, indicates that we are running in rootless mode.
    UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)

// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
    isRootlessOnce.Do(func() {
        isRootless = os.Geteuid() != 0 || os.Getenv(UsernsEnvName) != ""
    })
    return isRootless
}

// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
    uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
    if uidEnv != "" {
        u, _ := strconv.Atoi(uidEnv)
        return u
    }
    return os.Getuid()
}

// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
    return append(os.Environ(), UsernsEnvName+"=done")
}

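// Runnable is the interface ExecRunnable expects: anything that can be run
// to completion with a single Run call, such as *Cmd.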
type Runnable interface {
    Run() error
}

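// bailOnError logs err (prefixed with the formatted message, if any) and
// exits with status 1; it is a no-op when err is nil.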
func bailOnError(err error, format string, a ...interface{}) {
    if err != nil {
        if format != "" {
            logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
        } else {
            logrus.Errorf("%v", err)
        }
        os.Exit(1)
    }
}

// MaybeReexecUsingUserNamespace re-execs the process in a new user namespace, when appropriate.
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
    // If we've already been through this once, no need to try again.
    if os.Geteuid() == 0 && IsRootless() {
        return
    }

    var uidNum, gidNum uint64
    // Figure out who we are.
    me, err := user.Current()
    if !os.IsNotExist(err) {
        bailOnError(err, "error determining current user")
        uidNum, err = strconv.ParseUint(me.Uid, 10, 32)
        bailOnError(err, "error parsing current UID %s", me.Uid)
        gidNum, err = strconv.ParseUint(me.Gid, 10, 32)
        bailOnError(err, "error parsing current GID %s", me.Gid)
    }

    runtime.LockOSThread()
    defer runtime.UnlockOSThread()

    // ID mappings to use to reexec ourselves.
    var uidmap, gidmap []specs.LinuxIDMapping
    if uidNum != 0 || evenForRoot {
        // Read the set of ID mappings that we're allowed to use. Each
        // range in the /etc/subuid and /etc/subgid files is a starting host
        // ID and a range size.
        uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
        if err != nil {
            logrus.Warnf("error reading allowed ID mappings: %v", err)
        }
        if len(uidmap) == 0 {
            logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
        }
        if len(gidmap) == 0 {
            logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username)
        }
        // Map our UID and GID, then the subuid and subgid ranges,
        // consecutively, starting at 0, to get the mappings to use for
        // a copy of ourselves.
        uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...)
        gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...)
        var rangeStart uint32
        for i := range uidmap {
            uidmap[i].ContainerID = rangeStart
            rangeStart += uidmap[i].Size
        }
        rangeStart = 0
        for i := range gidmap {
            gidmap[i].ContainerID = rangeStart
            rangeStart += gidmap[i].Size
        }
    } else {
        // If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
        // to use unshare(), so don't bother creating a new user namespace at this point.
        capabilities, err := capability.NewPid(0)
        bailOnError(err, "error reading the current capabilities sets")
        if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
            return
        }
        // Read the set of ID mappings that we're currently using.
        uidmap, gidmap, err = GetHostIDMappings("")
        bailOnError(err, "error reading current ID mappings")
        // Just reuse them.
        for i := range uidmap {
            uidmap[i].HostID = uidmap[i].ContainerID
        }
        for i := range gidmap {
            gidmap[i].HostID = gidmap[i].ContainerID
        }
    }

    // Unlike most uses of reexec or unshare, we're using a name that
    // _won't_ be recognized as a registered reexec handler, since we
    // _want_ to fall through reexec.Init() to the normal main().
    cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...)

    // If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again.
    err = os.Setenv(UsernsEnvName, "1")
    bailOnError(err, "error setting %s=1 in environment", UsernsEnvName)

    // Set the default isolation type to use the "rootless" method.
    if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
        if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
            logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
            os.Exit(1)
        }
    }

    // Reuse our stdio.
    cmd.Stdin = os.Stdin
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    // Set up a new user namespace with the ID mapping.
    cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS
    cmd.UseNewuidmap = uidNum != 0
    cmd.UidMappings = uidmap
    cmd.UseNewgidmap = uidNum != 0
    cmd.GidMappings = gidmap
    cmd.GidMappingsEnableSetgroups = true

    // Finish up.
    logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
    ExecRunnable(cmd)
}

// ExecRunnable runs the specified unshare command, captures its exit status,
// and exits with the same status.
func ExecRunnable(cmd Runnable) {
    if err := cmd.Run(); err != nil {
        if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
            if exitError.ProcessState.Exited() {
                if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
                    if waitStatus.Exited() {
                        logrus.Errorf("%v", exitError)
                        os.Exit(waitStatus.ExitStatus())
                    }
                    if waitStatus.Signaled() {
                        logrus.Errorf("%v", exitError)
                        os.Exit(int(waitStatus.Signal()) + 128)
                    }
                }
            }
        }
        logrus.Errorf("%v", err)
        logrus.Errorf("(unable to determine exit status)")
        os.Exit(1)
    }
    os.Exit(0)
}

// getHostIDMappings reads mappings from the named node under /proc.
func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
    var mappings []specs.LinuxIDMapping
    f, err := os.Open(path)
    if err != nil {
        return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
    }
    defer f.Close()
    scanner := bufio.NewScanner(f)
    for scanner.Scan() {
        line := scanner.Text()
        fields := strings.Fields(line)
        if len(fields) != 3 {
            return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
        }
        cid, err := strconv.ParseUint(fields[0], 10, 32)
        if err != nil {
            return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
        }
        hid, err := strconv.ParseUint(fields[1], 10, 32)
        if err != nil {
            return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
        }
        size, err := strconv.ParseUint(fields[2], 10, 32)
        if err != nil {
            return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
        }
        mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
    }
    return mappings, nil
}
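
// For reference, each line that getHostIDMappings parses is a
// "container-ID host-ID size" triple; a typical rootless pair of entries
// (values illustrative) looks like:
//
//    0 1000 1
//    1 100000 65536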

// GetHostIDMappings reads mappings for the specified process (or the current
// process if pid is "self" or an empty string) from the kernel.
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
    if pid == "" {
        pid = "self"
    }
    uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid))
    if err != nil {
        return nil, nil, err
    }
    gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid))
    if err != nil {
        return nil, nil, err
    }
    return uidmap, gidmap, nil
}

// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid.
func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
    mappings, err := idtools.NewIDMappings(user, group)
    if err != nil {
        return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
    }
    var uidmap, gidmap []specs.LinuxIDMapping
    for _, m := range mappings.UIDs() {
        uidmap = append(uidmap, specs.LinuxIDMapping{
            ContainerID: uint32(m.ContainerID),
            HostID:      uint32(m.HostID),
            Size:        uint32(m.Size),
        })
    }
    for _, m := range mappings.GIDs() {
        gidmap = append(gidmap, specs.LinuxIDMapping{
            ContainerID: uint32(m.ContainerID),
            HostID:      uint32(m.HostID),
            Size:        uint32(m.Size),
        })
    }
    return uidmap, gidmap, nil
}

// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
    uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
    if err != nil {
        return nil, nil, err
    }
    gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
    if err != nil {
        return nil, nil, err
    }
    return uid, gid, nil
}
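
Taken together, the pieces above compose into the pattern MaybeReexecUsingUserNamespace itself follows: build a Cmd with Command, configure the namespace flags and ID mappings, and hand the result to ExecRunnable, which forwards the child's exit status. A minimal sketch of that call pattern (the uidmap and gidmap values are assumed to come from GetSubIDMappings; this is an illustration, not code from the vendored file):

    cmd := Command(append([]string{os.Args[0] + "-in-a-user-namespace"}, os.Args[1:]...)...)
    cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS
    cmd.UseNewuidmap, cmd.UidMappings = true, uidmap
    cmd.UseNewgidmap, cmd.GidMappings = true, gidmap
    cmd.GidMappingsEnableSetgroups = true
    cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
    ExecRunnable(cmd) // exits this process with the child's exit status
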
10 vendor/github.com/containers/buildah/pkg/unshare/unshare_cgo.go generated vendored Normal file
@@ -0,0 +1,10 @@
// +build linux,cgo,!gccgo

package unshare

// #cgo CFLAGS: -Wall
// extern void _containers_unshare(void);
// void __attribute__((constructor)) init(void) {
//   _containers_unshare();
// }
import "C"
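
// The constructor attribute runs _containers_unshare() when the process
// loads, before the Go runtime spawns any threads, so a re-exec'ed child
// can act on the _Containers-* settings published by Cmd.Start while it
// is still single-threaded.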
25 vendor/github.com/containers/buildah/pkg/unshare/unshare_gccgo.go generated vendored Normal file
@@ -0,0 +1,25 @@
// +build linux,cgo,gccgo

package unshare

// #cgo CFLAGS: -Wall -Wextra
// extern void _containers_unshare(void);
// void __attribute__((constructor)) init(void) {
//   _containers_unshare();
// }
import "C"

// This next bit is straight out of libcontainer.

// AlwaysFalse is here to stay false
// (and be exported so the compiler doesn't optimize out its reference)
var AlwaysFalse bool

func init() {
    if AlwaysFalse {
        // by referencing this C init() in a noop test, it will ensure the compiler
        // links in the C function.
        // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
        C.init()
    }
}
45 vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go generated vendored Normal file
@@ -0,0 +1,45 @@
// +build !linux

package unshare

import (
    "os"

    "github.com/containers/storage/pkg/idtools"
    "github.com/opencontainers/runtime-spec/specs-go"
)

const (
    // UsernsEnvName is the environment variable which, if set, indicates that we are running in rootless mode.
    UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)

// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
    return false
}

// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
    return os.Getuid()
}

// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
    return append(os.Environ(), UsernsEnvName+"=")
}

// MaybeReexecUsingUserNamespace re-execs the process in a new user namespace, when appropriate.
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
}

// GetHostIDMappings reads mappings for the specified process (or the current
// process if pid is "self" or an empty string) from the kernel.
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
    return nil, nil, nil
}

// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
    return nil, nil, nil
}
69 vendor/github.com/containers/buildah/vendor.conf generated vendored Normal file
@@ -0,0 +1,69 @@
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/blang/semver v3.5.0
github.com/BurntSushi/toml v0.2.0
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/containernetworking/cni v0.7.0-rc2
github.com/containers/image v2.0.0
github.com/cyphar/filepath-securejoin v0.2.1
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
github.com/containers/storage v1.12.10
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83
github.com/docker/docker-credential-helpers v0.6.1
github.com/docker/go-connections v0.4.0
github.com/docker/go-units v0.3.2
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
github.com/docker/libnetwork 1a06131fb8a047d919f7deaf02a4c414d7884b83
github.com/fsouza/go-dockerclient v1.3.0
github.com/ghodss/yaml v1.0.0
github.com/gogo/protobuf v1.2.0
github.com/gorilla/context v1.1.1
github.com/gorilla/mux v1.6.2
github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/go-multierror v1.0.0
github.com/imdario/mergo v0.3.6
github.com/mattn/go-shellwords v1.0.3
github.com/Microsoft/go-winio v0.4.11
github.com/Microsoft/hcsshim v0.8.3
github.com/mistifyio/go-zfs v2.1.1
github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
# TODO: Gotty has not been updated since 2012. Can we find a replacement?
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools v0.8.0
github.com/opencontainers/selinux v1.1
github.com/openshift/imagebuilder v1.1.0
github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b
github.com/pkg/errors v0.8.1
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
github.com/seccomp/libseccomp-golang v0.9.0
github.com/seccomp/containers-golang v0.1
github.com/sirupsen/logrus v1.0.0
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
github.com/tchap/go-patricia v2.2.6
github.com/ulikunitz/xz v0.5.5
github.com/vbatts/tar-split v0.10.2
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema v1.1.0
golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 https://github.com/golang/crypto
golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 https://github.com/golang/net
golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f https://github.com/golang/sync
golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba https://github.com/golang/sys
golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d https://github.com/golang/text
gopkg.in/yaml.v2 v2.2.2
k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
github.com/onsi/gomega v1.4.3
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3
github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
github.com/etcd-io/bbolt v1.3.2
14 vendor/github.com/containers/image/README.md generated vendored
@@ -25,7 +25,7 @@ them as necessary, and to sign and verify images.
The containers/image project is only a library with no user interface;
you can either incorporate it into your Go programs, or use the `skopeo` tool:

The [skopeo](https://github.com/projectatomic/skopeo) tool uses the
The [skopeo](https://github.com/containers/skopeo) tool uses the
containers/image library and takes advantage of many of its features,
e.g. `skopeo copy` exposes the `containers/image/copy.Image` functionality.

@@ -42,7 +42,7 @@ What this project tests against dependencies-wise is located
## Building

If you want to see what the library can do, or an example of how it is called,
consider starting with the [skopeo](https://github.com/projectatomic/skopeo) tool
consider starting with the [skopeo](https://github.com/containers/skopeo) tool
instead.

To integrate this library into your project, put it into `$GOPATH` or use
@@ -53,7 +53,7 @@ are also available

This library, by default, also depends on the GpgME and libostree C libraries. Either install them:
```sh
Fedora$ dnf install gpgme-devel libassuan-devel libostree-devel
Fedora$ dnf install gpgme-devel libassuan-devel ostree-devel
macOS$ brew install gpgme
```
or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`)
@@ -65,13 +65,17 @@ the primary downside is that creating new signatures with the Golang-only implem
- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled
and impossible to import when this build tag is in use.

## Contributing
## [Contributing](CONTRIBUTING.md)

Information about contributing to this project.

When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.

## License

ASL 2.0
Apache License 2.0

SPDX-License-Identifier: Apache-2.0

## Contact

400 vendor/github.com/containers/image/copy/copy.go generated vendored
@@ -2,36 +2,50 @@ package copy

import (
    "bytes"
    "compress/gzip"
    "context"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "reflect"
    "runtime"
    "strings"
    "sync"
    "time"

    "github.com/containers/image/docker/reference"
    "github.com/containers/image/image"
    "github.com/containers/image/manifest"
    "github.com/containers/image/pkg/blobinfocache"
    "github.com/containers/image/pkg/compression"
    "github.com/containers/image/signature"
    "github.com/containers/image/transports"
    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
    "github.com/klauspost/pgzip"
    digest "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    pb "gopkg.in/cheggaaa/pb.v1"
    "github.com/vbauerster/mpb"
    "github.com/vbauerster/mpb/decor"
    "golang.org/x/crypto/ssh/terminal"
    "golang.org/x/sync/semaphore"
)

type digestingReader struct {
    source           io.Reader
    digester         digest.Digester
    expectedDigest   digest.Digest
    validationFailed bool
    source              io.Reader
    digester            digest.Digester
    expectedDigest      digest.Digest
    validationFailed    bool
    validationSucceeded bool
}

// maxParallelDownloads is used to limit the maximum number of parallel
// downloads. Let's follow Firefox by limiting it to 6.
var maxParallelDownloads = 6

// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// and set validationFailed to true if the source stream does not match expectedDigest.
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
    if err := expectedDigest.Validate(); err != nil {
        return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
@@ -64,6 +78,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
            d.validationFailed = true
            return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
        }
        d.validationSucceeded = true
    }
    return n, err
}
@@ -71,22 +86,24 @@ func (d *digestingReader) Read(p []byte) (int, error) {
// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {
    copiedBlobs   map[digest.Digest]digest.Digest
    cachedDiffIDs map[digest.Digest]digest.Digest
    dest          types.ImageDestination
    rawSource     types.ImageSource
    reportWriter  io.Writer
    progressOutput   io.Writer
    progressInterval time.Duration
    progress         chan types.ProgressProperties
    blobInfoCache    types.BlobInfoCache
    copyInParallel   bool
}

// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
    c                 *copier
    manifestUpdates   *types.ManifestUpdateOptions
    src               types.Image
    diffIDsAreNeeded  bool
    canModifyManifest bool
    c                  *copier
    manifestUpdates    *types.ManifestUpdateOptions
    src                types.Image
    diffIDsAreNeeded   bool
    canModifyManifest  bool
    canSubstituteBlobs bool
}

// Options allows supplying non-default configuration modifying the behavior of CopyImage.
@@ -103,8 +120,9 @@ type Options struct {
}

// Image copies image from srcRef to destRef, using policyContext to validate
// source image admissibility.
func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (retErr error) {
// source image admissibility. It returns the manifest which was written to
// the new copy of the image.
func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (manifest []byte, retErr error) {
    // NOTE this function uses an output parameter for the error return value.
    // Setting this and returning is the ideal way to return an error.
    //
@@ -122,7 +140,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,

    dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
    if err != nil {
        return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
        return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
    }
    defer func() {
        if err := dest.Close(); err != nil {
@@ -132,7 +150,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,

    rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
    if err != nil {
        return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
        return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
    }
    defer func() {
        if err := rawSource.Close(); err != nil {
@@ -140,76 +158,108 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
        }
    }()

    // If reportWriter is not a TTY (e.g., when piping to a file), do not
    // print the progress bars to avoid long and hard to parse output.
    // createProgressBar() will print a single line instead.
    progressOutput := reportWriter
    if !isTTY(reportWriter) {
        progressOutput = ioutil.Discard
    }
    copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
    c := &copier{
        copiedBlobs:      make(map[digest.Digest]digest.Digest),
        cachedDiffIDs:    make(map[digest.Digest]digest.Digest),
        dest:             dest,
        rawSource:        rawSource,
        reportWriter:     reportWriter,
        progressOutput:   progressOutput,
        progressInterval: options.ProgressInterval,
        progress:         options.Progress,
        copyInParallel:   copyInParallel,
        // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
        // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
        // we might want to add a separate CommonCtx — or would that be too confusing?
        blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
    }

    unparsedToplevel := image.UnparsedInstance(rawSource, nil)
    multiImage, err := isMultiImage(ctx, unparsedToplevel)
    if err != nil {
        return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
        return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
    }

    if !multiImage {
        // The simple case: Just copy a single image.
        if err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil {
            return err
        if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil {
            return nil, err
        }
    } else {
        // This is a manifest list. Choose a single image and copy it.
        // FIXME: Copy to destinations which support manifest lists, one image at a time.
        instanceDigest, err := image.ChooseManifestInstanceFromManifestList(ctx, options.SourceCtx, unparsedToplevel)
        if err != nil {
            return errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
            return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
        }
        logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest)
        unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)

        if err := c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil {
            return err
        if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil {
            return nil, err
        }
    }

    if err := c.dest.Commit(ctx); err != nil {
        return errors.Wrap(err, "Error committing the finished image")
        return nil, errors.Wrap(err, "Error committing the finished image")
    }

    return nil
    return manifest, nil
}

// Image copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
// source image admissibility.
func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (retErr error) {
func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) {
    // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
    // Make sure we fail cleanly in such cases.
    multiImage, err := isMultiImage(ctx, unparsedImage)
    if err != nil {
        // FIXME FIXME: How to name a reference for the sub-image?
        return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
        return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
    }
    if multiImage {
        return fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
        return nil, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
    }

    // Please keep this policy check BEFORE reading any other information about the image.
    // (the multiImage check above only matches the MIME type, which we have received anyway.
    // Actual parsing of anything should be deferred.)
    if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
        return errors.Wrap(err, "Source image rejected")
        return nil, errors.Wrap(err, "Source image rejected")
    }
    src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
    if err != nil {
        return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
        return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
    }

    // If the destination is a digested reference, make a note of that, determine what digest value we're
    // expecting, and check that the source manifest matches it.
    destIsDigestedReference := false
    if named := c.dest.Reference().DockerReference(); named != nil {
        if digested, ok := named.(reference.Digested); ok {
            destIsDigestedReference = true
            sourceManifest, _, err := src.Manifest(ctx)
            if err != nil {
                return nil, errors.Wrapf(err, "Error reading manifest from source image")
            }
            matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
            if err != nil {
                return nil, errors.Wrapf(err, "Error computing digest of source image's manifest")
            }
            if !matches {
                return nil, errors.New("Digest of source image's manifest would not match destination reference")
            }
        }
    }

    if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil {
        return err
        return nil, err
    }

    var sigs [][]byte
@@ -219,14 +269,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
        c.Printf("Getting image source signatures\n")
        s, err := src.Signatures(ctx)
        if err != nil {
            return errors.Wrap(err, "Error reading signatures")
            return nil, errors.Wrap(err, "Error reading signatures")
        }
        sigs = s
    }
    if len(sigs) != 0 {
        c.Printf("Checking if image destination supports signatures\n")
        if err := c.dest.SupportsSignatures(ctx); err != nil {
            return errors.Wrap(err, "Can not copy signatures")
            return nil, errors.Wrap(err, "Can not copy signatures")
        }
    }

@@ -235,32 +285,39 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
        manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
        src:             src,
        // diffIDsAreNeeded is computed later
        canModifyManifest: len(sigs) == 0,
        canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
    }
    // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
    // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
    // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
    // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
    // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
    // and we would reuse and sign it.
    ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == ""

    if err := ic.updateEmbeddedDockerReference(); err != nil {
        return err
        return nil, err
    }

    // We compute preferredManifestMIMEType only to show it in error messages.
    // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
    preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType)
    if err != nil {
        return err
        return nil, err
    }

    // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
    ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)

    if err := ic.copyLayers(ctx); err != nil {
        return err
        return nil, err
    }

    // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
    // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
    // without actually trying to upload something and getting a types.ManifestTypeRejectedError.
    // So, try the preferred manifest MIME type. If the process succeeds, fine…
    manifest, err := ic.copyUpdatedConfigAndManifest(ctx)
    manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx)
    if err != nil {
        logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
        // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
@@ -268,14 +325,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
            // We don’t have other options.
            // In principle the code below would handle this as well, but the resulting error message is fairly ugly.
            // Don’t bother the user with MIME types if we have no choice.
            return err
            return nil, err
        }
        // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
        // So if we are here, we will definitely be trying to convert the manifest.
        // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason,
        // so let’s bail out early and with a better error message.
        if !ic.canModifyManifest {
            return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
            return nil, errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
        }

        // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
@@ -291,29 +348,29 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
            }

            // We have successfully uploaded a manifest.
            manifest = attemptedManifest
            manifestBytes = attemptedManifest
            errs = nil // Mark this as a success so that we don't abort below.
            break
        }
        if errs != nil {
            return fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
            return nil, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
        }
    }

    if options.SignBy != "" {
        newSig, err := c.createSignature(manifest, options.SignBy)
        newSig, err := c.createSignature(manifestBytes, options.SignBy)
        if err != nil {
            return err
            return nil, err
        }
        sigs = append(sigs, newSig)
    }

    c.Printf("Storing signatures\n")
    if err := c.dest.PutSignatures(ctx, sigs); err != nil {
        return errors.Wrap(err, "Error writing signatures")
        return nil, errors.Wrap(err, "Error writing signatures")
    }

    return nil
    return manifestBytes, nil
}

// Printf writes a formatted string to c.reportWriter.
@@ -347,6 +404,9 @@ func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.Sy

// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
func (ic *imageCopier) updateEmbeddedDockerReference() error {
    if ic.c.dest.IgnoresEmbeddedDockerReference() {
        return nil // Destination would prefer us not to update the embedded reference.
    }
    destRef := ic.c.dest.Reference().DockerReference()
    if destRef == nil {
        return nil // Destination does not care about Docker references
@@ -363,11 +423,18 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
    return nil
}

// isTTY returns true if the io.Writer is a file and a tty.
func isTTY(w io.Writer) bool {
    if f, ok := w.(*os.File); ok {
        return terminal.IsTerminal(int(f.Fd()))
    }
    return false
}

// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
func (ic *imageCopier) copyLayers(ctx context.Context) error {
    srcInfos := ic.src.LayerInfos()
    destInfos := []types.BlobInfo{}
    diffIDs := []digest.Digest{}
    numLayers := len(srcInfos)
    updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
    if err != nil {
        return err
@@ -380,30 +447,70 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
        srcInfos = updatedSrcInfos
        srcInfosUpdated = true
    }
    for _, srcLayer := range srcInfos {
        var (
            destInfo types.BlobInfo
            diffID   digest.Digest
            err      error
        )

    type copyLayerData struct {
        destInfo types.BlobInfo
        diffID   digest.Digest
        err      error
    }

    // copyGroup is used to determine if all layers are copied
    copyGroup := sync.WaitGroup{}
    copyGroup.Add(numLayers)

    // copySemaphore is used to limit the number of parallel downloads to
    // avoid malicious images causing troubles and to be nice to servers.
    var copySemaphore *semaphore.Weighted
    if ic.c.copyInParallel {
        copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads))
    } else {
        copySemaphore = semaphore.NewWeighted(int64(1))
    }

    data := make([]copyLayerData, numLayers)
    copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) {
        defer copySemaphore.Release(1)
        defer copyGroup.Done()
        cld := copyLayerData{}
        if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
            // DiffIDs are, currently, needed only when converting from schema1.
            // In which case src.LayerInfos will not have URLs because schema1
            // does not support them.
            if ic.diffIDsAreNeeded {
                return errors.New("getting DiffID for foreign layers is unimplemented")
                cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
            } else {
                cld.destInfo = srcLayer
                logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
            }
            destInfo = srcLayer
            ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name())
        } else {
            destInfo, diffID, err = ic.copyLayer(ctx, srcLayer)
            if err != nil {
                return err
            }
            cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
        }
        destInfos = append(destInfos, destInfo)
        diffIDs = append(diffIDs, diffID)
        data[index] = cld
    }

    func() { // A scope for defer
        progressPool, progressCleanup := ic.c.newProgressPool(ctx)
        defer progressCleanup()

        for i, srcLayer := range srcInfos {
            copySemaphore.Acquire(ctx, 1)
            go copyLayerHelper(i, srcLayer, progressPool)
        }

        // Wait for all layers to be copied
        copyGroup.Wait()
    }()

    destInfos := make([]types.BlobInfo, numLayers)
    diffIDs := make([]digest.Digest, numLayers)
    for i, cld := range data {
        if cld.err != nil {
            return cld.err
        }
        destInfos[i] = cld.destInfo
        diffIDs[i] = cld.diffID
    }

    ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
    if ic.diffIDsAreNeeded {
        ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
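
The rewritten copyLayers above is the standard Go fan-out shape: a WaitGroup joins the workers while a weighted semaphore bounds how many run at once, and each goroutine writes only its own slot of a pre-sized results slice, so no extra locking is needed. A stripped-down sketch of the same pattern (Job, jobs, and run are placeholders for illustration, not names from copy.go):

    sem := semaphore.NewWeighted(int64(maxParallelDownloads))
    var wg sync.WaitGroup
    results := make([]error, len(jobs))
    for i, job := range jobs {
        wg.Add(1)
        sem.Acquire(ctx, 1) // blocks until a download slot frees up
        go func(i int, job Job) {
            defer wg.Done()
            defer sem.Release(1)
            results[i] = run(ctx, job) // one writer per index, so this is race-free
        }(i, job)
    }
    wg.Wait()
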
@@ -466,18 +573,67 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
// newProgressPool creates a *mpb.Progress and a cleanup function.
|
||||
// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
|
||||
func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
|
||||
return pool, func() {
|
||||
cancel()
|
||||
pool.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
|
||||
// is ioutil.Discard, the progress bar's output will be discarded
|
||||
func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
|
||||
// shortDigestLen is the length of the digest used for blobs.
|
||||
const shortDigestLen = 12
|
||||
|
||||
prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
|
||||
// Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
|
||||
maxPrefixLen := len("Copying blob ") + shortDigestLen
|
||||
if len(prefix) > maxPrefixLen {
|
||||
prefix = prefix[:maxPrefixLen]
|
||||
}
|
||||
|
||||
bar := pool.AddBar(info.Size,
|
||||
mpb.BarClearOnComplete(),
|
||||
mpb.PrependDecorators(
|
||||
decor.Name(prefix),
|
||||
),
|
||||
mpb.AppendDecorators(
|
||||
decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
|
||||
),
|
||||
)
|
||||
if c.progressOutput == ioutil.Discard {
|
||||
c.Printf("Copying %s %s\n", kind, info.Digest)
|
||||
}
|
||||
return bar
|
||||
}
|
||||
|
||||
// copyConfig copies config.json, if any, from src to dest.
|
||||
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
|
||||
srcInfo := src.ConfigInfo()
|
||||
if srcInfo.Digest != "" {
|
||||
c.Printf("Copying config %s\n", srcInfo.Digest)
|
||||
configBlob, err := src.ConfigBlob(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
|
||||
}
|
||||
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true)
|
||||
|
||||
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
|
||||
progressPool, progressCleanup := c.newProgressPool(ctx)
|
||||
defer progressCleanup()
|
||||
bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
|
||||
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
bar.SetTotal(int64(len(configBlob)), true)
|
||||
return destInfo, nil
|
||||
}()
|
||||
if err != nil {
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
if destInfo.Digest != srcInfo.Digest {
|
||||
return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
|
||||
@@ -495,57 +651,57 @@ type diffIDResult struct {
|
||||
|
||||
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
-func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
-	// Check if we already have a blob with this digest
-	haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(ctx, srcInfo)
-	if err != nil {
-		return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
-	}
-	// If we already have a cached diffID for this blob, we don't need to compute it
-	diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "")
-	// If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
-	if haveBlob && !diffIDIsNeeded {
-		// Check the blob sizes match, if we were given a size this time
-		if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
-			return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
-		}
-		srcInfo.Size = extantBlobSize
-		// Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
-		blobinfo, err := ic.c.dest.ReapplyBlob(ctx, srcInfo)
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
+	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+
+	// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
+	if !diffIDIsNeeded {
+		reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
 		if err != nil {
-			return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
+			return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
 		}
+		if reused {
+			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
+			bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists")
+			bar.SetTotal(0, true)
+			return blobInfo, cachedDiffID, nil
+		}
-		ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
-		return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err
 	}

 	// Fallback: copy the layer, computing the diffID if we need to do so
-	ic.c.Printf("Copying blob %s\n", srcInfo.Digest)
-	srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo)
+	srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
 	if err != nil {
 		return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
 	}
 	defer srcStream.Close()

-	blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
-		diffIDIsNeeded)
+	bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")
+
+	blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar)
 	if err != nil {
 		return types.BlobInfo{}, "", err
 	}
-	var diffIDResult diffIDResult // = {digest:""}
+
+	diffID := cachedDiffID
 	if diffIDIsNeeded {
 		select {
 		case <-ctx.Done():
 			return types.BlobInfo{}, "", ctx.Err()
-		case diffIDResult = <-diffIDChan:
+		case diffIDResult := <-diffIDChan:
 			if diffIDResult.err != nil {
 				return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
 			}
 			logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
-			ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
+			// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+			// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+			ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
+			diffID = diffIDResult.digest
 		}
 	}
-	return blobInfo, diffIDResult.digest, nil
+
+	bar.SetTotal(srcInfo.Size, true)
+	return blobInfo, diffID, nil
 }
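Aside: the select in copyLayer above consumes a DiffID that is computed on a side channel while the blob streams, racing it against ctx.Done(). A minimal, self-contained Go sketch of that channel pattern follows; the names are hypothetical and this is not the vendored implementation, just the same shape (tee the stream into a hasher goroutine, receive the result over a buffered channel).

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

type digestResult struct {
	digest string
	err    error
}

// copyWithSideDigest copies src to dst while a goroutine hashes a tee'd
// copy of the stream, reporting the result on a channel.
func copyWithSideDigest(dst io.Writer, src io.Reader) (<-chan digestResult, error) {
	pr, pw := io.Pipe()
	ch := make(chan digestResult, 1) // buffered so the sender never blocks forever
	go func() {
		h := sha256.New()
		_, err := io.Copy(h, pr)
		ch <- digestResult{digest: fmt.Sprintf("sha256:%x", h.Sum(nil)), err: err}
	}()
	_, err := io.Copy(dst, io.TeeReader(src, pw))
	pw.CloseWithError(err) // unblock the hasher; CloseWithError(nil) is equivalent to Close()
	return ch, err
}

func main() {
	var sink strings.Builder
	ch, err := copyWithSideDigest(&sink, strings.NewReader("layer bytes"))
	if err != nil {
		panic(err)
	}
	res := <-ch // the real code does this inside a select together with ctx.Done()
	fmt.Println(res.digest, res.err)
}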

 // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate "defer" scope.
@@ -553,7 +709,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
 // perhaps compressing the stream if canCompress,
 // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
 func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
-	diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) {
+	diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
 	var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
 	var diffIDChan chan diffIDResult

@@ -577,7 +733,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
 			return pipeWriter
 		}
 	}
-	blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false) // Sets err to nil on success
+	blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success
 	return blobInfo, diffIDChan, err
 	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
 }
@@ -601,6 +757,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
 		if err != nil {
 			return "", err
 		}
+		defer s.Close()
 		stream = s
 	}

@@ -613,14 +770,14 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
 // and returns a complete blobInfo of the copied blob.
 func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
 	getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
-	canModifyBlob bool, isConfig bool) (types.BlobInfo, error) {
+	canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
 	// The copying happens through a pipeline of connected io.Readers.
 	// === Input: srcStream

 	// === Process input through digestingReader to validate against the expected digest.
 	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
 	// use a separate validation failure indicator.
-	// Note that we don't use a stronger "validationSucceeded" indicator, because
+	// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
 	// dest.PutBlob may detect that the layer already exists, in which case we don't
 	// read stream to the end, and validation does not happen.
 	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
@@ -636,16 +793,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
 	}
 	isCompressed := decompressor != nil

-	// === Report progress using a pb.Reader.
-	bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
-	bar.Output = c.reportWriter
-	bar.SetMaxWidth(80)
-	bar.ShowTimeLeft = false
-	bar.ShowPercent = false
-	bar.Start()
-	destStream = bar.NewProxyReader(destStream)
-	defer bar.Finish()
+	destStream = bar.ProxyReader(destStream)

 	// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
 	var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
@@ -656,8 +804,10 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr

 	// === Deal with layer compression/decompression if necessary
 	var inputInfo types.BlobInfo
+	var compressionOperation types.LayerCompression
 	if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
 		logrus.Debugf("Compressing blob on the fly")
+		compressionOperation = types.Compress
 		pipeReader, pipeWriter := io.Pipe()
 		defer pipeReader.Close()

@@ -670,14 +820,18 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		inputInfo.Size = -1
 	} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
 		logrus.Debugf("Blob will be decompressed")
-		destStream, err = decompressor(destStream)
+		compressionOperation = types.Decompress
+		s, err := decompressor(destStream)
 		if err != nil {
 			return types.BlobInfo{}, err
 		}
+		defer s.Close()
+		destStream = s
 		inputInfo.Digest = ""
 		inputInfo.Size = -1
 	} else {
 		logrus.Debugf("Using original blob without modification")
+		compressionOperation = types.PreserveOriginal
 		inputInfo = srcInfo
 	}

@@ -693,7 +847,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 	}

 	// === Finally, send the layer stream to dest.
-	uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, isConfig)
+	uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
 	if err != nil {
 		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
 	}
@@ -716,6 +870,22 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
 		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
 	}
+	if digestingReader.validationSucceeded {
+		// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
+		// srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader
+		// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
+		// (because inputInfo.Digest == "", this must have been computed afresh).
+		switch compressionOperation {
+		case types.PreserveOriginal:
+			break // Do nothing, we have only one digest and we might not have even verified it.
+		case types.Compress:
+			c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+		case types.Decompress:
+			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+		default:
+			return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
+		}
+	}
 	return uploadedInfo, nil
 }

@@ -726,7 +896,7 @@ func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
 		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
 	}()

-	zipper := gzip.NewWriter(dest)
+	zipper := pgzip.NewWriter(dest)
 	defer zipper.Close()

 	_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()
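Aside: compressGoroutine switches from compress/gzip to github.com/klauspost/pgzip, which compresses independent blocks on all CPUs behind the same writer API. A minimal sketch of the pipe-plus-goroutine shape used above; illustrative only, not the vendored code.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/klauspost/pgzip" // parallel gzip; API mirrors compress/gzip
)

// compress returns a reader producing the gzip-compressed form of src,
// with the compression running in a goroutine behind an io.Pipe.
func compress(src io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		zipper := pgzip.NewWriter(pw)
		_, err := io.Copy(zipper, src)
		if cerr := zipper.Close(); err == nil {
			err = cerr
		}
		pw.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
	}()
	return pr
}

func main() {
	gz, err := ioutil.ReadAll(compress(strings.NewReader("some layer data")))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(gz), "compressed bytes")
}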
39  vendor/github.com/containers/image/directory/directory_dest.go  (generated, vendored)
@@ -117,13 +117,26 @@ func (d *dirImageDestination) MustMatchRuntimeOS() bool {
 	return false
 }

+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return false // N/A, DockerReference() returns nil.
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *dirImageDestination) HasThreadSafePutBlob() bool {
+	return false
+}
+
 // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 // inputInfo.Size is the expected length of stream, if known.
+// May update cache.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
 	blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
 	if err != nil {
 		return types.BlobInfo{}, err
@@ -162,27 +175,27 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
 	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
 }

-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *dirImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
 	if info.Digest == "" {
-		return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
+		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
 	}
 	blobPath := d.ref.layerPath(info.Digest)
 	finfo, err := os.Stat(blobPath)
 	if err != nil && os.IsNotExist(err) {
-		return false, -1, nil
+		return false, types.BlobInfo{}, nil
 	}
 	if err != nil {
-		return false, -1, err
+		return false, types.BlobInfo{}, err
 	}
-	return true, finfo.Size(), nil
-}
+	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+}

-func (d *dirImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
-	return info, nil
-}

 // PutManifest writes manifest to the destination.
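Aside: the HasBlob/ReapplyBlob pair collapses into a single TryReusingBlob with the contract spelled out in the comments above: (true, info, nil) when the blob was reused, (false, {}, nil) when the caller must upload, a non-nil error only on unexpected failures. A cut-down caller-side sketch of that contract follows; BlobInfo and the interfaces here are stand-ins, not the vendored API.

package main

import (
	"context"
	"fmt"
)

type BlobInfo struct {
	Digest string
	Size   int64
}

type destination interface {
	TryReusingBlob(ctx context.Context, info BlobInfo, canSubstitute bool) (bool, BlobInfo, error)
}

// mapDest "contains" whatever is in its map, analogous to
// dirImageDestination's os.Stat check on the layer path.
type mapDest map[string]int64

func (d mapDest) TryReusingBlob(ctx context.Context, info BlobInfo, canSubstitute bool) (bool, BlobInfo, error) {
	size, ok := d[info.Digest]
	if !ok {
		return false, BlobInfo{}, nil // not an error: the caller just uploads
	}
	return true, BlobInfo{Digest: info.Digest, Size: size}, nil
}

func main() {
	dest := mapDest{"sha256:aaaa": 123}
	for _, digest := range []string{"sha256:aaaa", "sha256:bbbb"} {
		reused, info, err := dest.TryReusingBlob(context.Background(), BlobInfo{Digest: digest, Size: -1}, true)
		if err != nil {
			panic(err)
		}
		if reused {
			fmt.Println(digest, "reused, size", info.Size)
		} else {
			fmt.Println(digest, "must be uploaded")
		}
	}
}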
9  vendor/github.com/containers/image/directory/directory_src.go  (generated, vendored)
@@ -48,8 +48,15 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
 	return m, manifest.GuessMIMEType(m), err
 }

+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *dirImageSource) HasThreadSafeGetBlob() bool {
+	return false
+}
+
 // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
 	r, err := os.Open(s.ref.layerPath(info.Digest))
 	if err != nil {
 		return nil, -1, err
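Aside: the new HasThreadSafeGetBlob/HasThreadSafePutBlob hooks let the copy pipeline decide whether blob transfers may run concurrently. An illustrative sketch of such gating follows; the helper is hypothetical, not the vendored copy loop.

package main

import (
	"fmt"
	"sync"
)

// fetchLayers runs fn once per layer, in parallel only when the transport
// reports thread-safe blob access.
func fetchLayers(layers []string, threadSafe bool, fn func(string)) {
	if !threadSafe {
		for _, l := range layers {
			fn(l)
		}
		return
	}
	var wg sync.WaitGroup
	for _, l := range layers {
		wg.Add(1)
		go func(l string) {
			defer wg.Done()
			fn(l)
		}(l)
	}
	wg.Wait()
}

func main() {
	fetchLayers([]string{"sha256:aa", "sha256:bb"}, true, func(l string) {
		fmt.Println("fetching", l)
	})
}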
23  vendor/github.com/containers/image/docker/cache.go  (generated, vendored, new file)
@@ -0,0 +1,23 @@
+package docker
+
+import (
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/types"
+)
+
+// bicTransportScope returns a BICTransportScope appropriate for ref.
+func bicTransportScope(ref dockerReference) types.BICTransportScope {
+	// Blobs can be reused across the whole registry.
+	return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
+}
+
+// newBICLocationReference returns a BICLocationReference appropriate for ref.
+func newBICLocationReference(ref dockerReference) types.BICLocationReference {
+	// Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
+	return types.BICLocationReference{Opaque: ref.ref.Name()}
+}
+
+// parseBICLocationReference returns a repository for encoded lr.
+func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
+	return reference.ParseNormalizedNamed(lr.Opaque)
+}
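Aside: these three helpers define how docker-transport blobs are keyed in the blob-info cache: the reuse scope is the registry domain (blobs can be mounted across a whole registry), and a location is a repository name. A toy cache using the same keying follows; it is not the real types.BlobInfoCache interface, just the idea.

package main

import "fmt"

type locationKey struct {
	scope  string // e.g. "quay.io": the whole registry is one reuse scope
	digest string
}

type toyCache map[locationKey][]string // digest -> repositories known to hold it

func (c toyCache) RecordKnownLocation(scope, digest, repo string) {
	k := locationKey{scope, digest}
	c[k] = append(c[k], repo)
}

func (c toyCache) CandidateLocations(scope, digest string) []string {
	return c[locationKey{scope, digest}]
}

func main() {
	c := toyCache{}
	c.RecordKnownLocation("quay.io", "sha256:abc", "quay.io/ns/repo")
	fmt.Println(c.CandidateLocations("quay.io", "sha256:abc")) // mount candidates
}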
6  vendor/github.com/containers/image/docker/daemon/client.go  (generated, vendored)
@@ -30,13 +30,13 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
 	//
 	// Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set
 	// TLSClientConfig to nil. This can be achieved by using the form `http://`
-	proto, _, _, err := dockerclient.ParseHost(host)
+	url, err := dockerclient.ParseHostURL(host)
 	if err != nil {
 		return nil, err
 	}
 	var httpClient *http.Client
-	if proto != "unix" {
-		if proto == "http" {
+	if url.Scheme != "unix" {
+		if url.Scheme == "http" {
 			httpClient = httpConfig()
 		} else {
 			hc, err := tlsConfig(sys)
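Aside: the daemon client now parses the host with dockerclient.ParseHostURL and branches on url.Scheme rather than on the old proto string. A stand-in using net/url shows the same dispatch; the real ParseHostURL also applies Docker-specific host defaults, which this sketch omits.

package main

import (
	"fmt"
	"net/url"
)

// pickClientKind mirrors the scheme dispatch above: unix sockets get the
// default transport, plain http gets an HTTP-only client, everything else TLS.
func pickClientKind(host string) (string, error) {
	u, err := url.Parse(host) // stand-in for dockerclient.ParseHostURL
	if err != nil {
		return "", err
	}
	switch u.Scheme {
	case "unix":
		return "default (socket)", nil
	case "http":
		return "plain HTTP", nil
	default:
		return "TLS", nil
	}
}

func main() {
	for _, h := range []string{"unix:///var/run/docker.sock", "http://127.0.0.1:2375", "tcp://10.0.0.1:2376"} {
		kind, _ := pickClientKind(h)
		fmt.Println(h, "->", kind)
	}
}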
296  vendor/github.com/containers/image/docker/docker_client.go  (generated, vendored)
@@ -13,15 +13,17 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/pkg/docker/config"
+	"github.com/containers/image/pkg/sysregistriesv2"
 	"github.com/containers/image/pkg/tlsclientconfig"
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/registry/client"
 	"github.com/docker/go-connections/tlsconfig"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -69,30 +71,40 @@ type extensionSignatureList struct {
 }

 type bearerToken struct {
-	Token       string    `json:"token"`
-	AccessToken string    `json:"access_token"`
-	ExpiresIn   int       `json:"expires_in"`
-	IssuedAt    time.Time `json:"issued_at"`
+	Token          string    `json:"token"`
+	AccessToken    string    `json:"access_token"`
+	ExpiresIn      int       `json:"expires_in"`
+	IssuedAt       time.Time `json:"issued_at"`
+	expirationTime time.Time
 }

 // dockerClient is configuration for dealing with a single Docker registry.
 type dockerClient struct {
 	// The following members are set by newDockerClient and do not change afterwards.
-	sys           *types.SystemContext
-	registry      string
+	sys      *types.SystemContext
+	registry string
+
+	// tlsClientConfig is setup by newDockerClient and will be used and updated
+	// by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
+	tlsClientConfig *tls.Config
 	// The following members are not set by newDockerClient and must be set by callers if needed.
 	username      string
 	password      string
-	client        *http.Client
 	signatureBase signatureStorageBase
 	scope         authScope

 	// The following members are detected registry properties:
 	// They are set after a successful detectProperties(), and never change afterwards.
-	scheme             string // Empty value also used to indicate detectProperties() has not yet succeeded.
+	client             *http.Client
+	scheme             string
 	challenges         []challenge
 	supportsSignatures bool
-	// The following members are private state for setupRequestAuth, both are valid if token != nil.
-	token           *bearerToken
-	tokenExpiration time.Time
+
+	// Private state for setupRequestAuth (key: string, value: bearerToken)
+	tokenCache sync.Map
+	// Private state for detectProperties:
+	detectPropertiesOnce  sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
+	detectPropertiesError error     // detectPropertiesError caches the initial error.
 }

 type authScope struct {
@@ -100,6 +112,19 @@ type authScope struct {
 	actions string
 }

+// sendAuth determines whether we need authentication for v2 or v1 endpoint.
+type sendAuth int
+
+const (
+	// v2 endpoint with authentication.
+	v2Auth sendAuth = iota
+	// v1 endpoint with authentication.
+	// TODO: Get v1Auth working
+	// v1Auth
+	// no authentication, works for both v1 and v2.
+	noAuth
+)
+
 func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
 	token := new(bearerToken)
 	if err := json.Unmarshal(blob, &token); err != nil {
@@ -115,6 +140,7 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
 	if token.IssuedAt.IsZero() {
 		token.IssuedAt = time.Now().UTC()
 	}
+	token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
 	return token, nil
 }
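Aside: bearerToken now carries a precomputed expirationTime (IssuedAt plus ExpiresIn), with IssuedAt defaulting to the current time when the auth server omits it. A small sketch of that derivation; the struct here is illustrative, not the vendored one.

package main

import (
	"fmt"
	"time"
)

type bearerToken struct {
	Token          string
	ExpiresIn      int // seconds, from the auth server's JSON
	IssuedAt       time.Time
	expirationTime time.Time
}

// finish fills in the derived field the way newBearerTokenFromJSONBlob does:
// default IssuedAt to "now", then precompute the absolute expiry once.
func (t *bearerToken) finish() {
	if t.IssuedAt.IsZero() {
		t.IssuedAt = time.Now().UTC()
	}
	t.expirationTime = t.IssuedAt.Add(time.Duration(t.ExpiresIn) * time.Second)
}

func (t *bearerToken) expired() bool { return time.Now().After(t.expirationTime) }

func main() {
	tok := &bearerToken{Token: "example", ExpiresIn: 300}
	tok.finish()
	fmt.Println("expires at", tok.expirationTime, "expired:", tok.expired())
}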
@@ -173,7 +199,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
 // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
 func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
 	registry := reference.Domain(ref.ref)
-	username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref))
+	username, password, err := config.GetAuthentication(sys, registry)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error getting username and password")
 	}
@@ -181,19 +207,31 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
 	if err != nil {
 		return nil, err
 	}
-	remoteName := reference.Path(ref.ref)

-	return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName)
+	client, err := newDockerClient(sys, registry, ref.ref.Name())
+	if err != nil {
+		return nil, err
+	}
+	client.username = username
+	client.password = password
+	client.signatureBase = sigBase
+	client.scope.actions = actions
+	client.scope.remoteName = reference.Path(ref.ref)
+	return client, nil
 }

-// newDockerClientWithDetails returns a new dockerClient instance for the given parameters
-func newDockerClientWithDetails(sys *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) {
+// newDockerClient returns a new dockerClient instance for the given registry
+// and reference. The reference is used to query the registry configuration
+// and can either be a registry (e.g, "registry.com[:5000]"), a repository
+// (e.g., "registry.com[:5000][/some/namespace]/repo").
+// Please note that newDockerClient does not set all members of dockerClient
+// (e.g., username and password); those must be set by callers if necessary.
+func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
 	hostName := registry
 	if registry == dockerHostname {
 		registry = dockerRegistry
 	}
-	tr := tlsclientconfig.NewTransport()
-	tr.TLSClientConfig = serverDefault()
+	tlsClientConfig := serverDefault()

 	// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
 	// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
@@ -204,37 +242,40 @@ func newDockerClientWithDetails(sys *types.SystemContext, registry, username, pa
 	if err != nil {
 		return nil, err
 	}
-	if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil {
+	if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
 		return nil, err
 	}

-	if sys != nil && sys.DockerInsecureSkipTLSVerify {
-		tr.TLSClientConfig.InsecureSkipVerify = true
+	// Check if TLS verification shall be skipped (default=false) which can
+	// be specified in the sysregistriesv2 configuration.
+	skipVerify := false
+	reg, err := sysregistriesv2.FindRegistry(sys, reference)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error loading registries")
+	}
+	if reg != nil {
+		skipVerify = reg.Insecure
 	}
+	tlsClientConfig.InsecureSkipVerify = skipVerify

 	return &dockerClient{
-		sys:           sys,
-		registry:      registry,
-		username:      username,
-		password:      password,
-		client:        &http.Client{Transport: tr},
-		signatureBase: sigBase,
-		scope: authScope{
-			actions:    actions,
-			remoteName: remoteName,
-		},
+		sys:             sys,
+		registry:        registry,
+		tlsClientConfig: tlsClientConfig,
 	}, nil
 }

 // CheckAuth validates the credentials by attempting to log into the registry
-// returns an error if an error occcured while making the http request or the status code received was 401
+// returns an error if an error occurred while making the http request or the status code received was 401
 func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
-	newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
+	client, err := newDockerClient(sys, registry, registry)
 	if err != nil {
 		return errors.Wrapf(err, "error creating new docker client")
 	}
+	client.username = username
+	client.password = password

-	resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil)
+	resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
 	if err != nil {
 		return err
 	}
@@ -246,7 +287,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
 	case http.StatusUnauthorized:
 		return ErrUnauthorizedForCredentials
 	default:
-		return errors.Errorf("error occurred with status code %q", resp.StatusCode)
+		return errors.Errorf("error occurred with status code %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode))
 	}
 }
@@ -280,25 +321,65 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
 	v2Res := &V2Results{}
 	v1Res := &V1Results{}

-	// The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
-	// So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
-	if registry == dockerHostname {
-		registry = dockerV1Hostname
-	}
+	// Get credentials from authfile for the underlying hostname
+	username, password, err := config.GetAuthentication(sys, registry)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error getting username and password")
+	}

-	client, err := newDockerClientWithDetails(sys, registry, "", "", "", nil, "")
+	// The /v2/_catalog endpoint has been disabled for docker.io therefore
+	// the call made to that endpoint will fail. So using the v1 hostname
+	// for docker.io for simplicity of implementation and the fact that it
+	// returns search results.
+	hostname := registry
+	if registry == dockerHostname {
+		hostname = dockerV1Hostname
+	}
+
+	client, err := newDockerClient(sys, hostname, registry)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error creating new docker client")
 	}
+	client.username = username
+	client.password = password

-	logrus.Debugf("trying to talk to v2 search endpoint\n")
-	resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil)
+	// Only try the v1 search endpoint if the search query is not empty. If it is
+	// empty skip to the v2 endpoint.
+	if image != "" {
+		// set up the query values for the v1 endpoint
+		u := url.URL{
+			Path: "/v1/search",
+		}
+		q := u.Query()
+		q.Set("q", image)
+		q.Set("n", strconv.Itoa(limit))
+		u.RawQuery = q.Encode()
+
+		logrus.Debugf("trying to talk to v1 search endpoint")
+		resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil)
+		if err != nil {
+			logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
+		} else {
+			defer resp.Body.Close()
+			if resp.StatusCode != http.StatusOK {
+				logrus.Debugf("error getting search results from v1 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
+			} else {
+				if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
+					return nil, err
+				}
+				return v1Res.Results, nil
+			}
+		}
+	}
+
+	logrus.Debugf("trying to talk to v2 search endpoint")
+	resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil)
 	if err != nil {
 		logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
 	} else {
 		defer resp.Body.Close()
 		if resp.StatusCode != http.StatusOK {
-			logrus.Debugf("error getting search results from v2 endpoint %q, status code %q", registry, resp.StatusCode)
+			logrus.Errorf("error getting search results from v2 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
 		} else {
 			if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
 				return nil, err
@@ -316,50 +397,25 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
 		}
 	}

-	// set up the query values for the v1 endpoint
-	u := url.URL{
-		Path: "/v1/search",
-	}
-	q := u.Query()
-	q.Set("q", image)
-	q.Set("n", strconv.Itoa(limit))
-	u.RawQuery = q.Encode()
-
-	logrus.Debugf("trying to talk to v1 search endpoint\n")
-	resp, err = client.makeRequest(ctx, "GET", u.String(), nil, nil)
-	if err != nil {
-		logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
-	} else {
-		defer resp.Body.Close()
-		if resp.StatusCode != http.StatusOK {
-			logrus.Debugf("error getting search results from v1 endpoint %q, status code %q", registry, resp.StatusCode)
-		} else {
-			if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
-				return nil, err
-			}
-			return v1Res.Results, nil
-		}
-	}

 	return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
 }

 // makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
 // The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
-func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
+func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) {
 	if err := c.detectProperties(ctx); err != nil {
 		return nil, err
 	}

 	url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
-	return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, true)
+	return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
 }

 // makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
 // streamLen, if not -1, specifies the length of the data expected on stream.
 // makeRequest should generally be preferred.
 // TODO(runcom): too many arguments here, use a struct
-func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) {
+func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
 	req, err := http.NewRequest(method, url, stream)
 	if err != nil {
 		return nil, err
@@ -377,8 +433,8 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
 	if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
 		req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
 	}
-	if sendAuth {
-		if err := c.setupRequestAuth(req); err != nil {
+	if auth == v2Auth {
+		if err := c.setupRequestAuth(req, extraScope); err != nil {
 			return nil, err
 		}
 	}
@@ -397,7 +453,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
 // 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
 //
 // debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up
-func (c *dockerClient) setupRequestAuth(req *http.Request) error {
+func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error {
 	if len(c.challenges) == 0 {
 		return nil
 	}
@@ -409,24 +465,27 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
 		req.SetBasicAuth(c.username, c.password)
 		return nil
 	case "bearer":
-		if c.token == nil || time.Now().After(c.tokenExpiration) {
-			realm, ok := challenge.Parameters["realm"]
-			if !ok {
-				return errors.Errorf("missing realm in bearer auth challenge")
-			}
-			service, _ := challenge.Parameters["service"] // Will be "" if not present
-			var scope string
-			if c.scope.remoteName != "" && c.scope.actions != "" {
-				scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
-			}
-			token, err := c.getBearerToken(req.Context(), realm, service, scope)
+		cacheKey := ""
+		scopes := []authScope{c.scope}
+		if extraScope != nil {
+			// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
+			cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
+			scopes = append(scopes, *extraScope)
+		}
+		var token bearerToken
+		t, inCache := c.tokenCache.Load(cacheKey)
+		if inCache {
+			token = t.(bearerToken)
+		}
+		if !inCache || time.Now().After(token.expirationTime) {
+			t, err := c.getBearerToken(req.Context(), challenge, scopes)
 			if err != nil {
 				return err
 			}
-			c.token = token
-			c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+			token = *t
+			c.tokenCache.Store(cacheKey, token)
 		}
-		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
+		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
 		return nil
 	default:
 		logrus.Debugf("no handler for %s authentication", challenge.Scheme)
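Aside: setupRequestAuth replaces the single token/tokenExpiration pair with a sync.Map keyed by the extra scope, so requests that need different repository scopes (for example, cross-repository blob mounts) no longer evict each other's tokens. A sketch of that cache shape follows; like the real code, it may fetch twice under a race, which is harmless.

package main

import (
	"fmt"
	"sync"
	"time"
)

type token struct {
	value   string
	expires time.Time
}

type client struct {
	tokenCache sync.Map // key: scope string, value: token
}

// tokenFor returns a cached, unexpired token for the scope key,
// fetching and storing a fresh one otherwise.
func (c *client) tokenFor(key string, fetch func() token) token {
	if v, ok := c.tokenCache.Load(key); ok {
		t := v.(token)
		if time.Now().Before(t.expires) {
			return t
		}
	}
	t := fetch()
	c.tokenCache.Store(key, t)
	return t
}

func main() {
	c := &client{}
	fetch := func() token { return token{value: "fresh", expires: time.Now().Add(time.Minute)} }
	fmt.Println(c.tokenFor("repo:ns/app:pull", fetch).value)
	fmt.Println(c.tokenFor("repo:ns/app:pull", fetch).value) // served from the cache
}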
@@ -436,23 +495,34 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
 	return nil
 }

-func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
+func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) {
+	realm, ok := challenge.Parameters["realm"]
+	if !ok {
+		return nil, errors.Errorf("missing realm in bearer auth challenge")
+	}
+
 	authReq, err := http.NewRequest("GET", realm, nil)
 	if err != nil {
 		return nil, err
 	}
 	authReq = authReq.WithContext(ctx)
 	getParams := authReq.URL.Query()
-	if service != "" {
+	if c.username != "" {
+		getParams.Add("account", c.username)
+	}
+	if service, ok := challenge.Parameters["service"]; ok && service != "" {
 		getParams.Add("service", service)
 	}
-	if scope != "" {
-		getParams.Add("scope", scope)
+	for _, scope := range scopes {
+		if scope.remoteName != "" && scope.actions != "" {
+			getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+		}
 	}
 	authReq.URL.RawQuery = getParams.Encode()
 	if c.username != "" && c.password != "" {
 		authReq.SetBasicAuth(c.username, c.password)
 	}
 	logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
 	tr := tlsclientconfig.NewTransport()
 	// TODO(runcom): insecure for now to contact the external token service
 	tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
@@ -468,7 +538,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
 	case http.StatusOK:
 		break
 	default:
-		return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
+		return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL)
 	}
 	tokenBlob, err := ioutil.ReadAll(res.Body)
 	if err != nil {
@@ -478,24 +548,29 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
 	return newBearerTokenFromJSONBlob(tokenBlob)
 }

-// detectProperties detects various properties of the registry.
-// See the dockerClient documentation for members which are affected by this.
-func (c *dockerClient) detectProperties(ctx context.Context) error {
-	if c.scheme != "" {
-		return nil
-	}
+// detectPropertiesHelper performs the work of detectProperties which executes
+// it at most once.
+func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
+	// We overwrite the TLS clients `InsecureSkipVerify` only if explicitly
+	// specified by the system context
+	if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
+		c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
+	}
+	tr := tlsclientconfig.NewTransport()
+	tr.TLSClientConfig = c.tlsClientConfig
+	c.client = &http.Client{Transport: tr}

 	ping := func(scheme string) error {
 		url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
-		resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
-		logrus.Debugf("Ping %s err %#v", url, err)
+		resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
 		if err != nil {
+			logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
 			return err
 		}
 		defer resp.Body.Close()
 		logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
 		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
-			return errors.Errorf("error pinging registry %s, response code %d", c.registry, resp.StatusCode)
+			return errors.Errorf("error pinging registry %s, response code %d (%s)", c.registry, resp.StatusCode, http.StatusText(resp.StatusCode))
 		}
 		c.challenges = parseAuthHeader(resp.Header)
 		c.scheme = scheme
@@ -503,7 +578,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
 		return nil
 	}
 	err := ping("https")
-	if err != nil && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify {
+	if err != nil && c.tlsClientConfig.InsecureSkipVerify {
 		err = ping("http")
 	}
 	if err != nil {
@@ -514,9 +589,9 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
 	// best effort to understand if we're talking to a V1 registry
 	pingV1 := func(scheme string) bool {
 		url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
-		resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
-		logrus.Debugf("Ping %s err %#v", url, err)
+		resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
 		if err != nil {
+			logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
 			return false
 		}
 		defer resp.Body.Close()
@@ -527,7 +602,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
 		return true
 	}
 	isV1 := pingV1("https")
-	if !isV1 && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify {
+	if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
 		isV1 = pingV1("http")
 	}
 	if isV1 {
@@ -537,11 +612,18 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
 	return err
 }

+// detectProperties detects various properties of the registry.
+// See the dockerClient documentation for members which are affected by this.
+func (c *dockerClient) detectProperties(ctx context.Context) error {
+	c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) })
+	return c.detectPropertiesError
+}
+
 // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
 // using the original data structures.
 func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
 	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
-	res, err := c.makeRequest(ctx, "GET", path, nil, nil)
+	res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
 	if err != nil {
 		return nil, err
 	}
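Aside: detectProperties is now memoized with sync.Once, so the registry is pinged at most once per dockerClient and every later caller sees the cached result, including a cached error. A minimal sketch of that pattern:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type client struct {
	detectOnce sync.Once
	detectErr  error
}

// detectProperties runs the expensive probe at most once and replays its
// result to every later caller, exactly the sync.Once shape in the diff.
func (c *client) detectProperties() error {
	c.detectOnce.Do(func() {
		fmt.Println("pinging registry...") // printed a single time
		c.detectErr = errors.New("registry unreachable (example)")
	})
	return c.detectErr
}

func main() {
	c := &client{}
	fmt.Println(c.detectProperties())
	fmt.Println(c.detectProperties()) // no second ping; same cached error
}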
6  vendor/github.com/containers/image/docker/docker_image.go  (generated, vendored)
@@ -25,7 +25,7 @@ type Image struct {
 // a client to the registry hosting the given image.
 // The caller must call .Close() on the returned Image.
 func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
-	s, err := newImageSource(sys, ref)
+	s, err := newImageSource(ctx, sys, ref)
 	if err != nil {
 		return nil, err
 	}
@@ -66,14 +66,14 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
 	tags := make([]string, 0)

 	for {
-		res, err := client.makeRequest(ctx, "GET", path, nil, nil)
+		res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
 		if err != nil {
 			return nil, err
 		}
 		defer res.Body.Close()
 		if res.StatusCode != http.StatusOK {
 			// print url also
-			return nil, errors.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode)
+			return nil, errors.Errorf("Invalid status code returned when fetching tags list %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
 		}

 		var tagsHolder struct {
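Aside: a small usage sketch of the exported tag-listing entry point patched above, assuming the vendored containers/image API of this vintage (the reference string is illustrative, and a nil SystemContext uses defaults):

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/docker"
)

func main() {
	// docker.ParseReference takes the transport-less form, e.g. "//repo[:tag]".
	ref, err := docker.ParseReference("//quay.io/libpod/alpine")
	if err != nil {
		panic(err)
	}
	tags, err := docker.GetRepositoryTags(context.Background(), nil, ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(tags)
}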
199  vendor/github.com/containers/image/docker/docker_image_dest.go  (generated, vendored)
@@ -12,9 +12,11 @@ import (
 	"net/url"
 	"os"
 	"path/filepath"
+	"strings"

 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"
+	"github.com/containers/image/pkg/blobinfocache/none"
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/registry/api/errcode"
 	"github.com/docker/distribution/registry/api/v2"
@@ -95,6 +97,13 @@ func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
 	return false
 }

+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
+}
+
 // sizeCounter is an io.Writer which only counts the total size of its input.
 type sizeCounter struct{ size int64 }

@@ -103,27 +112,36 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
 	return len(p), nil
 }

+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
+	return true
+}
+
 // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 // inputInfo.Size is the expected length of stream, if known.
+// May update cache.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
 	if inputInfo.Digest.String() != "" {
-		haveBlob, size, err := d.HasBlob(ctx, inputInfo)
+		// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
+		// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
+		// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
+		haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false)
 		if err != nil {
 			return types.BlobInfo{}, err
 		}
 		if haveBlob {
-			return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+			return reusedInfo, nil
 		}
 	}

 	// FIXME? Chunked upload, progress reporting, etc.
 	uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
 	logrus.Debugf("Uploading %s", uploadPath)
-	res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil)
+	res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil)
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
@@ -140,7 +158,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	digester := digest.Canonical.Digester()
 	sizeCounter := &sizeCounter{}
 	tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
-	res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
+	res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil)
 	if err != nil {
 		logrus.Debugf("Error uploading layer chunked, response %#v", res)
 		return types.BlobInfo{}, err
@@ -153,13 +171,13 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 		return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
 	}

-	// FIXME: DELETE uploadLocation on failure
+	// FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)

 	locationQuery := uploadLocation.Query()
 	// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
 	locationQuery.Set("digest", computedDigest.String())
 	uploadLocation.RawQuery = locationQuery.Encode()
-	res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true)
+	res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
@@ -170,21 +188,17 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	}

 	logrus.Debugf("Upload of layer %s complete", computedDigest)
+	cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref))
 	return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
 }

-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
+// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
 // it returns a non-nil error only on an unexpected failure.
-func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
-	if info.Digest == "" {
-		return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
-	}
-	checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
-
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
+	checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
 	logrus.Debugf("Checking %s", checkPath)
-	res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil)
+	res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
 	if err != nil {
 		return false, -1, err
 	}
@@ -195,17 +209,137 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
 		return true, getBlobSize(res), nil
 	case http.StatusUnauthorized:
 		logrus.Debugf("... not authorized")
-		return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
+		return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
 	case http.StatusNotFound:
 		logrus.Debugf("... not present")
 		return false, -1, nil
 	default:
-		return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
+		return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
 	}
 }

-func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
-	return info, nil
+// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
+	u := url.URL{
+		Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
+		RawQuery: url.Values{
+			"mount": {srcDigest.String()},
+			"from":  {reference.Path(srcRepo)},
+		}.Encode(),
+	}
+	mountPath := u.String()
+	logrus.Debugf("Trying to mount %s", mountPath)
+	res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	switch res.StatusCode {
+	case http.StatusCreated:
+		logrus.Debugf("... mount OK")
+		return nil
+	case http.StatusAccepted:
+		// Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
+		// Abort, and let the ultimate caller do an upload when it's ready, instead.
+		// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
+		uploadLocation, err := res.Location()
+		if err != nil {
+			return errors.Wrap(err, "Error determining upload URL after a mount attempt")
+		}
+		logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
+		res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
+		if err != nil {
+			logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
+		} else {
+			defer res2.Body.Close()
+			if res2.StatusCode != http.StatusNoContent {
+				logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode))
+			}
+		}
+		// Anyway, if canceling the upload fails, ignore it and return the more important error:
+		return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+	default:
+		logrus.Debugf("Error mounting, response %#v", *res)
+		return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
+	}
+}
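Aside: mountBlob drives the registry's cross-repository mount endpoint, POST /v2/<dest-repo>/blobs/uploads/?mount=<digest>&from=<src-repo>, where 201 Created means the blob was mounted and 202 Accepted means the registry fell back to starting an ordinary upload session that the client then tries to cancel. A sketch of the URL construction and status handling, grounded in the function above:

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// mountURL builds the cross-repository mount request used by mountBlob.
func mountURL(destRepo, srcRepo, digest string) string {
	u := url.URL{
		Path: fmt.Sprintf("/v2/%s/blobs/uploads/", destRepo),
		RawQuery: url.Values{
			"mount": {digest},
			"from":  {srcRepo},
		}.Encode(),
	}
	return u.String()
}

// interpretMount maps the registry's answer the way mountBlob does.
func interpretMount(status int) string {
	switch status {
	case http.StatusCreated: // 201: the blob was mounted, no upload needed
		return "mounted"
	case http.StatusAccepted: // 202: mount ignored; an ordinary upload session was started
		return "upload started instead (cancel it)"
	default:
		return "error"
	}
}

func main() {
	fmt.Println(mountURL("ns/dest", "ns/src", "sha256:abc"))
	fmt.Println(interpretMount(201))
}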
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
|
||||
// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
// May use and/or update cache.
|
||||
func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
|
||||
if info.Digest == "" {
|
||||
return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||
}
|
||||
|
||||
// First, check whether the blob happens to already exist at the destination.
|
||||
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
|
||||
if err != nil {
|
||||
return false, types.BlobInfo{}, err
|
||||
}
|
||||
if exists {
|
||||
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
|
||||
return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
|
||||
}
|
||||
|
||||
// Then try reusing blobs from other locations.
|
||||
for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
|
||||
candidateRepo, err := parseBICLocationReference(candidate.Location)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
|
||||
continue
|
||||
}
|
||||
logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
|
||||
|
||||
// Sanity checks:
|
||||
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
|
||||
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
|
||||
continue
|
||||
}
|
||||
if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
|
||||
logrus.Debug("... Already tried the primary destination")
|
||||
continue
|
||||
}
|
||||
|
||||
// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
|
||||
|
||||
// Checking candidateRepo, and mounting from it, requires an
|
||||
// expanded token scope.
|
||||
extraScope := &authScope{
|
||||
remoteName: reference.Path(candidateRepo),
|
||||
actions: "pull",
|
||||
}
|
||||
// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
|
||||
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
|
||||
// So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
|
||||
// On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
|
||||
// Even worse, docker/distribution does not actually reasonably implement canceling uploads
|
||||
// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
|
||||
// so, be a nice client and don't create unnecesary upload sessions on the server.
|
||||
exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
|
||||
if err != nil {
|
||||
logrus.Debugf("... Failed: %v", err)
|
||||
continue
|
||||
}
|
||||
if !exists {
|
||||
// FIXME? Should we drop the blob from cache here (and elsewhere?)?
|
||||
continue // logrus.Debug() already happened in blobExists
|
||||
}
|
||||
if candidateRepo.Name() != d.ref.ref.Name() {
|
||||
if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
|
||||
logrus.Debugf("... Mount failed: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
|
||||
return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
|
||||
}
|
||||
|
||||
return false, types.BlobInfo{}, nil
|
||||
}
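
The intended calling pattern (attempt reuse first, upload only on a miss) is worth spelling out. A schematic sketch with deliberately reduced local stand-ins for `types.BlobInfo` and the destination interface; these are simplified placeholders, not the real containers/image types or its copy pipeline:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

// blobInfo and blobDestination are reduced stand-ins for the
// types.BlobInfo / ImageDestination shapes used above.
type blobInfo struct {
	Digest string
	Size   int64
}

type blobDestination interface {
	TryReusingBlob(ctx context.Context, info blobInfo, canSubstitute bool) (bool, blobInfo, error)
	PutBlob(ctx context.Context, stream io.Reader, info blobInfo) (blobInfo, error)
}

// copyBlob shows the intended call order: attempt reuse first, and only
// stream the blob when the destination cannot reuse it.
func copyBlob(ctx context.Context, dest blobDestination, stream io.Reader, info blobInfo) error {
	reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, true)
	if err != nil {
		return err
	}
	if reused {
		fmt.Printf("reused %s (%d bytes), skipping upload\n", reusedInfo.Digest, reusedInfo.Size)
		return nil
	}
	_, err = dest.PutBlob(ctx, stream, info)
	return err
}

// nopDest pretends every blob is already present, so copyBlob never uploads.
type nopDest struct{}

func (nopDest) TryReusingBlob(_ context.Context, info blobInfo, _ bool) (bool, blobInfo, error) {
	return true, info, nil
}
func (nopDest) PutBlob(_ context.Context, _ io.Reader, info blobInfo) (blobInfo, error) {
	return info, nil
}

func main() {
	info := blobInfo{Digest: "sha256:example", Size: 4}
	_ = copyBlob(context.Background(), nopDest{}, strings.NewReader("data"), info)
}
```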

// PutManifest writes manifest to the destination.
@@ -230,7 +364,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) erro
	if mimeType != "" {
		headers["Content-Type"] = []string{mimeType}
	}
	res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m))
	res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
	if err != nil {
		return err
	}
@@ -257,14 +391,29 @@ func isManifestInvalidError(err error) bool {
	if !ok || len(errors) == 0 {
		return false
	}
	ec, ok := errors[0].(errcode.ErrorCoder)
	err = errors[0]
	ec, ok := err.(errcode.ErrorCoder)
	if !ok {
		return false
	}

	switch ec.ErrorCode() {
	// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
	case v2.ErrorCodeManifestInvalid:
		return true
	// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
	// when uploading to a tag (because it can’t find a matching tag inside the manifest)
	return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
	case v2.ErrorCodeTagInvalid:
		return true
	// ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
	// uploading an OCI manifest that is (correctly, according to the spec) missing
	// a top-level media type. See libpod issue #1719
	// FIXME: remove this case when ECR behavior is fixed
	case errcode.ErrorCodeUnsupported:
		return strings.Contains(err.Error(), "Invalid JSON syntax")
	default:
		return false
	}
}
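
The aggregate error shape being unpacked here comes from `github.com/docker/distribution/registry/api/errcode`: registry error bodies parse into an `errcode.Errors` slice whose elements implement `errcode.ErrorCoder`. A small runnable sketch of the same classification, with a fabricated error value standing in for a real registry response:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func manifestInvalid(err error) bool {
	errs, ok := err.(errcode.Errors)
	if !ok || len(errs) == 0 {
		return false
	}
	ec, ok := errs[0].(errcode.ErrorCoder)
	if !ok {
		return false
	}
	// Same decision as isManifestInvalidError above, minus the ECR special case.
	return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
}

func main() {
	// Fabricated input; a real value would come from client.HandleErrorResponse.
	err := errcode.Errors{v2.ErrorCodeManifestInvalid.WithMessage("schema2 not accepted")}
	fmt.Println(manifestInvalid(err)) // true
}
```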

func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
@@ -435,7 +584,7 @@ sigExists:
	}

	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
	res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body))
	res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
	if err != nil {
		return err
	}
vendor/github.com/containers/image/docker/docker_image_src.go (generated, vendored; 106 changed lines)
@@ -13,9 +13,10 @@ import (

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/manifest"
	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
	"github.com/docker/distribution/registry/client"
	"github.com/opencontainers/go-digest"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)
@@ -30,15 +31,65 @@ type dockerImageSource struct {

// newImageSource creates a new ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
	c, err := newDockerClientFromRef(sys, ref, false, "pull")
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
	registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
	if err != nil {
		return nil, errors.Wrapf(err, "error loading registries configuration")
	}
	if registry == nil {
		// No configuration was found for the provided reference, so use the
		// equivalent of a default configuration.
		registry = &sysregistriesv2.Registry{
			Endpoint: sysregistriesv2.Endpoint{
				Location: ref.ref.String(),
			},
			Prefix: ref.ref.String(),
		}
	}

	primaryDomain := reference.Domain(ref.ref)
	// Check all endpoints for the manifest availability. If we find one that does
	// contain the image, it will be used for all future pull actions. Always try the
	// non-mirror original location last; this both transparently handles the case
	// of no mirrors configured, and ensures we return the error encountered when
	// accessing the upstream location if all endpoints fail.
	manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint")
	pullSources, err := registry.PullSourcesFromReference(ref.ref)
	if err != nil {
		return nil, err
	}
	return &dockerImageSource{
		ref: ref,
		c:   c,
	}, nil
	for _, pullSource := range pullSources {
		logrus.Debugf("Trying to pull %q", pullSource.Reference)
		dockerRef, err := newReference(pullSource.Reference)
		if err != nil {
			return nil, err
		}

		endpointSys := sys
		// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
		if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(dockerRef.ref) != primaryDomain {
			copy := *endpointSys
			copy.DockerAuthConfig = nil
			endpointSys = &copy
		}

		client, err := newDockerClientFromRef(endpointSys, dockerRef, false, "pull")
		if err != nil {
			return nil, err
		}
		client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure

		testImageSource := &dockerImageSource{
			ref: dockerRef,
			c:   client,
		}

		manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx)
		if manifestLoadErr == nil {
			return testImageSource, nil
		}
	}
	return nil, manifestLoadErr
}

// Reference returns the reference used to set up this source, _as specified by the user_
@@ -89,7 +140,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
	path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
	headers := make(map[string][]string)
	headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes
	res, err := s.c.makeRequest(ctx, "GET", path, headers, nil)
	res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
	if err != nil {
		return nil, "", err
	}
@@ -137,20 +188,20 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
		err  error
	)
	for _, url := range urls {
		resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, false)
		resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
		if err == nil {
			if resp.StatusCode != http.StatusOK {
				err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
				err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
				logrus.Debug(err)
				continue
			}
			break
		}
	}
	if resp.Body != nil && err == nil {
		return resp.Body, getBlobSize(resp), nil
	if err != nil {
		return nil, 0, err
	}
	return nil, 0, err
	return resp.Body, getBlobSize(resp), nil
}

func getBlobSize(resp *http.Response) int64 {
@@ -161,22 +212,30 @@ func getBlobSize(resp *http.Response) int64 {
	return size
}

// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
	return true
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
	if len(info.URLs) != 0 {
		return s.getExternalBlob(ctx, info.URLs)
	}

	path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
	logrus.Debugf("Downloading %s", path)
	res, err := s.c.makeRequest(ctx, "GET", path, nil, nil)
	res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
	if err != nil {
		return nil, 0, err
	}
	if res.StatusCode != http.StatusOK {
		// print url also
		return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode)
		return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
	}
	cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
	return res.Body, getBlobSize(res), nil
}

@@ -274,7 +333,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
	if res.StatusCode == http.StatusNotFound {
		return nil, true, nil
	} else if res.StatusCode != http.StatusOK {
		return nil, false, errors.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
		return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode))
	}
	sig, err := ioutil.ReadAll(res.Body)
	if err != nil {
@@ -310,7 +369,14 @@ func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, i

// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
	c, err := newDockerClientFromRef(sys, ref, true, "push")
	// docker/distribution does not document what action should be used for deleting images.
	//
	// Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it.
	// quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included.
	// OpenShift ignores the action string (both the password and the token are OpenShift API tokens identifying a user).
	//
	// We have to hard-code a single string; luckily both docker/distribution and quay.io support "*" to mean "everything".
	c, err := newDockerClientFromRef(sys, ref, true, "*")
	if err != nil {
		return err
	}
@@ -325,7 +391,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
		return err
	}
	getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
	get, err := c.makeRequest(ctx, "GET", getPath, headers, nil)
	get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil)
	if err != nil {
		return err
	}
@@ -347,7 +413,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere

	// When retrieving the digest from a registry >= 2.3 use the following header:
	// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
	delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil)
	delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
	if err != nil {
		return err
	}
vendor/github.com/containers/image/docker/docker_transport.go (generated, vendored; 12 changed lines)
@@ -61,8 +61,13 @@ func ParseReference(refString string) (types.ImageReference, error) {

// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) {
	return newReference(ref)
}

// newReference returns a dockerReference for a named reference.
func newReference(ref reference.Named) (dockerReference, error) {
	if reference.IsNameOnly(ref) {
		return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
		return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
	}
	// A github.com/distribution/reference value can have a tag and a digest at the same time!
	// The docker/distribution API does not really support that (we can’t ask for an image with a specific
@@ -72,8 +77,9 @@ func NewReference(ref reference.Named) (types.ImageReference, error) {
	_, isTagged := ref.(reference.NamedTagged)
	_, isDigested := ref.(reference.Canonical)
	if isTagged && isDigested {
		return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported")
		return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported")
	}

	return dockerReference{
		ref: ref,
	}, nil
@@ -135,7 +141,7 @@ func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContex
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
	return newImageSource(sys, ref)
	return newImageSource(ctx, sys, ref)
}

// NewImageDestination returns a types.ImageDestination for this reference.
vendor/github.com/containers/image/docker/reference/README.md (generated, vendored; 2 changed lines)
@@ -1,2 +1,2 @@
This is a copy of github.com/docker/distribution/reference as of commit fb0bebc4b64e3881cc52a2478d749845ed76d2a8,
This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8,
except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset.
vendor/github.com/containers/image/docker/reference/normalize.go (generated, vendored; 29 changed lines)
@@ -55,6 +55,35 @@ func ParseNormalizedNamed(s string) (Named, error) {
	return named, nil
}

// ParseDockerRef normalizes the image reference following the docker convention. This is added
// mainly for backward compatibility.
// The reference returned can only be either tagged or digested. If the reference contains both a
// tag and a digest, the function returns the digested reference; e.g. docker.io/library/busybox:latest@
// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
func ParseDockerRef(ref string) (Named, error) {
	named, err := ParseNormalizedNamed(ref)
	if err != nil {
		return nil, err
	}
	if _, ok := named.(NamedTagged); ok {
		if canonical, ok := named.(Canonical); ok {
			// The reference is both tagged and digested, only
			// return digested.
			newNamed, err := WithName(canonical.Name())
			if err != nil {
				return nil, err
			}
			newCanonical, err := WithDigest(newNamed, canonical.Digest())
			if err != nil {
				return nil, err
			}
			return newCanonical, nil
		}
	}
	return TagNameOnly(named), nil
}
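
The behavior documented above is easy to check in isolation. A small usage sketch against this vendored package; the input strings are illustrative:

```go
package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	// A reference carrying both a tag and a digest collapses to the digested form.
	ref, err := reference.ParseDockerRef(
		"busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String())
	// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa

	// A name-only reference is completed with the default "latest" tag.
	ref, err = reference.ParseDockerRef("quay.io/skopeo/stable")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String()) // quay.io/skopeo/stable:latest
}
```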

// splitDockerDomain splits a repository name into domain and remote-name strings.
// If no valid domain is found, the default domain is used. The repository name
// needs to be validated before calling this function.
vendor/github.com/containers/image/docker/reference/reference.go (generated, vendored; 4 changed lines)
@@ -15,7 +15,7 @@
//	tag := /[\w][\w.-]{0,127}/
//
//	digest := digest-algorithm ":" digest-hex
//	digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
//	digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
//	digest-algorithm-separator := /[+.-_]/
//	digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
//	digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
@@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) {
	var repo repository

	nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
	if nameMatch != nil && len(nameMatch) == 3 {
	if len(nameMatch) == 3 {
		repo.domain = nameMatch[1]
		repo.path = nameMatch[2]
	} else {
vendor/github.com/containers/image/docker/reference/regexp.go (generated, vendored; 10 changed lines)
@@ -20,15 +20,15 @@ var (
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	// domainComponentRegexp restricts the registry domain component of a
	// repository name to start with a component as defined by domainRegexp
	// repository name to start with a component as defined by DomainRegexp
	// and followed by an optional port.
	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

	// domainRegexp defines the structure of potential domain components
	// DomainRegexp defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	domainRegexp = expression(
	DomainRegexp = expression(
		domainComponentRegexp,
		optional(repeated(literal(`.`), domainComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))
@@ -51,14 +51,14 @@ var (
	// regexp has capturing groups for the domain and name part omitting
	// the separating forward slash from either.
	NameRegexp = expression(
		optional(domainRegexp, literal(`/`)),
		optional(DomainRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// anchoredNameRegexp is used to parse a name value, capturing the
	// domain and trailing components.
	anchoredNameRegexp = anchored(
		optional(capture(domainRegexp), literal(`/`)),
		optional(capture(DomainRegexp), literal(`/`)),
		capture(nameComponentRegexp,
			optional(repeated(literal(`/`), nameComponentRegexp))))
vendor/github.com/containers/image/docker/tarfile/dest.go (generated, vendored; 49 changed lines)
@@ -75,13 +75,26 @@ func (d *Destination) MustMatchRuntimeOS() bool {
	return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
	return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
	return false
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
	// Ouch, we need to stream the blob into a temporary file just to determine the size.
	// When the layer is decompressed, we also have to generate the digest on uncompressed data.
	if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
@@ -113,12 +126,12 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
	}

	// Maybe the blob has been already sent
	ok, size, err := d.HasBlob(ctx, inputInfo)
	ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
	if err != nil {
		return types.BlobInfo{}, err
	}
	if ok {
		return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
		return reusedInfo, nil
	}

	if isConfig {
@@ -144,29 +157,21 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
	return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with
// the matching digest which can be reapplied using ReapplyBlob. Unlike
// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
// the blob must also be returned. If the destination does not contain the
// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
// returns a non-nil error only on an unexpected failure.
func (d *Destination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
	if info.Digest == "" {
		return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
	}
	if blob, ok := d.blobs[info.Digest]; ok {
		return true, blob.Size, nil
		return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
	}
	return false, -1, nil
}

// ReapplyBlob informs the image destination that a blob for which HasBlob
// previously returned true would have been passed to PutBlob if it had
// returned false. Like HasBlob and unlike PutBlob, the digest can not be
// empty. If the blob is a filesystem layer, this signifies that the changes
// it describes need to be applied again when composing a filesystem tree.
func (d *Destination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
	return info, nil
	return false, types.BlobInfo{}, nil
}

func (d *Destination) createRepositoriesFile(rootLayerID string) error {
vendor/github.com/containers/image/docker/tarfile/src.go (generated, vendored; 185 changed lines)
@@ -3,13 +3,13 @@ package tarfile
import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"context"
	"encoding/json"
	"io"
	"io/ioutil"
	"os"
	"path"
	"sync"

	"github.com/containers/image/internal/tmpdir"
	"github.com/containers/image/manifest"
@@ -22,8 +22,10 @@ import (
// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
	tarPath              string
	removeTarPathOnClose bool // Remove temp file on close if true
	removeTarPathOnClose bool      // Remove temp file on close if true
	cacheDataLock        sync.Once // Atomic way to ensure that ensureCachedDataIsPresent is only invoked once
	// The following data is only available after ensureCachedDataIsPresent() succeeds
	cacheDataResult error // The return value of ensureCachedDataIsPresent, since it should be as safe to cache as the side effects
	tarManifest     *ManifestItem // nil if not available yet.
	configBytes     []byte
	configDigest    digest.Digest
@@ -43,8 +45,7 @@ type layerInfo struct {
// the source of an image.
// To do for both the NewSourceFromFile and NewSourceFromStream functions

// NewSourceFromFile returns a tarfile.Source for the specified path
// NewSourceFromFile supports both compressed and uncompressed input
// NewSourceFromFile returns a tarfile.Source for the specified path.
func NewSourceFromFile(path string) (*Source, error) {
	file, err := os.Open(path)
	if err != nil {
@@ -52,19 +53,24 @@ func NewSourceFromFile(path string) (*Source, error) {
	}
	defer file.Close()

	reader, err := gzip.NewReader(file)
	// If the file is not compressed, we can just return the file itself
	// as a source. Otherwise we pass the stream to NewSourceFromStream.
	stream, isCompressed, err := compression.AutoDecompress(file)
	if err != nil {
		return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
	}
	defer stream.Close()
	if !isCompressed {
		return &Source{
			tarPath: path,
		}, nil
	}
	defer reader.Close()

	return NewSourceFromStream(reader)
	return NewSourceFromStream(stream)
}
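
A quick usage sketch of the relaxed constructor; "image.tar" is an illustrative path, and the file may now be plain or compressed docker-save output since NewSourceFromFile detects compression itself:

```go
package main

import (
	"fmt"

	"github.com/containers/image/docker/tarfile"
)

func main() {
	// The same call now works for both plain and gzip-compressed archives.
	src, err := tarfile.NewSourceFromFile("image.tar")
	if err != nil {
		panic(err)
	}
	fmt.Printf("opened tarfile source: %T\n", src)
}
```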

// NewSourceFromStream returns a tarfile.Source for the specified inputStream, which must be uncompressed.
// The caller can close the inputStream immediately after NewSourceFromFile returns.
// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromFile returns.
func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
	// FIXME: use SystemContext here.
	// Save inputStream to a temporary file
@@ -81,8 +87,20 @@ func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
		}
	}()

	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
	if _, err := io.Copy(tarCopyFile, inputStream); err != nil {
	// In order to be compatible with docker-load, we need to support
	// auto-decompression (it's also a nice quality-of-life thing to avoid
	// giving users really confusing "invalid tar header" errors).
	uncompressedStream, _, err := compression.AutoDecompress(inputStream)
	if err != nil {
		return nil, errors.Wrap(err, "Error auto-decompressing input")
	}
	defer uncompressedStream.Close()

	// Copy the plain archive to the temporary file.
	//
	// TODO: This can take quite some time, and should ideally be cancellable
	// using a context.Context.
	if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
		return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
	}
	succeeded = true
@@ -184,43 +202,46 @@ func (s *Source) readTarComponent(path string) ([]byte, error) {

// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
func (s *Source) ensureCachedDataIsPresent() error {
	if s.tarManifest != nil {
		return nil
	}
	s.cacheDataLock.Do(func() {
		// Read and parse manifest.json
		tarManifest, err := s.loadTarManifest()
		if err != nil {
			s.cacheDataResult = err
			return
		}

	// Read and parse manifest.json
	tarManifest, err := s.loadTarManifest()
	if err != nil {
		return err
	}
		// Check to make sure length is 1
		if len(tarManifest) != 1 {
			s.cacheDataResult = errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
			return
		}

	// Check to make sure length is 1
	if len(tarManifest) != 1 {
		return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
	}
		// Read and parse config.
		configBytes, err := s.readTarComponent(tarManifest[0].Config)
		if err != nil {
			s.cacheDataResult = err
			return
		}
		var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
		if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
			s.cacheDataResult = errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
			return
		}

	// Read and parse config.
	configBytes, err := s.readTarComponent(tarManifest[0].Config)
	if err != nil {
		return err
	}
	var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
		return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
	}
		knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
		if err != nil {
			s.cacheDataResult = err
			return
		}

	knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
	if err != nil {
		return err
	}

	// Success; commit.
	s.tarManifest = &tarManifest[0]
	s.configBytes = configBytes
	s.configDigest = digest.FromBytes(configBytes)
	s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
	s.knownLayers = knownLayers
	return nil
		// Success; commit.
		s.tarManifest = &tarManifest[0]
		s.configBytes = configBytes
		s.configDigest = digest.FromBytes(configBytes)
		s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
		s.knownLayers = knownLayers
	})
	return s.cacheDataResult
}
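
The change above replaces an ad-hoc "is the field nil yet?" guard with the standard once-only initialization pattern: run the fallible load exactly once and cache its error next to the data, so concurrent and repeated callers all observe the same outcome. A generic, self-contained sketch of that pattern:

```go
package main

import (
	"fmt"
	"sync"
)

// onceLoader runs an expensive, fallible initialization exactly once and
// caches its error alongside the loaded data, mirroring the cacheDataLock /
// cacheDataResult fields introduced above.
type onceLoader struct {
	once   sync.Once
	result error  // cached return value of the single load run
	data   string // only meaningful if result == nil
}

func (l *onceLoader) load() error {
	l.once.Do(func() {
		// All side effects and the error are committed together, so every
		// caller sees the same state regardless of call order.
		l.data = "parsed manifest"
		l.result = nil
	})
	return l.result
}

func main() {
	var l onceLoader
	fmt.Println(l.load(), l.data)
	fmt.Println(l.load(), l.data) // second call returns the cached result
}
```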

// loadTarManifest loads and decodes the manifest.json.
@@ -292,7 +313,25 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
			return nil, err
		}
		if li, ok := unknownLayerSizes[h.Name]; ok {
			li.size = h.Size
			// Since GetBlob will decompress layers that are compressed we need
			// to do the decompression here as well, otherwise we will
			// incorrectly report the size. Pretty critical, since tools like
			// umoci always compress layer blobs. Obviously we only bother with
			// the slower method of checking if it's compressed.
			uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
			if err != nil {
				return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name)
			}
			defer uncompressedStream.Close()

			uncompressedSize := h.Size
			if isCompressed {
				uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
				if err != nil {
					return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name)
				}
			}
			li.size = uncompressedSize
			delete(unknownLayerSizes, h.Name)
		}
	}
@@ -346,20 +385,33 @@ func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest)
	return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
}

type readCloseWrapper struct {
// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input.
type uncompressedReadCloser struct {
	io.Reader
	closeFunc func() error
	underlyingCloser   func() error
	uncompressedCloser func() error
}

func (r readCloseWrapper) Close() error {
	if r.closeFunc != nil {
		return r.closeFunc()
func (r uncompressedReadCloser) Close() error {
	var res error
	if err := r.uncompressedCloser(); err != nil {
		res = err
	}
	return nil
	if err := r.underlyingCloser(); err != nil && res == nil {
		res = err
	}
	return res
}

// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *Source) HasThreadSafeGetBlob() bool {
	return true
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
	if err := s.ensureCachedDataIsPresent(); err != nil {
		return nil, 0, err
	}
@@ -369,10 +421,16 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadClose
	}

	if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
		stream, err := s.openTarComponent(li.path)
		underlyingStream, err := s.openTarComponent(li.path)
		if err != nil {
			return nil, 0, err
		}
		closeUnderlyingStream := true
		defer func() {
			if closeUnderlyingStream {
				underlyingStream.Close()
			}
		}()

		// In order to handle the fact that digests != diffIDs (and thus that a
		// caller which is trying to verify the blob will run into problems),
@@ -386,22 +444,17 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadClose
		// be verifying a "digest" which is not the actual layer's digest (but
		// is instead the DiffID).

		decompressFunc, reader, err := compression.DetectCompression(stream)
		uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
		if err != nil {
			return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest)
			return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
		}

		if decompressFunc != nil {
			reader, err = decompressFunc(reader)
			if err != nil {
				return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest)
			}
		}

		newStream := readCloseWrapper{
			Reader:    reader,
			closeFunc: stream.Close,
		newStream := uncompressedReadCloser{
			Reader:             uncompressedStream,
			underlyingCloser:   underlyingStream.Close,
			uncompressedCloser: uncompressedStream.Close,
		}
		closeUnderlyingStream = false

		return newStream, li.size, nil
	}
vendor/github.com/containers/image/docs/containers-certs.d.5.md (generated, vendored, new file; 28 lines)
@@ -0,0 +1,28 @@
% containers-certs.d(5)

# NAME
containers-certs.d - Directory for storing custom container-registry TLS configurations

# DESCRIPTION
A custom TLS configuration for a container registry can be configured by creating a directory under `/etc/containers/certs.d`.
The name of the directory must correspond to the `host:port` of the registry (e.g., `my-registry.com:5000`).

## Directory Structure
A certs directory can contain one or more files with the following extensions:

* `*.crt` files will be interpreted as CA certificates
* `*.cert` files will be interpreted as client certificates
* `*.key` files will be interpreted as client keys

Note that the client certificate-key pair will be selected by the file name (e.g., `client.{cert,key}`).
An example setup for a registry running at `my-registry.com:5000` may look as follows:
```
/etc/containers/certs.d/    <- Certificate directory
└── my-registry.com:5000    <- Hostname:port
    ├── client.cert         <- Client certificate
    ├── client.key          <- Client key
    └── ca.crt              <- Certificate authority that signed the registry certificate
```
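
The layout above is all a TLS-capable client needs. As a minimal, hedged sketch of what a client does with such a directory (not the library's actual loading code; the paths and registry URL are taken from the example above):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	dir := "/etc/containers/certs.d/my-registry.com:5000"

	// ca.crt: trust anchor for the registry's server certificate.
	caPool := x509.NewCertPool()
	caPEM, err := ioutil.ReadFile(dir + "/ca.crt")
	if err != nil {
		panic(err)
	}
	caPool.AppendCertsFromPEM(caPEM)

	// client.{cert,key}: the client certificate-key pair, matched by file name.
	clientCert, err := tls.LoadX509KeyPair(dir+"/client.cert", dir+"/client.key")
	if err != nil {
		panic(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				RootCAs:      caPool,
				Certificates: []tls.Certificate{clientCert},
			},
		},
	}
	res, err := client.Get("https://my-registry.com:5000/v2/")
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}
```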

# HISTORY
Feb 2019, Originally compiled by Valentin Rothberg <rothberg@redhat.com>
@@ -1,9 +1,9 @@
% POLICY.JSON(5) policy.json Man Page
% CONTAINERS-POLICY.JSON(5) policy.json Man Page
% Miloslav Trmač
% September 2016

# NAME
policy.json - syntax for the signature verification policy file
containers-policy.json - syntax for the signature verification policy file

## DESCRIPTION

@@ -278,4 +278,6 @@ selectively allow individual transports and scopes as desired.
atomic(1)

## HISTORY
August 2018, Rename to containers-policy.json(5) by Valentin Rothberg <vrothberg@suse.com>

September 2016, Originally compiled by Miloslav Trmač <mitr@redhat.com>
vendor/github.com/containers/image/docs/containers-registries.conf.5.md (generated, vendored, new file; 177 lines)
@@ -0,0 +1,177 @@
% CONTAINERS-REGISTRIES.CONF(5) System-wide registry configuration file
% Brent Baude
% Aug 2017

# NAME
containers-registries.conf - Syntax of System Registry Configuration File

# DESCRIPTION
The CONTAINERS-REGISTRIES configuration file is a system-wide configuration
file for container image registries. The file format is TOML.

By default, the configuration file is located at `/etc/containers/registries.conf`.

# FORMATS

## VERSION 2
VERSION 2 is the latest format of `registries.conf` and is currently in
beta. This means that, in general, VERSION 1 should be used in production environments
for now.

### GLOBAL SETTINGS

`unqualified-search-registries`
: An array of _host_[`:`_port_] registries to try when pulling an unqualified image, in order.

### NAMESPACED `[[registry]]` SETTINGS

The bulk of the configuration is represented as an array of `[[registry]]`
TOML tables; the settings may therefore differ among different registries
as well as among different namespaces/repositories within a registry.

#### Choosing a `[[registry]]` TOML table

Given an image name, a single `[[registry]]` TOML table is chosen based on its `prefix` field.

`prefix`
: A prefix of the user-specified image name, i.e. using one of the following formats:
    - _host_[`:`_port_]
    - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]
    - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]`/`_repo_
    - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]`/`_repo_(`:`_tag|`@`_digest_)

The user-specified image name must start with the specified `prefix` (and continue
with the appropriate separator) for a particular `[[registry]]` TOML table to be
considered; (only) the TOML table with the longest match is used.

As a special case, the `prefix` field can be missing; if so, it defaults to the value
of the `location` field (described below).
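
To make the selection rule concrete, here is a small, self-contained Go sketch of longest-prefix matching; the separator handling is a simplified assumption, not the library's exact implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// longestPrefix returns the registry prefix with the longest match for the
// image name; prefixes stands in for the parsed [[registry]] entries.
func longestPrefix(image string, prefixes []string) string {
	best := ""
	for _, p := range prefixes {
		matches := image == p ||
			strings.HasPrefix(image, p+"/") || // namespace/repo separator
			strings.HasPrefix(image, p+":") || // tag separator
			strings.HasPrefix(image, p+"@") // digest separator
		if matches && len(p) > len(best) {
			best = p
		}
	}
	return best
}

func main() {
	prefixes := []string{"example.com", "example.com/foo"}
	fmt.Println(longestPrefix("example.com/foo/image:latest", prefixes))
	// example.com/foo (the longer of the two matching prefixes wins)
}
```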

#### Per-namespace settings

`insecure`
: `true` or `false`.
By default, container runtimes require TLS when retrieving images from a registry.
If `insecure` is set to `true`, unencrypted HTTP as well as TLS connections with untrusted
certificates are allowed.

`blocked`
: `true` or `false`.
If `true`, pulling images with matching names is forbidden.

#### Remapping and mirroring registries

The user-specified image reference is, primarily, a "logical" image name, always used for naming
the image. By default, the image reference also directly specifies the registry and repository
to use, but the following options can be used to redirect the underlying accesses
to different registry servers or locations (e.g. to support configurations with no access to the
internet without having to change `Dockerfile`s, or to add redundancy).

`location`
: Accepts the same format as the `prefix` field, and specifies the physical location
of the `prefix`-rooted namespace.

By default, this is equal to `prefix` (in which case `prefix` can be omitted and the
`[[registry]]` TOML table can only specify `location`).

Example: Given
```
prefix = "example.com/foo"
location = "internal-registry-for-example.net/bar"
```
requests for the image `example.com/foo/myimage:latest` will actually work with the
`internal-registry-for-example.net/bar/myimage:latest` image.

`mirror`
: An array of TOML tables specifying (possibly-partial) mirrors for the
`prefix`-rooted namespace.

The mirrors are attempted in the specified order; the first one that can be
contacted and contains the image will be used (and if none of the mirrors contains the image,
the primary location specified by the `registry.location` field, or using the unmodified
user-specified reference, is tried last).

Each TOML table in the `mirror` array can contain the following fields, with the same semantics
as if specified in the `[[registry]]` TOML table directly:
- `location`
- `insecure`

`mirror-by-digest-only`
: `true` or `false`.
If `true`, mirrors will only be used during pulling if the image reference includes a digest.
Referencing an image by digest ensures that the same image is always used
(whereas referencing an image by a tag may cause different registries to return
different images if the tag mapping is out of sync).

Note that if this is `true`, images referenced by a tag will only use the primary
registry, failing if that registry is not accessible.

*Note*: Redirection and mirrors are currently processed only when reading images, not when pushing
to a registry; that may change in the future.

### EXAMPLE

```
unqualified-search-registries = ["example.com"]

[[registry]]
prefix = "example.com/foo"
insecure = false
blocked = false
location = "internal-registry-for-example.com/bar"

[[registry.mirror]]
location = "example-mirror-0.local/mirror-for-foo"

[[registry.mirror]]
location = "example-mirror-1.local/mirrors/foo"
insecure = true
```
Given the above, a pull of `example.com/foo/image:latest` will try:
1. `example-mirror-0.local/mirror-for-foo/image:latest`
2. `example-mirror-1.local/mirrors/foo/image:latest`
3. `internal-registry-for-example.com/bar/image:latest`

in order, and use the first one that exists.

## VERSION 1
VERSION 1 can be used as an alternative to VERSION 2, but it does not support
using registry mirrors, longest-prefix matches, or location rewriting.

The TOML format is used to build a simple list of registries under three
categories: `registries.search`, `registries.insecure`, and `registries.block`.
You can list multiple registries using a comma-separated list.

Search registries are used when the caller of a container runtime does not fully specify the
container image that they want to execute. These registries are prepended onto the front
of the specified container image until the named image is found at a registry.

Note that insecure registries can be used for any registry, not just the registries listed
under search.

The `registries.insecure` and `registries.block` lists have the same meaning as the
`insecure` and `blocked` fields in VERSION 2.

### EXAMPLE
The following example configuration defines two searchable registries, one
insecure registry, and two blocked registries.

```
[registries.search]
registries = ['registry1.com', 'registry2.com']

[registries.insecure]
registries = ['registry3.com']

[registries.block]
registries = ['registry.untrusted.com', 'registry.unsafe.com']
```

# HISTORY
Mar 2019, Added additional configuration format by Sascha Grunert <sgrunert@suse.com>

Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg <vrothberg@suse.com>

Jun 2018, Updated by Tom Sweeney <tsweeney@redhat.com>

Aug 2017, Originally compiled by Brent Baude <bbaude@redhat.com>
@@ -1,7 +1,11 @@
% REGISTRIES.D(5) Registries.d Man Page
% CONTAINERS-REGISTRIES.D(5) Registries.d Man Page
% Miloslav Trmač
% August 2016
# Registries Configuration Directory

# NAME
containers-registries.d - Directory for various registries configurations

# DESCRIPTION

The registries configuration directory contains configuration for various registries
(servers storing remote container images), and for content stored in them,
@@ -1,10 +1,10 @@
% atomic-signature(5) Atomic signature format
% container-signature(5) Container signature format
% Miloslav Trmač
% March 2017

# Atomic signature format
# Container signature format

This document describes the format of “atomic” container signatures,
This document describes the format of container signatures,
as implemented by the `github.com/containers/image/signature` package.

Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package
@@ -21,7 +21,7 @@ an automated build system as a result of an automated build,
a company IT department approving the image for production) under a specified _identity_
(e.g. an OS base image / specific application, with a specific version).

An atomic container signature consists of a cryptographic signature which identifies
A container signature consists of a cryptographic signature which identifies
and authenticates who signed the image, and carries as a signed payload a JSON document.
The JSON document identifies the image being signed, claims a specific identity of the
image and if applicable, contains other information about the image.
@@ -34,7 +34,7 @@ associating more than one signature with an image.

## The cryptographic signature

As distributed, the atomic container signature is a blob which contains a cryptographic signature
As distributed, the container signature is a blob which contains a cryptographic signature
in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the
JSON document and a signature of the JSON document; it is not a “detached signature” with
independent blobs containing the JSON document and a cryptographic signature).
@@ -46,7 +46,7 @@ that this is not necessary and the configured expected public key provides anoth
of the expected cryptographic signature format. Such metadata may be added in the future for
newly added cryptographic signature formats, if necessary.)

Consumers of atomic container signatures SHOULD verify the cryptographic signature
Consumers of container signatures SHOULD verify the cryptographic signature
against one or more trusted public keys
(e.g. defined in a [policy.json signature verification policy file](policy.json.md))
before parsing or processing the JSON payload in _any_ way,
@@ -89,7 +89,7 @@ and if the semantics of the invalid document, as created by such an implementati
The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`,
each a JSON object.

The `critical` object MUST contain a `type` member identifying the document as an atomic container signature
The `critical` object MUST contain a `type` member identifying the document as a container signature
(as defined [below](#criticaltype))
and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value.

@@ -210,7 +210,7 @@ Consumers still SHOULD reject any signature where a member of an `optional` obje

If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature.

The contents of this string is not defined in detail; however each implementation creating atomic container signatures:
The contents of this string is not defined in detail; however each implementation creating container signatures:

- SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number)
- SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number)
@@ -221,7 +221,7 @@ The contents of this string is not defined in detail; however each implementatio
(e.g. the version of the implementation SHOULD NOT be only a git hash, because they don’t have an easily defined ordering;
the string should contain a version number, or at least a date of the commit).

Consumers of atomic container signatures MAY recognize specific values or sets of values of `optional.creator`
Consumers of container signatures MAY recognize specific values or sets of values of `optional.creator`
(perhaps augmented with `optional.timestamp`),
and MAY change their processing of the signature based on these values
(usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively),
vendor/github.com/containers/image/docs/containers-transports.5.md (generated, vendored, new file; 109 lines)
@@ -0,0 +1,109 @@
% CONTAINERS-TRANSPORTS(5) Containers Transports Man Page
% Valentin Rothberg
% April 2019

## NAME

containers-transports - description of supported transports for copying and storing container images

## DESCRIPTION

Tools which use the containers/image library, including skopeo(1), buildah(1), and podman(1), all share a common syntax for referring to container images in various locations.
The general form of the syntax is _transport:details_, where the details depend on the specified transport; the supported transports are documented below.

### **containers-storage:** [storage-specifier]{image-id|docker-reference[@image-id]}

An image located in a local containers storage.
The format of _docker-reference_ is described in detail in the **docker** transport.

The _storage-specifier_ allows for referencing storage locations on the file system and has the format `[[driver@]root[+run-root][:options]]` where the optional `driver` refers to the storage driver (e.g., overlay or btrfs) and where `root` is an absolute path to the storage's root directory.
The optional `run-root` can be used to specify the run directory of the storage where all temporary writable content is stored.
The optional `options` are a comma-separated list of driver-specific options.
Please refer to containers-storage.conf(5) for further information on the drivers and supported options.

### **dir:**_path_

An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files.
This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

### **docker://**_docker-reference_

An image in a registry implementing the "Docker Registry HTTP API V2".
By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using podman-login(1).
If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using docker-login(1).
The containers-registries.conf(5) further allows for configuring various settings of a registry.

Note that a _docker-reference_ has the following format: `name[:tag|@digest]`.
While the docker transport does not support both a tag and a digest at the same time, some formats like containers-storage do.
Digests can also be used in an image destination as long as the manifest matches the provided digest.
The digest of images can be explored with skopeo-inspect(1).
If `name` does not contain a slash, it is treated as `docker.io/library/name`.
Otherwise, the component before the first slash is checked to see whether it is recognized as a `hostname[:port]` (i.e., it contains either a . or a :, or the component is exactly localhost).
If the first component of name is not recognized as a `hostname[:port]`, `name` is treated as `docker.io/name`.
|
||||
|
||||
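These name-resolution rules can be exercised directly with the containers/image `docker/reference` package; a minimal sketch (the sample names are arbitrary):

```
package main

import (
	"fmt"
	"log"

	"github.com/containers/image/docker/reference"
)

func main() {
	for _, s := range []string{"alpine", "quay.io/libpod/alpine", "localhost/test:latest"} {
		// ParseNormalizedNamed applies the rules described above:
		// no slash → docker.io/library/..., first component without a "." or ":" → docker.io/...
		named, err := reference.ParseNormalizedNamed(s)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-25s → %s (domain %q, path %q)\n",
			s, named.String(), reference.Domain(named), reference.Path(named))
	}
}
```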
### **docker-archive:**_path[:docker-reference]_

An image stored in a docker-save(1)-formatted file.
_docker-reference_ is only used when creating such a file, and it must not contain a digest.
It is further possible to read data from stdin by specifying `docker-archive:/dev/stdin`, but note that the used file must be seekable.

### **docker-daemon:**_docker-reference|algo:digest_

An image stored in the docker daemon's internal storage.
The image must be specified as a _docker-reference_ or in an alternative _algo:digest_ format when being used as an image source.
The _algo:digest_ refers to the image ID reported by docker-inspect(1).

### **oci:**_path[:tag]_

An image compliant with the "Open Container Image Layout Specification" at _path_.
Using a _tag_ is optional and allows for storing multiple images at the same _path_.

### **oci-archive:**_path[:tag]_

An image compliant with the "Open Container Image Layout Specification" stored as a tar(1) archive at _path_.

### **ostree:**_docker-reference[@/absolute/repo/path]_

An image in the local ostree(1) repository.
_/absolute/repo/path_ defaults to _/ostree/repo_.

## Examples

The following examples demonstrate how some of the containers transports can be used.
The examples use skopeo-copy(1) for copying container images.

**Copying an image from one registry to another**:
```
$ skopeo copy docker://docker.io/library/alpine:latest docker://localhost:5000/alpine:latest
```

**Copying an image from a running Docker daemon to a directory in the OCI layout**:
```
$ mkdir alpine-oci
$ skopeo copy docker-daemon:alpine:latest oci:alpine-oci
$ tree alpine-oci
alpine-oci/
├── blobs
│   └── sha256
│       ├── 83ef92b73cf4595aa7fe214ec6747228283d585f373d8f6bc08d66bebab531b7
│       ├── 9a6259e911dcd0a53535a25a9760ad8f2eded3528e0ad5604c4488624795cecc
│       └── ff8df268d29ccbe81cdf0a173076dcfbbea4bb2b6df1dd26766a73cb7b4ae6f7
├── index.json
└── oci-layout

2 directories, 5 files
```

**Copying an image from a registry to the local storage**:
```
$ skopeo copy docker://docker.io/library/alpine:latest containers-storage:alpine:latest
```

## SEE ALSO

docker-login(1), docker-save(1), ostree(1), podman-login(1), skopeo-copy(1), skopeo-inspect(1), tar(1), containers-registries.conf(5), containers-storage.conf(5)

## AUTHORS

Miloslav Trmač <mitr@redhat.com>
Valentin Rothberg <rothberg@redhat.com>
41 vendor/github.com/containers/image/docs/registries.conf.5.md generated vendored
@@ -1,41 +0,0 @@
% registries.conf(5) System-wide registry configuration file
% Brent Baude
% Aug 2017

# NAME
registries.conf - Syntax of System Registry Configuration File

# DESCRIPTION
The REGISTRIES configuration file is a system-wide configuration file for container image
registries. The file format is TOML.

# FORMAT
The TOML format is used to build a simple list format for registries under two
categories: `search` and `insecure`. You can list multiple registries
as a comma-separated list.

Search registries are used when the caller of a container runtime does not fully specify the
container image that they want to execute. These registries are prepended onto the front
of the specified container image until the named image is found at a registry.

Insecure registries: by default container runtimes use TLS when retrieving images
from a registry. If the registry is not set up with TLS, then the container runtime
will fail to pull images from the registry. If you add the registry to the list of
insecure registries then the container runtime will attempt to use standard web protocols to
pull the image. It also allows you to pull from a registry with self-signed certificates.
Note that insecure registries can be used for any registry, not just the
registries listed under search.

The following example configuration defines two searchable registries and one
insecure registry.

```
[registries.search]
registries = ["registry1.com", "registry2.com"]

[registries.insecure]
registries = ["registry3.com"]
```

# HISTORY
Aug 2017, Originally compiled by Brent Baude <bbaude@redhat.com>
29 vendor/github.com/containers/image/image/docker_schema1.go generated vendored
@@ -2,7 +2,6 @@ package image

import (
	"context"
-	"encoding/json"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/manifest"
@@ -25,8 +24,12 @@ func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
}

// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
-func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest {
-	return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)}
+func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) {
+	m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture)
+	if err != nil {
+		return nil, err
+	}
+	return &manifestSchema1{m: m}, nil
}

func (m *manifestSchema1) serialize() ([]byte, error) {
@@ -64,7 +67,7 @@ func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, erro
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
-	return m.m.LayerInfos()
+	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -148,12 +151,12 @@ func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.Manife

// Based on github.com/docker/docker/distribution/pull_v2.go
func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) {
-	if len(m.m.History) == 0 {
-		// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
+	if len(m.m.ExtractedV1Compatibility) == 0 {
+		// What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
		return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
	}
-	if len(m.m.History) != len(m.m.FSLayers) {
-		return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers))
+	if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) {
+		return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
	}
	if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
		return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
@@ -165,14 +168,10 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
	// Build a list of the diffIDs for the non-empty layers.
	diffIDs := []digest.Digest{}
	var layers []manifest.Schema2Descriptor
-	for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- {
-		v2Index := (len(m.m.History) - 1) - v1Index
+	for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- {
+		v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index

-		var v1compat manifest.Schema1V1Compatibility
-		if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil {
-			return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
-		}
-		if !v1compat.ThrowAway {
+		if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway {
			var size int64
			if uploadedLayerInfos != nil {
				size = uploadedLayerInfos[v2Index].Size
32 vendor/github.com/containers/image/image/docker_schema2.go generated vendored
@@ -11,6 +11,7 @@ import (

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/manifest"
+	"github.com/containers/image/pkg/blobinfocache/none"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -18,17 +19,17 @@ import (
	"github.com/sirupsen/logrus"
)

-// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
+// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
// in registries, so let’s use the same values.
-var gzippedEmptyLayer = []byte{
+var GzippedEmptyLayer = []byte{
	31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
	0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}

-// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
-const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
+// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
+const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")

type manifestSchema2 struct {
	src types.ImageSource // May be nil if configBlob is not nil
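The exported digest constant can be checked against the byte slice itself; a small standalone sketch using go-digest (the bytes are copied from the diff above):

```
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// gzippedEmptyLayer as defined above: a gzip-compressed empty tar file.
var gzippedEmptyLayer = []byte{
	31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
	0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}

func main() {
	// Prints sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4,
	// matching GzippedEmptyLayerDigest.
	fmt.Println(digest.FromBytes(gzippedEmptyLayer))
}
```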
@@ -95,11 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
	if m.src == nil {
		return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
	}
-	stream, _, err := m.src.GetBlob(ctx, types.BlobInfo{
-		Digest: m.m.ConfigDescriptor.Digest,
-		Size:   m.m.ConfigDescriptor.Size,
-		URLs:   m.m.ConfigDescriptor.URLs,
-	})
+	stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
	if err != nil {
		return nil, err
	}
@@ -121,7 +118,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
-	return m.m.LayerInfos()
+	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -253,16 +250,18 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
		if historyEntry.EmptyLayer {
			if !haveGzippedEmptyLayer {
				logrus.Debugf("Uploading empty layer during conversion to schema 1")
-				info, err := dest.PutBlob(ctx, bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))}, false)
+				// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
+				// and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
+				info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false)
				if err != nil {
					return nil, errors.Wrap(err, "Error uploading empty layer")
				}
-				if info.Digest != gzippedEmptyLayerDigest {
-					return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest)
+				if info.Digest != GzippedEmptyLayerDigest {
+					return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, GzippedEmptyLayerDigest)
				}
				haveGzippedEmptyLayer = true
			}
-			blobDigest = gzippedEmptyLayerDigest
+			blobDigest = GzippedEmptyLayerDigest
		} else {
			if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
				return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
@@ -308,7 +307,10 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
	}
	history[0].V1Compatibility = string(v1Config)

-	m1 := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
+	m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
+	if err != nil {
+		return nil, err // This should never happen, we should have created all the components correctly.
+	}
	return memoryImageFromManifest(m1), nil
}
9 vendor/github.com/containers/image/image/manifest.go generated vendored
@@ -62,3 +62,12 @@ func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src
		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
	}
}
+
+// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo.
+func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo {
+	blobs := make([]types.BlobInfo, len(layers))
+	for i, layer := range layers {
+		blobs[i] = layer.BlobInfo
+	}
+	return blobs
+}
9 vendor/github.com/containers/image/image/oci.go generated vendored
@@ -7,6 +7,7 @@ import (

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/manifest"
+	"github.com/containers/image/pkg/blobinfocache/none"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -60,11 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
	if m.src == nil {
		return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
	}
-	stream, _, err := m.src.GetBlob(ctx, types.BlobInfo{
-		Digest: m.m.Config.Digest,
-		Size:   m.m.Config.Size,
-		URLs:   m.m.Config.URLs,
-	})
+	stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
	if err != nil {
		return nil, err
	}
@@ -101,7 +98,7 @@ func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error)
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
-	return m.m.LayerInfos()
+	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
121 vendor/github.com/containers/image/manifest/docker_schema1.go generated vendored
@@ -25,25 +25,28 @@ type Schema1History struct {

// Schema1 is a manifest in docker/distribution schema 1.
type Schema1 struct {
-	Name          string            `json:"name"`
-	Tag           string            `json:"tag"`
-	Architecture  string            `json:"architecture"`
-	FSLayers      []Schema1FSLayers `json:"fsLayers"`
-	History       []Schema1History  `json:"history"`
-	SchemaVersion int               `json:"schemaVersion"`
+	Name                     string                   `json:"name"`
+	Tag                      string                   `json:"tag"`
+	Architecture             string                   `json:"architecture"`
+	FSLayers                 []Schema1FSLayers        `json:"fsLayers"`
+	History                  []Schema1History         `json:"history"` // Keep this in sync with ExtractedV1Compatibility!
+	ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"`       // Keep this in sync with History! Does not contain the full config (Schema2V1Image)
+	SchemaVersion            int                      `json:"schemaVersion"`
}
+
+type schema1V1CompatibilityContainerConfig struct {
+	Cmd []string
+}

// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
type Schema1V1Compatibility struct {
-	ID              string    `json:"id"`
-	Parent          string    `json:"parent,omitempty"`
-	Comment         string    `json:"comment,omitempty"`
-	Created         time.Time `json:"created"`
-	ContainerConfig struct {
-		Cmd []string
-	} `json:"container_config,omitempty"`
-	Author    string `json:"author,omitempty"`
-	ThrowAway bool   `json:"throwaway,omitempty"`
+	ID              string                                `json:"id"`
+	Parent          string                                `json:"parent,omitempty"`
+	Comment         string                                `json:"comment,omitempty"`
+	Created         time.Time                             `json:"created"`
+	ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"`
+	Author          string                                `json:"author,omitempty"`
+	ThrowAway       bool                                  `json:"throwaway,omitempty"`
}

// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
@@ -57,11 +60,8 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) {
	if s1.SchemaVersion != 1 {
		return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
	}
-	if len(s1.FSLayers) != len(s1.History) {
-		return nil, errors.New("length of history not equal to number of layers")
-	}
-	if len(s1.FSLayers) == 0 {
-		return nil, errors.New("no FSLayers in manifest")
+	if err := s1.initialize(); err != nil {
+		return nil, err
	}
	if err := s1.fixManifestLayers(); err != nil {
		return nil, err
@@ -70,7 +70,7 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) {
}

// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
-func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 {
+func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
	var name, tag string
	if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
		name = reference.Path(ref)
@@ -78,7 +78,7 @@ func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, hist
			tag = tagged.Tag()
		}
	}
-	return &Schema1{
+	s1 := Schema1{
		Name:          name,
		Tag:           tag,
		Architecture:  architecture,
@@ -86,6 +86,10 @@ func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, hist
		History:       history,
		SchemaVersion: 1,
	}
+	if err := s1.initialize(); err != nil {
+		return nil, err
+	}
+	return &s1, nil
}

// Schema1Clone creates a copy of the supplied Schema1 manifest.
@@ -94,31 +98,51 @@ func Schema1Clone(src *Schema1) *Schema1 {
	return &copy
}

+// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
+func (m *Schema1) initialize() error {
+	if len(m.FSLayers) != len(m.History) {
+		return errors.New("length of history not equal to number of layers")
+	}
+	if len(m.FSLayers) == 0 {
+		return errors.New("no FSLayers in manifest")
+	}
+	m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
+	for i, h := range m.History {
+		if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
+			return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i)
+		}
+	}
+	return nil
+}
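The net effect of the two-value `Schema1FromComponents` signature plus `initialize()` is that the length invariants are checked, and `ExtractedV1Compatibility` is pre-parsed, at construction time rather than on every use. A minimal sketch against this vendored API (the digest and v1Compatibility JSON are placeholder values):

```
package main

import (
	"fmt"
	"log"

	"github.com/containers/image/manifest"
	"github.com/opencontainers/go-digest"
)

func main() {
	fsLayers := []manifest.Schema1FSLayers{
		{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
	}
	history := []manifest.Schema1History{
		{V1Compatibility: `{"id":"0123456789abcdef"}`}, // parsed into ExtractedV1Compatibility by initialize()
	}
	s1, err := manifest.Schema1FromComponents(nil, fsLayers, history, "amd64")
	if err != nil {
		// Fails when len(history) != len(fsLayers), or when a v1Compatibility entry is not valid JSON.
		log.Fatal(err)
	}
	fmt.Println(s1.ExtractedV1Compatibility[0].ID) // 0123456789abcdef
}
```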
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *Schema1) ConfigInfo() types.BlobInfo {
	return types.BlobInfo{}
}

-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *Schema1) LayerInfos() []types.BlobInfo {
-	layers := make([]types.BlobInfo, len(m.FSLayers))
+func (m *Schema1) LayerInfos() []LayerInfo {
+	layers := make([]LayerInfo, len(m.FSLayers))
	for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
-		layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
+		layers[(len(m.FSLayers)-1)-i] = LayerInfo{
+			BlobInfo:   types.BlobInfo{Digest: layer.BlobSum, Size: -1},
+			EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway,
+		}
	}
	return layers
}

// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
-	// Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
+	// Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
	if len(m.FSLayers) != len(layerInfos) {
		return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
	}
	m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
	for i, info := range layerInfos {
-		// (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
+		// (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest,
		// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
		// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
		m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
@@ -144,31 +168,19 @@ func (m *Schema1) Serialize() ([]byte, error) {
// Note that even after this succeeds, m.FSLayers may contain duplicate entries
// (for Dockerfile operations which change the configuration but not the filesystem).
func (m *Schema1) fixManifestLayers() error {
-	type imageV1 struct {
-		ID     string
-		Parent string
-	}
-	// Per the specification, we can assume that len(m.FSLayers) == len(m.History)
-	imgs := make([]*imageV1, len(m.FSLayers))
-	for i := range m.FSLayers {
-		img := &imageV1{}
-
-		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
-			return err
-		}
-
-		imgs[i] = img
-		if err := validateV1ID(img.ID); err != nil {
+	// m.initialize() has verified that len(m.FSLayers) == len(m.History)
+	for _, compat := range m.ExtractedV1Compatibility {
+		if err := validateV1ID(compat.ID); err != nil {
			return err
		}
	}
-	if imgs[len(imgs)-1].Parent != "" {
+	if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" {
		return errors.New("Invalid parent ID in the base layer of the image")
	}
	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})
	var lastID string
-	for _, img := range imgs {
+	for _, img := range m.ExtractedV1Compatibility {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
@@ -177,12 +189,13 @@ func (m *Schema1) fixManifestLayers() error {
		idmap[lastID] = struct{}{}
	}
	// backwards loop so that we keep the remaining indexes after removing items
-	for i := len(imgs) - 2; i >= 0; i-- {
-		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
+	for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- {
+		if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
-		} else if imgs[i].Parent != imgs[i+1].ID {
-			return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
+			m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...)
+		} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
+			return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
		}
	}
	return nil
@@ -209,7 +222,7 @@ func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageI
		DockerVersion: s1.DockerVersion,
		Architecture:  s1.Architecture,
		Os:            s1.OS,
-		Layers:        LayerInfosToStrings(m.LayerInfos()),
+		Layers:        layerInfosToStrings(m.LayerInfos()),
	}
	if s1.Config != nil {
		i.Labels = s1.Config.Labels
@@ -240,11 +253,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
	}
	// Build the history.
	convertedHistory := []Schema2History{}
-	for _, h := range m.History {
-		compat := Schema1V1Compatibility{}
-		if err := json.Unmarshal([]byte(h.V1Compatibility), &compat); err != nil {
-			return nil, errors.Wrapf(err, "error decoding history information")
-		}
+	for _, compat := range m.ExtractedV1Compatibility {
		hitem := Schema2History{
			Created:   compat.Created,
			CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "),
@@ -266,7 +275,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
	raw := make(map[string]*json.RawMessage)
	err = json.Unmarshal(config, &raw)
	if err != nil {
-		return nil, errors.Wrapf(err, "error re-decoding compat image config %#v: %v", s1)
+		return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1)
	}
	// Drop some fields.
	delete(raw, "id")
32 vendor/github.com/containers/image/manifest/docker_schema2.go generated vendored
@@ -18,6 +18,16 @@ type Schema2Descriptor struct {
	URLs      []string `json:"urls,omitempty"`
}

+// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor.
+func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo {
+	return types.BlobInfo{
+		Digest:    desc.Digest,
+		Size:      desc.Size,
+		URLs:      desc.URLs,
+		MediaType: desc.MediaType,
+	}
+}
+
// Schema2 is a manifest in docker/distribution schema 2.
type Schema2 struct {
	SchemaVersion int `json:"schemaVersion"`
@@ -47,8 +57,9 @@ type Schema2HealthConfig struct {
	Test []string `json:",omitempty"`

	// Zero means to inherit. Durations are expressed as integer nanoseconds.
-	Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
-	Timeout  time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+	StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
+	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.

	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
	// Zero means inherit.
@@ -171,19 +182,18 @@ func Schema2Clone(src *Schema2) *Schema2 {

// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *Schema2) ConfigInfo() types.BlobInfo {
-	return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
+	return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
}

-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *Schema2) LayerInfos() []types.BlobInfo {
-	blobs := []types.BlobInfo{}
+func (m *Schema2) LayerInfos() []LayerInfo {
+	blobs := []LayerInfo{}
	for _, layer := range m.LayersDescriptors {
-		blobs = append(blobs, types.BlobInfo{
-			Digest: layer.Digest,
-			Size:   layer.Size,
-			URLs:   layer.URLs,
+		blobs = append(blobs, LayerInfo{
+			BlobInfo:   BlobInfoFromSchema2Descriptor(layer),
+			EmptyLayer: false,
		})
	}
	return blobs
@@ -227,7 +237,7 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
		DockerVersion: s2.DockerVersion,
		Architecture:  s2.Architecture,
		Os:            s2.OS,
-		Layers:        LayerInfosToStrings(m.LayerInfos()),
+		Layers:        layerInfosToStrings(m.LayerInfos()),
	}
	if s2.Config != nil {
		i.Labels = s2.Config.Labels
14 vendor/github.com/containers/image/manifest/manifest.go generated vendored
@@ -50,10 +50,10 @@ var DefaultRequestedManifestMIMETypes = []string{
type Manifest interface {
	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
	ConfigInfo() types.BlobInfo
-	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
	// The Digest field is guaranteed to be provided; Size may be -1.
	// WARNING: The list may contain duplicates, and they are semantically relevant.
-	LayerInfos() []types.BlobInfo
+	LayerInfos() []LayerInfo
	// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
	UpdateLayerInfos(layerInfos []types.BlobInfo) error

@@ -73,6 +73,12 @@ type Manifest interface {
	Serialize() ([]byte, error)
}

+// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos.
+type LayerInfo struct {
+	types.BlobInfo
+	EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
+}
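Since `LayerInfo` embeds `types.BlobInfo`, consumers can keep treating entries as blob infos while consulting `EmptyLayer` when it matters; a minimal sketch (the digests are placeholders):

```
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	"github.com/containers/image/types"
)

// nonEmptyLayers keeps only the layers that are physically represented,
// dropping schema1 “throwaway” entries.
func nonEmptyLayers(infos []manifest.LayerInfo) []types.BlobInfo {
	var blobs []types.BlobInfo
	for _, info := range infos {
		if info.EmptyLayer {
			continue
		}
		blobs = append(blobs, info.BlobInfo)
	}
	return blobs
}

func main() {
	infos := []manifest.LayerInfo{
		{BlobInfo: types.BlobInfo{Digest: "sha256:1111", Size: -1}},
		{BlobInfo: types.BlobInfo{Digest: "sha256:2222", Size: -1}, EmptyLayer: true},
	}
	fmt.Println(len(nonEmptyLayers(infos)), "layer(s) to fetch") // 1 layer(s) to fetch
}
```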
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
@@ -227,9 +233,9 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) {
	}
}

-// LayerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
-func LayerInfosToStrings(infos []types.BlobInfo) []string {
+func layerInfosToStrings(infos []LayerInfo) []string {
	layers := make([]string, len(infos))
	for i, info := range infos {
		layers[i] = info.Digest.String()
26 vendor/github.com/containers/image/manifest/oci.go generated vendored
@@ -10,6 +10,17 @@ import (
	"github.com/pkg/errors"
)

+// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
+func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo {
+	return types.BlobInfo{
+		Digest:      desc.Digest,
+		Size:        desc.Size,
+		URLs:        desc.URLs,
+		Annotations: desc.Annotations,
+		MediaType:   desc.MediaType,
+	}
+}
+
// OCI1 is a manifest.Manifest implementation for OCI images.
// The underlying data from imgspecv1.Manifest is also available.
type OCI1 struct {
@@ -45,16 +56,19 @@ func OCI1Clone(src *OCI1) *OCI1 {

// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *OCI1) ConfigInfo() types.BlobInfo {
-	return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations}
+	return BlobInfoFromOCI1Descriptor(m.Config)
}

-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *OCI1) LayerInfos() []types.BlobInfo {
-	blobs := []types.BlobInfo{}
+func (m *OCI1) LayerInfos() []LayerInfo {
+	blobs := []LayerInfo{}
	for _, layer := range m.Layers {
-		blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType})
+		blobs = append(blobs, LayerInfo{
+			BlobInfo:   BlobInfoFromOCI1Descriptor(layer),
+			EmptyLayer: false,
+		})
	}
	return blobs
}
@@ -101,7 +115,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
		Labels:       v1.Config.Labels,
		Architecture: v1.Architecture,
		Os:           v1.OS,
-		Layers:       LayerInfosToStrings(m.LayerInfos()),
+		Layers:       layerInfosToStrings(m.LayerInfos()),
	}
	return i, nil
}
41 vendor/github.com/containers/image/oci/archive/oci_dest.go generated vendored
@@ -25,7 +25,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociA
	unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys)
	if err != nil {
		if err := tempDirRef.deleteTempDir(); err != nil {
-			return nil, errors.Wrapf(err, "error deleting temp directory", tempDirRef.tempDirectory)
+			return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
		}
		return nil, err
	}
@@ -70,20 +70,39 @@ func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool {
	return d.unpackedDest.MustMatchRuntimeOS()
}

-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
+	return d.unpackedDest.IgnoresEmbeddedDockerReference()
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
-func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
-	return d.unpackedDest.PutBlob(ctx, stream, inputInfo, isConfig)
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
}

-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob
-func (d *ociArchiveImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
-	return d.unpackedDest.HasBlob(ctx, info)
-}
-
-func (d *ociArchiveImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
-	return d.unpackedDest.ReapplyBlob(ctx, info)
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
}

// PutManifest writes manifest to the destination
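Every caller now has to thread a `types.BlobInfoCache` through `PutBlob`; callers without a persistent cache pass the no-op implementation from `pkg/blobinfocache/none`, as the vendored code above does. A hedged sketch of the call shape (the `uploadConfig` helper is hypothetical, and `dest` must be a concrete `types.ImageDestination` obtained elsewhere, e.g. from a transport's NewImageDestination):

```
package main

import (
	"bytes"
	"context"
	"fmt"

	"github.com/containers/image/pkg/blobinfocache/none"
	"github.com/containers/image/types"
)

// uploadConfig is a hypothetical helper demonstrating the new PutBlob signature.
func uploadConfig(ctx context.Context, dest types.ImageDestination, blob []byte) error {
	info, err := dest.PutBlob(ctx, bytes.NewReader(blob),
		types.BlobInfo{Digest: "", Size: int64(len(blob))}, // Digest is optional; it is computed while streaming
		none.NoCache, // no-op BlobInfoCache for callers without a persistent cache
		true)         // isConfig
	if err != nil {
		return err
	}
	fmt.Println("uploaded", info.Digest)
	return nil
}

func main() {
	// Sketch only: constructing a concrete ImageDestination is out of scope here.
	_ = uploadConfig
}
```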
15 vendor/github.com/containers/image/oci/archive/oci_src.go generated vendored
@@ -28,7 +28,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiv
	unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys)
	if err != nil {
		if err := tempDirRef.deleteTempDir(); err != nil {
-			return nil, errors.Wrapf(err, "error deleting temp directory", tempDirRef.tempDirectory)
+			return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
		}
		return nil, err
	}
@@ -76,9 +76,16 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest
	return s.unpackedSrc.GetManifest(ctx, instanceDigest)
}

-// GetBlob returns a stream for the specified blob, and the blob's size.
-func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
-	return s.unpackedSrc.GetBlob(ctx, info)
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool {
+	return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	return s.unpackedSrc.GetBlob(ctx, info, cache)
}

// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
Some files were not shown because too many files have changed in this diff.