Mirror of https://github.com/containers/skopeo.git (synced 2026-01-30 13:58:48 +00:00)

Compare commits: 322 commits
Commit SHAs, in page order (the author, date, and message columns are empty in this mirror):

```text
e079f9d61b ceabc0a404 523b8b44a2 d2d1796eb5 c67e5f7425 1b8686d044
a4de1428f9 524f6c0682 fa18fce7e8 96be1bb155 23c6b42b26 6307635b5f
47e7cda4e9 5dd3b2bffd 12f0e24519 b137741385 233804fedc 0c90e57eaf
8fb4ab3d92 8c9e250801 04aee56a36 4f1fabc2a4 43bc356337 41991bab70
2b5086167f b46d16f48c 9fef0eb3f3 30b0a1741e 945b9dc08f 904b064da4
7ae62af073 7525a79c93 07287b5783 0a2a62ac20 5581c62a3a 6b5bdb7563
2bdffc89c2 65e6449c95 2829f7da9e ece44c2842 0fa335c149 5c0ad57c2c
b2934e7cf6 2af7114ea1 0e1cc9203e e255ccc145 9447a55b61 9fdceeb2b2
18ee5f8119 ab6a17059c 81c5e94850 99dc83062a 4d8ea6729f ac85091ecd
ffa640c2b0 c73bcba7e6 329e1cf61c 854f766dc7 5c73fdbfdc 097549748a
032309941b d93a581fb8 52075ab386 d65ae4b1d7 c32d27f59e 883d65a54a
94728fb73f 520f0e5ddb fa39b49a5c 0490018903 b434c8f424 79de2d9f09
2031e17b3c 5a050c1383 404c5bd341 2134209960 1e8c029562 932b037d66
26a48586a0 683f4263ef ebfa1e936b 509782e78b 776b408f76 fee5981ebf
d9e9604979 3606380bdb 640b967463 b8b9913695 9e2720dfcc b329dd0d4e
1b10352591 bba2874451 0322441640 8868d2ebe4 f19acc1c90 47f24b4097
c2597aab22 47065938da 790620024e 42b01df89e aafae2bc50 e5b9ea5ee6
1c2ff140cb f7c608e65e ec810c91fe 17bea86e89 3e0026d907 3e98377bf2
0658bc80f3 e96a9b0e1b 08c30b8f06 05212df1c5 7ec68dd463 6eb5131b85
736cd7641d 78bd5dd3df ecd675e0a6 5675895460 0f8f870bd3 a51e38e60d
8fe1595f92 2497f500d5 afa92d58f6 958cafb2c0 1d1bf0d393 3094320203
39de98777d 8084f6f4e2 6ef45e5cf1 444b90a9cf 72a3dc17ee 88c748f47a
7e8c89d619 694f915003 a77b409619 1faff791ce 8b8afe0fda 09a120a59b
c769c7789e 3ea3965e5e ee8391db34 e1cc97d9d7 f30756a9bb 33b474b224
485a7aa330 59117e6e3d 8ee3ead743 bc39e4f9b6 3017d87ade d8f1d4572b
41d8dd8b80 bcf3dbbb93 bfc0c5e531 013ebac8d8 fbc2e4f70f 72468d6817
5dec940523 761a6811c1 b3a023f9dd 5aa217fe0d 737438d026 1715c90841
187299a20b 89d8bddd9b ba649c56bf 3456577268 b52e700666 ee32f1f7aa
5af0da9de6 879a6d793f 2734f93e30 2b97124e4a 7815a5801e 501e1be3cf
fc386a6dca 2a134a0ddd 17250d7e8d 65d28709c3 d6c6c78d1b 67ffa00b1d
a581847345 bcd26a4ae4 e38c345f23 0421fb04c2 82186b916f 15eed5beda
81837bd55b 3dec6a1cdf fe14427129 be27588418 fb84437cd1 d9b495ca38
6b93d4794f 5d3849a510 fef142f811 2684e51aa5 e814f9605a 5d136a46ed
b0b750dfa1 e3034e1d91 1a259b76da ae64ff7084 d67d3a4620 196bc48723
1c6c7bc481 6e23a32282 f398c9c035 0144aa8dc5 0df5dcf09c f9baaa6b87
67ff78925b 5c611083f2 976d57ea45 63569fcd63 98b3a13b46 ca3bff6a7c
563a4ac523 14ea9f8bfd 05e38e127e 1ef80d8082 597b6bd204 7e9a664764
79449a358d 2d04db9ac8 3e7a28481c 79225f2e65 e1c1bbf26d c4808f002e
42203b366d 1f11b8b350 ea23621c70 ab2bc6e8d1 c520041b83 e626fca6a7
92b6262224 e8dea9e770 28080c8d5f 0cea6dde02 22482e099a 7aba888e99
c61482d2cf db941ebd8f 7add6fc80b eb9d74090e 61351d44d7 aa73bd9d0d
b08350db15 f63f78225d 60aa4aa82d 37264e21fb fe2591054c fd0c3d7f08
b325cc22b8 5f754820da 43acc747d5 b3dec98757 b1795a08fb 1307cac0c2
dc1567c8bc 22c524b0e0 9a225c3968 0270e5694c 4ff902dab9 64b3bd28e3
d8e506c648 aa6c809e5a 1c27d6918f 9f2491694d 14245f2e24 8a1d480274
78b29a5c2f 20d31daec0 5a8f212630 34e77f9897 93876acc5e 031283efb1
23c54feddd 04e04edbfe cbedcd967e fa08bd7e91 874d119dd9 eb43d93b57
c1a0084bb3 e8fb01e1ed 0543f551c7 27f320b27f c0dffd9b3e 66a97d038e
2e8377a708 a76cfb7dc7 409dce8a89 5b14746045 a3d2e8323a 2be4deb980
5f71547262 6c791a0559 7fd6f66b7f a1b48be22e bb5584bc4c 3e57660394
803619cabf 8c1a69d1f6 24423ce4a7 76b071cf74 407a7d9e70 0d0055df05
63b3be2f13 62c68998d7 4125d741cf bd07ffb9f4 700199c944 40a5f48632
83ca466071 a7e8a9b4d4 3f10c1726d 832eaa1f67 e2b2d25f24 3fa370fa2e
88cff614ed b23cac9c05 c928962ea8 ee011b1bf9
```
.gitignore (vendored, 2 lines changed):

```diff
@@ -1,3 +1,3 @@
-/docs/skopeo.1
+*.1
 /layers-*
 /skopeo
```
.travis.yml:

```diff
@@ -1,3 +1,4 @@
 language: go

 matrix:
   include:
@@ -21,4 +22,4 @@ install:

 script:
 - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then hack/travis_osx.sh ; fi
-- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make check ; fi
+- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make vendor && ./hack/tree_status.sh && make check ; fi
```
CONTRIBUTING.md:

```diff
@@ -8,12 +8,14 @@ that we follow.
 * [Reporting Issues](#reporting-issues)
 * [Submitting Pull Requests](#submitting-pull-requests)
 * [Communications](#communications)
 <!--
 * [Becoming a Maintainer](#becoming-a-maintainer)
 -->

 ## Reporting Issues

 Before reporting an issue, check our backlog of
-[open issues](https://github.com/projectatomic/skopeo/issues)
+[open issues](https://github.com/containers/skopeo/issues)
 to see if someone else has already reported it. If so, feel free to add
 your scenario, or additional information, to the discussion. Or simply
 "subscribe" to it to be notified when it is updated.
@@ -37,9 +39,9 @@ It's ok to just open up a PR with the fix, but make sure you include the same
 information you would have included in an issue - like how to reproduce it.

 PRs for new features should include some background on what use cases the
-new code is trying to address. And, when possible and it makes, try to break-up
+new code is trying to address. When possible and when it makes sense, try to break-up
 larger PRs into smaller ones - it's easier to review smaller
-code changes. But, only if those smaller ones make sense as stand-alone PRs.
+code changes. But only if those smaller ones make sense as stand-alone PRs.

 Regardless of the type of PR, all PRs should include:
 * well documented code changes
@@ -47,9 +49,9 @@ Regardless of the type of PR, all PRs should include:
 * documentation changes

 Squash your commits into logical pieces of work that might want to be reviewed
-separate from the rest of the PRs. But, squashing down to just one commit is ok
-too since in the end the entire PR will be reviewed anyway. When in doubt,
-squash.
+separate from the rest of the PRs. Ideally, each commit should implement a single
+idea, and the PR branch should pass the tests at every commit. GitHub makes it easy
+to review the cumulative effect of many commits; so, when in doubt, use smaller commits.

 PRs that fix issues should include a reference like `Closes #XXXX` in the
 commit message so that github will automatically close the referenced issue
@@ -113,16 +115,45 @@ Use your real name (sorry, no pseudonyms or anonymous contributions.)
 If you set your `user.name` and `user.email` git configs, you can sign your
 commit automatically with `git commit -s`.

+### Dependencies management
+
+Make sure [`vndr`](https://github.com/LK4D4/vndr) is installed.
+
+In order to add a new dependency to this project:
+
+- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
+- run `make vendor`
+
+In order to update an existing dependency:
+
+- update the relevant dependency line in `vendor.conf`
+- run `make vendor`
+
+When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):
+
+- create a new branch in your `skopeo` checkout and switch to it
+- update `vendor.conf`. Find the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
+- run `make vendor`
+- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for changed `containers/image` API)
+- optionally add new integration tests to the skopeo repo
+- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
+- iterate until tests pass and the PR is reviewed
+- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
+- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
+- run `make vendor`
+- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
+- after tests complete successfully again, merge the skopeo PR

 ## Communications

-For general questions, or dicsussions, please use the
+For general questions, or discussions, please use the
 IRC group on `irc.freenode.net` called `container-projects`
 that has been setup.

 For discussions around issues/bugs and features, you can use the github
-[issues](https://github.com/projectatomic/skopeo/issues)
+[issues](https://github.com/containers/skopeo/issues)
 and
-[PRs](https://github.com/projectatomic/skopeo/pulls)
+[PRs](https://github.com/containers/skopeo/pulls)
 tracking system.

 <!--
```
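In command form, the add/update steps above look roughly like this; a minimal sketch (the `github.com/pkg/errors` pin and the fork line are the text's own examples, and `hack/tree_status.sh` is the cleanliness check CI runs per the .travis.yml diff above):

```sh
# Add a pin to vendor.conf, then re-vendor.
# Format per vndr rules: import-path ref [alternative-repo-URL]
echo 'github.com/pkg/errors master' >> vendor.conf

# Temporarily vendoring containers/image from a fork branch is a line like:
#   github.com/containers/image my-branch https://github.com/runcom/image
# (edit the existing containers/image line in vendor.conf by hand)

make vendor                # re-populates vendor/ from vendor.conf via vndr
./hack/tree_status.sh      # verify the tree is clean, as CI does
```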
Dockerfile (21 lines changed):

```diff
@@ -7,15 +7,19 @@ RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-go-md
 	# gpgme bindings deps
 	libassuan-devel gpgme-devel \
 	ostree-devel \
-	gnupg
+	gnupg \
+	# OpenShift deps
+	which tar wget hostname util-linux bsdtar socat ethtool device-mapper iptables tree findutils nmap-ncat e2fsprogs xfsprogs lsof docker iproute \
+	bats jq podman \
+	&& dnf clean all

 # Install two versions of the registry. The first is an older version that
 # only supports schema1 manifests. The second is a newer version that supports
 # both. This allows integration-cli tests to cover push/pull with both schema1
 # and schema2 manifests.
-ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
-ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
 RUN set -x \
+	&& REGISTRY_COMMIT_SCHEMA1=ec87e9b6971d831f0eff752ddb54fb64693e51cd \
+	&& REGISTRY_COMMIT=47a064d4195a9b56133891bbb13620c3ac83a827 \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
 	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
@@ -27,18 +31,21 @@ RUN set -x \
 	&& rm -rf "$GOPATH"

 RUN set -x \
-	&& yum install -y which git tar wget hostname util-linux bsdtar socat ethtool device-mapper iptables tree findutils nmap-ncat e2fsprogs xfsprogs lsof docker iproute \
 	&& export GOPATH=$(mktemp -d) \
 	&& git clone --depth 1 -b v1.5.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
+	# The sed edits out a "go < 1.5" check which works incorrectly with go ≥ 1.10. \
+	&& sed -i -e 's/\[\[ "\${go_version\[2]}" < "go1.5" ]]/false/' "$GOPATH/src/github.com/openshift/origin/hack/common.sh" \
 	&& (cd "$GOPATH/src/github.com/openshift/origin" && make clean build && make all WHAT=cmd/dockerregistry) \
 	&& cp -a "$GOPATH/src/github.com/openshift/origin/_output/local/bin/linux"/*/* /usr/local/bin \
 	&& cp "$GOPATH/src/github.com/openshift/origin/images/dockerregistry/config.yml" /atomic-registry-config.yml \
 	&& rm -rf "$GOPATH" \
 	&& mkdir /registry

 ENV GOPATH /usr/share/gocode:/go
 ENV PATH $GOPATH/bin:/usr/share/gocode/bin:$PATH
-RUN go get github.com/golang/lint/golint
-WORKDIR /go/src/github.com/projectatomic/skopeo
-COPY . /go/src/github.com/projectatomic/skopeo
 RUN go version
+RUN go get golang.org/x/lint/golint
+WORKDIR /go/src/github.com/containers/skopeo
+COPY . /go/src/github.com/containers/skopeo

 #ENTRYPOINT ["hack/dind"]
```
Dockerfile.build:

```diff
@@ -1,8 +1,8 @@
-FROM ubuntu:17.04
+FROM ubuntu:18.10

 RUN apt-get update && apt-get install -y \
 	golang \
-	btrfs-tools \
+	libbtrfs-dev \
 	git-core \
 	libdevmapper-dev \
 	libgpgme11-dev \
@@ -11,4 +11,4 @@ RUN apt-get update && apt-get install -y \
 	libostree-dev

 ENV GOPATH=/
-WORKDIR /src/github.com/projectatomic/skopeo
+WORKDIR /src/github.com/containers/skopeo
```
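Run by hand, the Makefile's `binary` target drives this builder image roughly as follows (commands taken from the Makefile diff below; `docker` and `podman` are interchangeable here):

```sh
# Build the throwaway build image, then compile skopeo inside it;
# the binary lands in the current directory via the bind mount.
docker build -f Dockerfile.build -t skopeobuildimage .
docker run --rm --security-opt label=disable \
    -v "$(pwd)":/src/github.com/containers/skopeo \
    skopeobuildimage make binary-local
```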
Makefile (105 lines changed):

```diff
@@ -1,4 +1,4 @@
-.PHONY: all binary build-container build-local clean install install-binary install-completions shell test-integration
+.PHONY: all binary build-container docs docs-in-container build-local clean install install-binary install-completions shell test-integration .install.vndr vendor

 export GO15VENDOREXPERIMENT=1

@@ -22,68 +22,96 @@ CONTAINERSSYSCONFIGDIR=${DESTDIR}/etc/containers
 REGISTRIESDDIR=${CONTAINERSSYSCONFIGDIR}/registries.d
 SIGSTOREDIR=${DESTDIR}/var/lib/atomic/sigstore
 BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
-GO_MD2MAN ?= go-md2man
 GO ?= go
+CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
+GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')

 ifeq ($(DEBUG), 1)
   override GOGCFLAGS += -N -l
 endif

+ifeq ($(shell $(GO) env GOOS), linux)
+  GO_DYN_FLAGS="-buildmode=pie"
+endif

 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-DOCKER_IMAGE := skopeo-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+IMAGE := skopeo-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 # set env like gobuildtag?
-DOCKER_FLAGS := docker run --rm -i #$(DOCKER_ENVS)
+CONTAINER_CMD := ${CONTAINER_RUNTIME} run --rm -i -e TESTFLAGS="$(TESTFLAGS)" #$(CONTAINER_ENVS)
 # if this session isn't interactive, then we don't want to allocate a
 # TTY, which would fail, but if it is interactive, we do want to attach
 # so that the user can send e.g. ^C through.
 INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0)
 ifeq ($(INTERACTIVE), 1)
-  DOCKER_FLAGS += -t
+  CONTAINER_CMD += -t
 endif
-DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
+CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"

 GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)

 MANPAGES_MD = $(wildcard docs/*.md)
+MANPAGES ?= $(MANPAGES_MD:%.md=%)

-BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh)
+BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh) $(shell hack/btrfs_installed_tag.sh)
 LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
-LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(DARWIN_BUILD_TAG)
+OSTREE_BUILD_TAG = $(shell hack/ostree_tag.sh)
+LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(OSTREE_BUILD_TAG) $(DARWIN_BUILD_TAG)
 BUILDTAGS += $(LOCAL_BUILD_TAGS)

+ifeq ($(DISABLE_CGO), 1)
+  override BUILDTAGS = containers_image_ostree_stub exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
+endif

 # make all DEBUG=1
 # Note: Uses the -N -l go compiler options to disable compiler optimizations
 # and inlining. Using these build options allows you to subsequently
 # use source debugging tools like delve.
-all: binary docs
+all: binary docs-in-container

-# Build a docker image (skopeobuild) that has everything we need to build.
+help:
+	@echo "Usage: make <target>"
+	@echo
+	@echo " * 'install' - Install binaries and documents to system locations"
+	@echo " * 'binary' - Build skopeo with a container"
+	@echo " * 'binary-local' - Build skopeo locally"
+	@echo " * 'test-unit' - Execute unit tests"
+	@echo " * 'test-integration' - Execute integration tests"
+	@echo " * 'validate' - Verify whether there is no conflict and all Go source files have been formatted, linted and vetted"
+	@echo " * 'check' - Including above validate, test-integration and test-unit"
+	@echo " * 'shell' - Run the built image and attach to a shell"
+	@echo " * 'clean' - Clean artifacts"
+
+# Build a container image (skopeobuild) that has everything we need to build.
 # Then do the build and the output (skopeo) should appear in current dir
 binary: cmd/skopeo
-	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
-	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
 		skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'

 binary-static: cmd/skopeo
-	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
-	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
 		skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'

-# Build w/o using Docker containers
+# Build w/o using containers
 binary-local:
-	$(GPGME_ENV) $(GO) build -ldflags "-X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
+	$(GPGME_ENV) $(GO) build ${GO_DYN_FLAGS} -ldflags "-X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo

 binary-local-static:
 	$(GPGME_ENV) $(GO) build -ldflags "-extldflags \"-static\" -X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo

 build-container:
-	docker build ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" .
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .

-docs/%.1: docs/%.1.md
-	$(GO_MD2MAN) -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@
+$(MANPAGES): %: %.md
+	@sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@

 .PHONY: docs
-docs: $(MANPAGES_MD:%.md=%)
+docs: $(MANPAGES)

+docs-in-container:
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
+		skopeobuildimage make docs $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'

 clean:
 	rm -f skopeo docs/*.1
@@ -99,29 +127,40 @@ install-binary: ./skopeo
 	install -d -m 755 ${INSTALLDIR}
 	install -m 755 skopeo ${INSTALLDIR}/skopeo

-install-docs: docs/skopeo.1
+install-docs: docs
 	install -d -m 755 ${MANINSTALLDIR}/man1
-	install -m 644 docs/skopeo.1 ${MANINSTALLDIR}/man1/skopeo.1
+	install -m 644 docs/*.1 ${MANINSTALLDIR}/man1/

 install-completions:
 	install -m 755 -d ${BASHINSTALLDIR}
 	install -m 644 completions/bash/skopeo ${BASHINSTALLDIR}/skopeo

 shell: build-container
-	$(DOCKER_RUN_DOCKER) bash
+	$(CONTAINER_RUN) bash

-check: validate test-unit test-integration
+check: validate test-unit test-integration test-system

 # The tests can run out of entropy and block in containers, so replace /dev/random.
 test-integration: build-container
-	$(DOCKER_RUN_DOCKER) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'
+	$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'

+# complicated set of options needed to run podman-in-podman
+test-system: build-container
+	DTEMP=$(shell mktemp -d --tmpdir=/var/tmp podman-tmp.XXXXXX); \
+	$(CONTAINER_CMD) --privileged --net=host \
+		-v $$DTEMP:/var/lib/containers:Z \
+		"$(IMAGE)" \
+		bash -c 'BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-system'; \
+	rc=$$?; \
+	$(RM) -rf $$DTEMP; \
+	exit $$rc

 test-unit: build-container
 	# Just call (make test unit-local) here instead of worrying about environment differences, e.g. GO15VENDOREXPERIMENT.
-	$(DOCKER_RUN_DOCKER) make test-unit-local BUILDTAGS='$(BUILDTAGS)'
+	$(CONTAINER_RUN) make test-unit-local BUILDTAGS='$(BUILDTAGS)'

 validate: build-container
-	$(DOCKER_RUN_DOCKER) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
+	$(CONTAINER_RUN) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet

 # This target is only intended for development, e.g. executing it from an IDE. Use (make test) for CI or pre-release testing.
 test-all-local: validate-local test-unit-local
@@ -130,4 +169,12 @@ validate-local:
 	hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet

 test-unit-local:
-	$(GPGME_ENV) $(GO) test -tags "$(BUILDTAGS)" $$($(GO) list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/projectatomic/skopeo/\(integration\|vendor/.*\)$$')
+	$(GPGME_ENV) $(GO) test -tags "$(BUILDTAGS)" $$($(GO) list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')

+.install.vndr:
+	$(GO) get -u github.com/LK4D4/vndr
+
+vendor: vendor.conf .install.vndr
+	$(GOPATH)/bin/vndr \
+		-whitelist '^github.com/containers/image/docs/.*' \
+		-whitelist '^github.com/containers/image/registries.conf'
```
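Example invocations of the targets defined above (flag and target names are straight from this Makefile; the comments are interpretive):

```sh
make binary                        # containerized build; ./skopeo lands in the current dir
make binary-local DEBUG=1          # local build with -N -l, ready for debugging with delve
make binary-static DISABLE_CGO=1   # pure-Go static binary (ostree/btrfs/devmapper/gpgme stubbed out)
make docs-in-container             # render docs/*.1 man pages without a local go-md2man
make vendor                        # re-vendor dependencies per vendor.conf via vndr
```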
README.md (94 lines changed):

````diff
@@ -1,10 +1,19 @@
-skopeo [](https://travis-ci.org/projectatomic/skopeo)
+skopeo [](https://travis-ci.org/containers/skopeo)
 =

-`skopeo` is a command line utility that performs various operations on container images and image repositories. Skopeo works with API V2 registries such as Docker registries, the Atomic registry, private registries, local directories and local OCI-layout directories. Skopeo does not require a daemon to be running to perform these operations which consist of:
+<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
+
+----
+
+`skopeo` is a command line utility that performs various operations on container images and image repositories.
+
+`skopeo` can work with [OCI images](https://github.com/opencontainers/image-spec) as well as the original Docker v2 images.
+
+Skopeo works with API V2 registries such as Docker registries, the Atomic registry, private registries, local directories and local OCI-layout directories. Skopeo does not require a daemon to be running to perform these operations which consist of:

-* Inspecting an image showing its properties including its layers.
 * Copying an image from and to various storage mechanisms.
+  For example you can copy images from one registry to another, without requiring privilege.
+* Inspecting a remote image showing its properties including its layers, without requiring you to pull the image to the host.
 * Deleting an image from an image repository.
 * When required by the repository, skopeo can pass the appropriate credentials and certificates for authentication.

@@ -75,14 +84,25 @@ $ skopeo inspect docker://docker.io/fedora:rawhide | jq '.Digest'

 Copying images
 -
-`skopeo` can copy container images between various storage mechanisms,
-e.g. Docker registries (including the Docker Hub), the Atomic Registry,
-local directories, and local OCI-layout directories:
+`skopeo` can copy container images between various storage mechanisms, including:
+* Docker distribution based registries
+
+  - The Docker Hub, OpenShift, GCR, Artifactory, Quay ...
+
+* Container Storage backends
+
+  - Docker daemon storage
+
+  - github.com/containers/storage (Backend for CRI-O, Buildah and friends)
+
+* Local directories
+
+* Local OCI-layout directories

 ```sh
 $ skopeo copy docker://busybox:1-glibc atomic:myns/unsigned:streaming
 $ skopeo copy docker://busybox:latest dir:existingemptydirectory
-$ skopeo copy docker://busybox:latest oci:busybox_ocilayout
+$ skopeo copy docker://busybox:latest oci:busybox_ocilayout:latest
 ```

 Deleting images
@@ -129,8 +149,16 @@ $ skopeo copy --src-creds=testuser:testpassword docker://myregistrydomain.com:50
 If your cli config is found but it doesn't contain the necessary credentials for the queried registry
 you'll get an error. You can fix this by either logging in (via `docker login`) or providing `--creds` or `--src-creds|--dest-creds`.

-Building
+Obtaining skopeo
 -
+`skopeo` may already be packaged in your distribution, for example on Fedora 23 and later you can install it using
+```sh
+$ sudo dnf install skopeo
+```
+
+Otherwise, read on for building and installing it from source:
+
 To build the `skopeo` binary you need at least Go 1.5 because it uses the latest `GO15VENDOREXPERIMENT` flag.

 There are two ways to build skopeo: in a container, or locally without a container. Choose the one which better matches your needs and environment.
@@ -144,14 +172,15 @@ Building without a container requires a bit more manual work and setup in your e
 Install the necessary dependencies:
 ```sh
 Fedora$ sudo dnf install gpgme-devel libassuan-devel btrfs-progs-devel device-mapper-devel ostree-devel
 Ubuntu$ sudo apt install libgpgme-dev libassuan-dev libbtrfs-dev libdevmapper-dev libostree-dev
 macOS$ brew install gpgme
 ```

 Make sure to clone this repository in your `GOPATH` - otherwise compilation fails.

 ```sh
-$ git clone https://github.com/projectatomic/skopeo $GOPATH/src/github.com/projectatomic/skopeo
-$ cd $GOPATH/src/github.com/projectatomic/skopeo && make binary-local
+$ git clone https://github.com/containers/skopeo $GOPATH/src/github.com/containers/skopeo
+$ cd $GOPATH/src/github.com/containers/skopeo && make binary-local
 ```

 ### Building in a container
@@ -163,6 +192,12 @@ Building in a container is simpler, but more restrictive:
 $ make binary # Or (make all) to also build documentation, see below.
 ```

+To build a pure-Go static binary (disables ostree, devicemapper, btrfs, and gpgme):
+
+```sh
+$ make binary-static DISABLE_CGO=1
+```
+
 ### Building documentation
 To build the manual you will need go-md2man.
 ```sh
@@ -174,16 +209,12 @@ Then
 $ make docs
 ```

-Installing
-
- -
-If you built from source:
+### Installation
+Finally, after the binary and documentation is built:
 ```sh
 $ sudo make install
 ```
-`skopeo` is also available from Fedora 23 (and later):
-```sh
-$ sudo dnf install skopeo
-```

 TODO
 -
 - list all images on registry?
@@ -198,34 +229,7 @@ NOT TODO
 CONTRIBUTING
 -

-### Dependencies management
-
-`skopeo` uses [`vndr`](https://github.com/LK4D4/vndr) for dependencies management.
-
-In order to add a new dependency to this project:
-
-- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
-- run `vndr github.com/pkg/errors`
-
-In order to update an existing dependency:
-
-- update the relevant dependency line in `vendor.conf`
-- run `vndr github.com/pkg/errors`
-
-When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):
-
-- create out a new branch in your `skopeo` checkout and switch to it
-- update `vendor.conf`. Find out the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
-- run `vndr github.com/containers/image`
-- make any other necessary changes in the skopeo repo (e.g. add other dependencies now requied by `containers/image`, or update skopeo for changed `containers/image` API)
-- optionally add new integration tests to the skopeo repo
-- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
-- iterate until tests pass and the PR is reviewed
-- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
-- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
-- run `vndr github.com/containers/image`
-- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
-- after tests complete succcesfully again, merge the skopeo PR
+Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate in the project.

 License
 -
````
cmd/skopeo/copy.go:

```diff
@@ -3,71 +3,43 @@ package main
 import (
 	"errors"
 	"fmt"
-	"os"
+	"io"
 	"strings"

 	"github.com/containers/image/copy"
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/manifest"
 	"github.com/containers/image/transports"
 	"github.com/containers/image/transports/alltransports"
 	"github.com/containers/image/types"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/urfave/cli"
 )

-// contextsFromGlobalOptions returns source and destionation types.SystemContext depending on c.
-func contextsFromGlobalOptions(c *cli.Context) (*types.SystemContext, *types.SystemContext, error) {
-	sourceCtx, err := contextFromGlobalOptions(c, "src-")
-	if err != nil {
-		return nil, nil, err
-	}
-
-	destinationCtx, err := contextFromGlobalOptions(c, "dest-")
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return sourceCtx, destinationCtx, nil
-}
-
-func copyHandler(context *cli.Context) error {
-	if len(context.Args()) != 2 {
-		return errors.New("Usage: copy source destination")
-	}
-
-	policyContext, err := getPolicyContext(context)
-	if err != nil {
-		return fmt.Errorf("Error loading trust policy: %v", err)
-	}
-	defer policyContext.Destroy()
-
-	srcRef, err := alltransports.ParseImageName(context.Args()[0])
-	if err != nil {
-		return fmt.Errorf("Invalid source name %s: %v", context.Args()[0], err)
-	}
-	destRef, err := alltransports.ParseImageName(context.Args()[1])
-	if err != nil {
-		return fmt.Errorf("Invalid destination name %s: %v", context.Args()[1], err)
-	}
-	signBy := context.String("sign-by")
-	removeSignatures := context.Bool("remove-signatures")
-
-	sourceCtx, destinationCtx, err := contextsFromGlobalOptions(context)
-	if err != nil {
-		return err
-	}
-
-	return copy.Image(policyContext, destRef, srcRef, &copy.Options{
-		RemoveSignatures: removeSignatures,
-		SignBy:           signBy,
-		ReportWriter:     os.Stdout,
-		SourceCtx:        sourceCtx,
-		DestinationCtx:   destinationCtx,
-	})
-}
+type copyOptions struct {
+	global            *globalOptions
+	srcImage          *imageOptions
+	destImage         *imageDestOptions
+	additionalTags    cli.StringSlice // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
+	removeSignatures  bool            // Do not copy signatures from the source image
+	signByFingerprint string          // Sign the image using a GPG key with the specified fingerprint
+	format            optionalString  // Force conversion of the image to a specified format
+	quiet             bool            // Suppress output information when copying images
+}

-var copyCmd = cli.Command{
-	Name:  "copy",
-	Usage: "Copy an IMAGE-NAME from one location to another",
-	Description: fmt.Sprintf(`
+func copyCmd(global *globalOptions) cli.Command {
+	sharedFlags, sharedOpts := sharedImageFlags()
+	srcFlags, srcOpts := imageFlags(global, sharedOpts, "src-", "screds")
+	destFlags, destOpts := imageDestFlags(global, sharedOpts, "dest-", "dcreds")
+	opts := copyOptions{global: global,
+		srcImage:  srcOpts,
+		destImage: destOpts,
+	}
+	return cli.Command{
+		Name:  "copy",
+		Usage: "Copy an IMAGE-NAME from one location to another",
+		Description: fmt.Sprintf(`

 Container "IMAGE-NAME" uses a "transport":"details" format.

@@ -76,50 +48,112 @@ var copyCmd = cli.Command{

 See skopeo(1) section "IMAGE NAMES" for the expected format
 `, strings.Join(transports.ListNames(), ", ")),
-	ArgsUsage: "SOURCE-IMAGE DESTINATION-IMAGE",
-	Action:    copyHandler,
-	// FIXME: Do we need to namespace the GPG aspect?
-	Flags: []cli.Flag{
-		cli.BoolFlag{
-			Name:  "remove-signatures",
-			Usage: "Do not copy signatures from SOURCE-IMAGE",
-		},
-		cli.StringFlag{
-			Name:  "sign-by",
-			Usage: "Sign the image using a GPG key with the specified `FINGERPRINT`",
-		},
-		cli.StringFlag{
-			Name:  "src-creds, screds",
-			Value: "",
-			Usage: "Use `USERNAME[:PASSWORD]` for accessing the source registry",
-		},
-		cli.StringFlag{
-			Name:  "dest-creds, dcreds",
-			Value: "",
-			Usage: "Use `USERNAME[:PASSWORD]` for accessing the destination registry",
-		},
-		cli.StringFlag{
-			Name:  "src-cert-dir",
-			Value: "",
-			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the source registry",
-		},
-		cli.BoolTFlag{
-			Name:  "src-tls-verify",
-			Usage: "require HTTPS and verify certificates when talking to the container source registry (defaults to true)",
-		},
-		cli.StringFlag{
-			Name:  "dest-cert-dir",
-			Value: "",
-			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the destination registry",
-		},
-		cli.BoolTFlag{
-			Name:  "dest-tls-verify",
-			Usage: "require HTTPS and verify certificates when talking to the container destination registry (defaults to true)",
-		},
-		cli.StringFlag{
-			Name:  "dest-ostree-tmp-dir",
-			Value: "",
-			Usage: "`DIRECTORY` to use for OSTree temporary files",
-		},
-	},
+		ArgsUsage: "SOURCE-IMAGE DESTINATION-IMAGE",
+		Action:    commandAction(opts.run),
+		// FIXME: Do we need to namespace the GPG aspect?
+		Flags: append(append(append([]cli.Flag{
+			cli.StringSliceFlag{
+				Name:  "additional-tag",
+				Usage: "additional tags (supports docker-archive)",
+				Value: &opts.additionalTags, // Surprisingly StringSliceFlag does not support Destination:, but modifies Value: in place.
+			},
+			cli.BoolFlag{
+				Name:        "quiet, q",
+				Usage:       "Suppress output information when copying images",
+				Destination: &opts.quiet,
+			},
+			cli.BoolFlag{
+				Name:        "remove-signatures",
+				Usage:       "Do not copy signatures from SOURCE-IMAGE",
+				Destination: &opts.removeSignatures,
+			},
+			cli.StringFlag{
+				Name:        "sign-by",
+				Usage:       "Sign the image using a GPG key with the specified `FINGERPRINT`",
+				Destination: &opts.signByFingerprint,
+			},
+			cli.GenericFlag{
+				Name:  "format, f",
+				Usage: "`MANIFEST TYPE` (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)",
+				Value: newOptionalStringValue(&opts.format),
+			},
+		}, sharedFlags...), srcFlags...), destFlags...),
+	}
+}

+func (opts *copyOptions) run(args []string, stdout io.Writer) error {
+	if len(args) != 2 {
+		return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
+	}
+	imageNames := args
+
+	if err := reexecIfNecessaryForImages(imageNames...); err != nil {
+		return err
+	}
+
+	policyContext, err := opts.global.getPolicyContext()
+	if err != nil {
+		return fmt.Errorf("Error loading trust policy: %v", err)
+	}
+	defer policyContext.Destroy()
+
+	srcRef, err := alltransports.ParseImageName(imageNames[0])
+	if err != nil {
+		return fmt.Errorf("Invalid source name %s: %v", imageNames[0], err)
+	}
+	destRef, err := alltransports.ParseImageName(imageNames[1])
+	if err != nil {
+		return fmt.Errorf("Invalid destination name %s: %v", imageNames[1], err)
+	}
+
+	sourceCtx, err := opts.srcImage.newSystemContext()
+	if err != nil {
+		return err
+	}
+	destinationCtx, err := opts.destImage.newSystemContext()
+	if err != nil {
+		return err
+	}
+
+	var manifestType string
+	if opts.format.present {
+		switch opts.format.value {
+		case "oci":
+			manifestType = imgspecv1.MediaTypeImageManifest
+		case "v2s1":
+			manifestType = manifest.DockerV2Schema1SignedMediaType
+		case "v2s2":
+			manifestType = manifest.DockerV2Schema2MediaType
+		default:
+			return fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", opts.format.value)
+		}
+	}
+
+	for _, image := range opts.additionalTags {
+		ref, err := reference.ParseNormalizedNamed(image)
+		if err != nil {
+			return fmt.Errorf("error parsing additional-tag '%s': %v", image, err)
+		}
+		namedTagged, isNamedTagged := ref.(reference.NamedTagged)
+		if !isNamedTagged {
+			return fmt.Errorf("additional-tag '%s' must be a tagged reference", image)
+		}
+		destinationCtx.DockerArchiveAdditionalTags = append(destinationCtx.DockerArchiveAdditionalTags, namedTagged)
+	}
+
+	ctx, cancel := opts.global.commandTimeoutContext()
+	defer cancel()
+
+	if opts.quiet {
+		stdout = nil
+	}
+	_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
+		RemoveSignatures:      opts.removeSignatures,
+		SignBy:                opts.signByFingerprint,
+		ReportWriter:          stdout,
+		SourceCtx:             sourceCtx,
+		DestinationCtx:        destinationCtx,
+		ForceManifestMIMEType: manifestType,
+	})
+	return err
+}
```
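Hedged examples of the new flags this rewrite introduces (the image names and paths are illustrative, not taken from the diff):

```sh
# Convert the manifest to a specific type when saving to a dir: target:
$ skopeo copy --format v2s2 docker://busybox:latest dir:/tmp/busybox-dir

# For docker-archive: destinations, record extra name:tag pairs in the archive:
$ skopeo copy --additional-tag busybox:extra \
    docker://busybox:latest docker-archive:/tmp/busybox.tar:busybox:latest

# --quiet suppresses the copy progress normally written to stdout:
$ skopeo copy --quiet docker://busybox:latest oci:busybox_ocilayout:latest
```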
cmd/skopeo/delete.go:

```diff
@@ -3,6 +3,7 @@ package main
 import (
 	"errors"
 	"fmt"
+	"io"
 	"strings"

 	"github.com/containers/image/transports"
@@ -10,27 +11,22 @@
 	"github.com/urfave/cli"
 )

-func deleteHandler(context *cli.Context) error {
-	if len(context.Args()) != 1 {
-		return errors.New("Usage: delete imageReference")
-	}
-
-	ref, err := alltransports.ParseImageName(context.Args()[0])
-	if err != nil {
-		return fmt.Errorf("Invalid source name %s: %v", context.Args()[0], err)
-	}
-
-	ctx, err := contextFromGlobalOptions(context, "")
-	if err != nil {
-		return err
-	}
-	return ref.DeleteImage(ctx)
+type deleteOptions struct {
+	global *globalOptions
+	image  *imageOptions
 }

-var deleteCmd = cli.Command{
-	Name:  "delete",
-	Usage: "Delete image IMAGE-NAME",
-	Description: fmt.Sprintf(`
+func deleteCmd(global *globalOptions) cli.Command {
+	sharedFlags, sharedOpts := sharedImageFlags()
+	imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
+	opts := deleteOptions{
+		global: global,
+		image:  imageOpts,
+	}
+	return cli.Command{
+		Name:  "delete",
+		Usage: "Delete image IMAGE-NAME",
+		Description: fmt.Sprintf(`
 Delete an "IMAGE_NAME" from a transport

 Supported transports:
@@ -38,22 +34,33 @@ var deleteCmd = cli.Command{

 See skopeo(1) section "IMAGE NAMES" for the expected format
 `, strings.Join(transports.ListNames(), ", ")),
-	ArgsUsage: "IMAGE-NAME",
-	Action:    deleteHandler,
-	Flags: []cli.Flag{
-		cli.StringFlag{
-			Name:  "creds",
-			Value: "",
-			Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
-		},
-		cli.StringFlag{
-			Name:  "cert-dir",
-			Value: "",
-			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
-		},
-		cli.BoolTFlag{
-			Name:  "tls-verify",
-			Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
-		},
-	},
+		ArgsUsage: "IMAGE-NAME",
+		Action:    commandAction(opts.run),
+		Flags:     append(sharedFlags, imageFlags...),
+	}
 }

+func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
+	if len(args) != 1 {
+		return errors.New("Usage: delete imageReference")
+	}
+	imageName := args[0]
+
+	if err := reexecIfNecessaryForImages(imageName); err != nil {
+		return err
+	}
+
+	ref, err := alltransports.ParseImageName(imageName)
+	if err != nil {
+		return fmt.Errorf("Invalid source name %s: %v", imageName, err)
+	}
+
+	sys, err := opts.image.newSystemContext()
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := opts.global.commandTimeoutContext()
+	defer cancel()
+	return ref.DeleteImage(ctx, sys)
+}
```
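An illustrative invocation (the registry host is hypothetical, and `--tls-verify` is assumed to remain available through the shared image flags, as it was in the explicit flag list being replaced):

```sh
# Delete a tag from a local test registry served over plain HTTP:
$ skopeo delete --tls-verify=false docker://localhost:5000/busybox:latest
```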
cmd/skopeo/flag.go (new file, 75 lines):

```go
package main

import (
	"strconv"

	"github.com/urfave/cli"
)

// optionalBool is a boolean with a separate presence flag.
type optionalBool struct {
	present bool
	value   bool
}

// optionalBoolValue is a cli.Generic == flag.Value implementation equivalent to
// the one underlying flag.Bool, except that it records whether the flag has been set.
// This is distinct from optionalBool to (pretend to) force callers to use
// newOptionalBoolValue.
type optionalBoolValue optionalBool

func newOptionalBoolValue(p *optionalBool) cli.Generic {
	p.present = false
	return (*optionalBoolValue)(p)
}

func (ob *optionalBoolValue) Set(s string) error {
	v, err := strconv.ParseBool(s)
	if err != nil {
		return err
	}
	ob.value = v
	ob.present = true
	return nil
}

func (ob *optionalBoolValue) String() string {
	if !ob.present {
		return "" // This is, sadly, not round-trip safe: --flag is interpreted as --flag=true
	}
	return strconv.FormatBool(ob.value)
}

func (ob *optionalBoolValue) IsBoolFlag() bool {
	return true
}

// optionalString is a string with a separate presence flag.
type optionalString struct {
	present bool
	value   string
}

// optionalStringValue is a cli.Generic == flag.Value implementation equivalent to
// the one underlying flag.String, except that it records whether the flag has been set.
// This is distinct from optionalString to (pretend to) force callers to use
// newOptionalStringValue.
type optionalStringValue optionalString

func newOptionalStringValue(p *optionalString) cli.Generic {
	p.present = false
	return (*optionalStringValue)(p)
}

func (ob *optionalStringValue) Set(s string) error {
	ob.value = s
	ob.present = true
	return nil
}

func (ob *optionalStringValue) String() string {
	if !ob.present {
		return "" // This is, sadly, not round-trip safe: --flag= is interpreted as {present:true, value:""}
	}
	return ob.value
}
```
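A hedged usage sketch of the types above (a standalone `main` package for illustration; it assumes the flag.go definitions are in scope, and the `tls-verify` flag name is just an example). Unlike a plain cli.BoolFlag, an optionalBool can distinguish "flag not given" from "--tls-verify=false":

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	var tlsVerify optionalBool // defined in flag.go above
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		// GenericFlag delegates parsing to the cli.Generic value,
		// which records presence as a side effect of Set().
		cli.GenericFlag{
			Name:  "tls-verify",
			Usage: "require HTTPS and verify certificates",
			Value: newOptionalBoolValue(&tlsVerify),
		},
	}
	app.Action = func(*cli.Context) error {
		if !tlsVerify.present {
			fmt.Println("--tls-verify not given; caller can apply its own default")
		} else {
			fmt.Printf("--tls-verify explicitly set to %v\n", tlsVerify.value)
		}
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```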
cmd/skopeo/flag_test.go (new file, 239 lines):

```go
package main

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/urfave/cli"
)

func TestOptionalBoolSet(t *testing.T) {
	for _, c := range []struct {
		input    string
		accepted bool
		value    bool
	}{
		// Valid inputs documented for strconv.ParseBool == flag.BoolVar
		{"1", true, true},
		{"t", true, true},
		{"T", true, true},
		{"TRUE", true, true},
		{"true", true, true},
		{"True", true, true},
		{"0", true, false},
		{"f", true, false},
		{"F", true, false},
		{"FALSE", true, false},
		{"false", true, false},
		{"False", true, false},
		// A few invalid inputs
		{"", false, false},
		{"yes", false, false},
		{"no", false, false},
		{"2", false, false},
	} {
		var ob optionalBool
		v := newOptionalBoolValue(&ob)
		require.False(t, ob.present)
		err := v.Set(c.input)
		if c.accepted {
			assert.NoError(t, err, c.input)
			assert.Equal(t, c.value, ob.value)
		} else {
			assert.Error(t, err, c.input)
			assert.False(t, ob.present) // Just to be extra paranoid.
		}
	}

	// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
	// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
	// is not called in any possible situation).
	var globalOB, commandOB optionalBool
	actionRun := false
	app := cli.NewApp()
	app.EnableBashCompletion = true
	app.Flags = []cli.Flag{
		cli.GenericFlag{
			Name:  "global-OB",
			Value: newOptionalBoolValue(&globalOB),
		},
	}
	app.Commands = []cli.Command{{
		Name: "cmd",
		Flags: []cli.Flag{
			cli.GenericFlag{
				Name:  "command-OB",
				Value: newOptionalBoolValue(&commandOB),
			},
		},
		Action: func(*cli.Context) error {
			assert.False(t, globalOB.present)
			assert.False(t, commandOB.present)
			actionRun = true
			return nil
		},
	}}
	err := app.Run([]string{"app", "cmd"})
	require.NoError(t, err)
	assert.True(t, actionRun)
}

func TestOptionalBoolString(t *testing.T) {
	for _, c := range []struct {
		input    optionalBool
		expected string
	}{
		{optionalBool{present: true, value: true}, "true"},
		{optionalBool{present: true, value: false}, "false"},
		{optionalBool{present: false, value: true}, ""},
		{optionalBool{present: false, value: false}, ""},
	} {
		var ob optionalBool
		v := newOptionalBoolValue(&ob)
		ob = c.input
		res := v.String()
		assert.Equal(t, c.expected, res)
	}
}

func TestOptionalBoolIsBoolFlag(t *testing.T) {
	// IsBoolFlag means that the argument value must either be part of the same argument, with =;
	// if there is no =, the value is set to true.
	// This differs from other flags, where the argument is required and may be either separated with = or supplied in the next argument.
	for _, c := range []struct {
		input        []string
		expectedOB   optionalBool
		expectedArgs []string
	}{
		{[]string{"1", "2"}, optionalBool{present: false}, []string{"1", "2"}},                                       // Flag not present
		{[]string{"--OB=true", "1", "2"}, optionalBool{present: true, value: true}, []string{"1", "2"}},              // --OB=true
		{[]string{"--OB=false", "1", "2"}, optionalBool{present: true, value: false}, []string{"1", "2"}},            // --OB=false
		{[]string{"--OB", "true", "1", "2"}, optionalBool{present: true, value: true}, []string{"true", "1", "2"}},   // --OB true
		{[]string{"--OB", "false", "1", "2"}, optionalBool{present: true, value: true}, []string{"false", "1", "2"}}, // --OB false
	} {
		var ob optionalBool
		actionRun := false
		app := cli.NewApp()
		app.Commands = []cli.Command{{
			Name: "cmd",
			Flags: []cli.Flag{
				cli.GenericFlag{
					Name:  "OB",
					Value: newOptionalBoolValue(&ob),
				},
			},
			Action: func(ctx *cli.Context) error {
				assert.Equal(t, c.expectedOB, ob)
				assert.Equal(t, c.expectedArgs, ([]string)(ctx.Args()))
				actionRun = true
				return nil
			},
		}}
		err := app.Run(append([]string{"app", "cmd"}, c.input...))
		require.NoError(t, err)
		assert.True(t, actionRun)
	}
}

func TestOptionalStringSet(t *testing.T) {
	// Really just a smoke test, but differentiating between not present and empty.
	for _, c := range []string{"", "hello"} {
		var os optionalString
		v := newOptionalStringValue(&os)
		require.False(t, os.present)
		err := v.Set(c)
		assert.NoError(t, err, c)
		assert.Equal(t, c, os.value)
	}

	// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
	// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
	// is not called in any possible situation).
	var globalOS, commandOS optionalString
	actionRun := false
	app := cli.NewApp()
	app.EnableBashCompletion = true
	app.Flags = []cli.Flag{
		cli.GenericFlag{
			Name:  "global-OS",
			Value: newOptionalStringValue(&globalOS),
		},
	}
	app.Commands = []cli.Command{{
		Name: "cmd",
		Flags: []cli.Flag{
			cli.GenericFlag{
				Name:  "command-OS",
				Value: newOptionalStringValue(&commandOS),
			},
		},
		Action: func(*cli.Context) error {
			assert.False(t, globalOS.present)
			assert.False(t, commandOS.present)
			actionRun = true
			return nil
		},
	}}
	err := app.Run([]string{"app", "cmd"})
	require.NoError(t, err)
	assert.True(t, actionRun)
}

func TestOptionalStringString(t *testing.T) {
	for _, c := range []struct {
		input    optionalString
		expected string
	}{
		{optionalString{present: true, value: "hello"}, "hello"},
		{optionalString{present: true, value: ""}, ""},
		{optionalString{present: false, value: "hello"}, ""},
		{optionalString{present: false, value: ""}, ""},
	} {
		var os optionalString
		v := newOptionalStringValue(&os)
		os = c.input
		res := v.String()
		assert.Equal(t, c.expected, res)
	}
}

func TestOptionalStringIsBoolFlag(t *testing.T) {
	// NOTE: optionalStringValue does not implement IsBoolFlag!
	// IsBoolFlag means that the argument value must either be part of the same argument, with =;
	// if there is no =, the value is set to true.
	// This differs from other flags, where the argument is required and may be either separated with = or supplied in the next argument.
	for _, c := range []struct {
		input        []string
		expectedOS   optionalString
		expectedArgs []string
	}{
		{[]string{"1", "2"}, optionalString{present: false}, []string{"1", "2"}},                                 // Flag not present
		{[]string{"--OS=hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}},    // --OS=hello
		{[]string{"--OS=", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}},              // --OS=
		{[]string{"--OS", "hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}}, // --OS hello
		{[]string{"--OS", "", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}},           // --OS ""
	} {
		var os optionalString
		actionRun := false
		app := cli.NewApp()
		app.Commands = []cli.Command{{
			Name: "cmd",
			Flags: []cli.Flag{
				cli.GenericFlag{
					Name:  "OS",
					Value: newOptionalStringValue(&os),
				},
			},
			Action: func(ctx *cli.Context) error {
				assert.Equal(t, c.expectedOS, os)
				assert.Equal(t, c.expectedArgs, ([]string)(ctx.Args()))
				actionRun = true
				return nil
			},
		}}
		err := app.Run(append([]string{"app", "cmd"}, c.input...))
		require.NoError(t, err)
		assert.True(t, actionRun)
	}
}
```
@@ -3,6 +3,7 @@ package main

import (
    "encoding/json"
    "fmt"
    "io"
    "strings"
    "time"

@@ -21,7 +22,7 @@ type inspectOutput struct {
    Tag      string `json:",omitempty"`
    Digest   digest.Digest
    RepoTags []string
    Created time.Time
    Created *time.Time
    DockerVersion string
    Labels        map[string]string
    Architecture  string
@@ -29,10 +30,24 @@ type inspectOutput struct {
    Layers []string
}

var inspectCmd = cli.Command{
    Name:  "inspect",
    Usage: "Inspect image IMAGE-NAME",
    Description: fmt.Sprintf(`
type inspectOptions struct {
    global *globalOptions
    image  *imageOptions
    raw    bool // Output the raw manifest instead of parsing information about the image
    config bool // Output the raw config blob instead of parsing information about the image
}

func inspectCmd(global *globalOptions) cli.Command {
    sharedFlags, sharedOpts := sharedImageFlags()
    imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
    opts := inspectOptions{
        global: global,
        image:  imageOpts,
    }
    return cli.Command{
        Name:  "inspect",
        Usage: "Inspect image IMAGE-NAME",
        Description: fmt.Sprintf(`
Return low-level information about "IMAGE-NAME" in a registry/transport

Supported transports:
@@ -40,88 +55,121 @@ var inspectCmd = cli.Command{

See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
        ArgsUsage: "IMAGE-NAME",
        Flags: []cli.Flag{
            cli.StringFlag{
                Name:  "cert-dir",
                Value: "",
                Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
            },
            cli.BoolTFlag{
                Name:  "tls-verify",
                Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
            },
            cli.BoolFlag{
                Name:  "raw",
                Usage: "output raw manifest",
            },
            cli.StringFlag{
                Name:  "creds",
                Value: "",
                Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
            },
        },
        Action: func(c *cli.Context) (retErr error) {
            img, err := parseImage(c)
            if err != nil {
                return err
            }

            defer func() {
                if err := img.Close(); err != nil {
                    retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
                }
            }()

            rawManifest, _, err := img.Manifest()
            if err != nil {
                return err
            }
            if c.Bool("raw") {
                _, err := c.App.Writer.Write(rawManifest)
                if err != nil {
                    return fmt.Errorf("Error writing manifest to standard output: %v", err)
                }
                return nil
            }
            imgInspect, err := img.Inspect()
            if err != nil {
                return err
            }
            outputData := inspectOutput{
                Name: "", // Possibly overridden for a docker.Image.
                Tag:  imgInspect.Tag,
                // Digest is set below.
                RepoTags:      []string{}, // Possibly overridden for a docker.Image.
                Created:       imgInspect.Created,
                DockerVersion: imgInspect.DockerVersion,
                Labels:        imgInspect.Labels,
                Architecture:  imgInspect.Architecture,
                Os:            imgInspect.Os,
                Layers:        imgInspect.Layers,
            }
            outputData.Digest, err = manifest.Digest(rawManifest)
            if err != nil {
                return fmt.Errorf("Error computing manifest digest: %v", err)
            }
            if dockerImg, ok := img.(*docker.Image); ok {
                outputData.Name = dockerImg.SourceRefFullName()
                outputData.RepoTags, err = dockerImg.GetRepositoryTags()
                if err != nil {
                    // some registries may decide to block the "list all tags" endpoint;
                    // gracefully allow the inspect to continue in this case. Currently
                    // the IBM Bluemix container registry has this restriction.
                    if !strings.Contains(err.Error(), "401") {
                        return fmt.Errorf("Error determining repository tags: %v", err)
                    }
                    logrus.Warnf("Registry disallows tag list retrieval; skipping")
                }
            }
            out, err := json.MarshalIndent(outputData, "", "    ")
            if err != nil {
                return err
            }
            fmt.Fprintln(c.App.Writer, string(out))
            return nil
        },
        ArgsUsage: "IMAGE-NAME",
        Flags: append(append([]cli.Flag{
            cli.BoolFlag{
                Name:        "raw",
                Usage:       "output raw manifest or configuration",
                Destination: &opts.raw,
            },
            cli.BoolFlag{
                Name:        "config",
                Usage:       "output configuration",
                Destination: &opts.config,
            },
        }, sharedFlags...), imageFlags...),
        Action: commandAction(opts.run),
    }
}

func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error) {
    ctx, cancel := opts.global.commandTimeoutContext()
    defer cancel()

    if len(args) != 1 {
        return errors.New("Exactly one argument expected")
    }
    imageName := args[0]

    if err := reexecIfNecessaryForImages(imageName); err != nil {
        return err
    }

    img, err := parseImage(ctx, opts.image, imageName)
    if err != nil {
        return err
    }

    defer func() {
        if err := img.Close(); err != nil {
            retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
        }
    }()

    rawManifest, _, err := img.Manifest(ctx)
    if err != nil {
        return err
    }
    if opts.config && opts.raw {
        configBlob, err := img.ConfigBlob(ctx)
        if err != nil {
            return fmt.Errorf("Error reading configuration blob: %v", err)
        }
        _, err = stdout.Write(configBlob)
        if err != nil {
            return fmt.Errorf("Error writing configuration blob to standard output: %v", err)
        }
        return nil
    } else if opts.raw {
        _, err := stdout.Write(rawManifest)
        if err != nil {
            return fmt.Errorf("Error writing manifest to standard output: %v", err)
        }
        return nil
    } else if opts.config {
        config, err := img.OCIConfig(ctx)
        if err != nil {
            return fmt.Errorf("Error reading OCI-formatted configuration data: %v", err)
        }
        err = json.NewEncoder(stdout).Encode(config)
        if err != nil {
            return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %v", err)
        }
        return nil
    }
    imgInspect, err := img.Inspect(ctx)
    if err != nil {
        return err
    }
    outputData := inspectOutput{
        Name: "", // Set below if DockerReference() is known
        Tag:  imgInspect.Tag,
        // Digest is set below.
        RepoTags:      []string{}, // Possibly overridden for docker.Transport.
        Created:       imgInspect.Created,
        DockerVersion: imgInspect.DockerVersion,
        Labels:        imgInspect.Labels,
        Architecture:  imgInspect.Architecture,
        Os:            imgInspect.Os,
        Layers:        imgInspect.Layers,
    }
    outputData.Digest, err = manifest.Digest(rawManifest)
    if err != nil {
        return fmt.Errorf("Error computing manifest digest: %v", err)
    }
    if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
        outputData.Name = dockerRef.Name()
    }
    if img.Reference().Transport() == docker.Transport {
        sys, err := opts.image.newSystemContext()
        if err != nil {
            return err
        }
        outputData.RepoTags, err = docker.GetRepositoryTags(ctx, sys, img.Reference())
        if err != nil {
            // some registries may decide to block the "list all tags" endpoint;
            // gracefully allow the inspect to continue in this case. Currently
            // the IBM Bluemix container registry has this restriction.
            if !strings.Contains(err.Error(), "401") {
                return fmt.Errorf("Error determining repository tags: %v", err)
            }
            logrus.Warnf("Registry disallows tag list retrieval; skipping")
        }
    }
    out, err := json.MarshalIndent(outputData, "", "    ")
    if err != nil {
        return err
    }
    fmt.Fprintf(stdout, "%s\n", string(out))
    return nil
}

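One detail worth noting in the hunk above: `Created` becomes a `*time.Time`. With a pointer, an image whose configuration carries no creation time can serialize as JSON `null` instead of Go's zero timestamp. A standalone illustration of the difference (not skopeo code):

```go
package main

import (
    "encoding/json"
    "fmt"
    "time"
)

type withValue struct{ Created time.Time }
type withPointer struct{ Created *time.Time }

func main() {
    v, _ := json.Marshal(withValue{})   // zero time serializes as a bogus timestamp
    p, _ := json.Marshal(withPointer{}) // nil pointer serializes as null
    fmt.Println(string(v)) // {"Created":"0001-01-01T00:00:00Z"}
    fmt.Println(string(p)) // {"Created":null}
}
```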
@@ -2,113 +2,149 @@ package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "strings"

    "github.com/containers/image/directory"
    "github.com/containers/image/image"
    "github.com/containers/image/pkg/blobinfocache"
    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/urfave/cli"
)

var layersCmd = cli.Command{
    Name:      "layers",
    Usage:     "Get layers of IMAGE-NAME",
    ArgsUsage: "IMAGE-NAME [LAYER...]",
    Hidden:    true,
    Action: func(c *cli.Context) (retErr error) {
        fmt.Fprintln(os.Stderr, `DEPRECATED: skopeo layers is deprecated in favor of skopeo copy`)
        if c.NArg() == 0 {
            return errors.New("Usage: layers imageReference [layer...]")
type layersOptions struct {
    global *globalOptions
    image  *imageOptions
}

func layersCmd(global *globalOptions) cli.Command {
    sharedFlags, sharedOpts := sharedImageFlags()
    imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
    opts := layersOptions{
        global: global,
        image:  imageOpts,
    }
    return cli.Command{
        Name:      "layers",
        Usage:     "Get layers of IMAGE-NAME",
        ArgsUsage: "IMAGE-NAME [LAYER...]",
        Hidden:    true,
        Action:    commandAction(opts.run),
        Flags:     append(sharedFlags, imageFlags...),
    }
}

func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
    fmt.Fprintln(os.Stderr, `DEPRECATED: skopeo layers is deprecated in favor of skopeo copy`)
    if len(args) == 0 {
        return errors.New("Usage: layers imageReference [layer...]")
    }
    imageName := args[0]

    if err := reexecIfNecessaryForImages(imageName); err != nil {
        return err
    }

    ctx, cancel := opts.global.commandTimeoutContext()
    defer cancel()

    sys, err := opts.image.newSystemContext()
    if err != nil {
        return err
    }
    cache := blobinfocache.DefaultCache(sys)
    rawSource, err := parseImageSource(ctx, opts.image, imageName)
    if err != nil {
        return err
    }
    src, err := image.FromSource(ctx, sys, rawSource)
    if err != nil {
        if closeErr := rawSource.Close(); closeErr != nil {
            return errors.Wrapf(err, " (close error: %v)", closeErr)
        }
        rawSource, err := parseImageSource(c, c.Args()[0])

        return err
    }
    defer func() {
        if err := src.Close(); err != nil {
            retErr = errors.Wrapf(retErr, " (close error: %v)", err)
        }
    }()

    type blobDigest struct {
        digest   digest.Digest
        isConfig bool
    }
    var blobDigests []blobDigest
    for _, dString := range args[1:] {
        if !strings.HasPrefix(dString, "sha256:") {
            dString = "sha256:" + dString
        }
        d, err := digest.Parse(dString)
        if err != nil {
            return err
        }
        src, err := image.FromSource(rawSource)
        blobDigests = append(blobDigests, blobDigest{digest: d, isConfig: false})
    }

    if len(blobDigests) == 0 {
        layers := src.LayerInfos()
        seenLayers := map[digest.Digest]struct{}{}
        for _, info := range layers {
            if _, ok := seenLayers[info.Digest]; !ok {
                blobDigests = append(blobDigests, blobDigest{digest: info.Digest, isConfig: false})
                seenLayers[info.Digest] = struct{}{}
            }
        }
        configInfo := src.ConfigInfo()
        if configInfo.Digest != "" {
            blobDigests = append(blobDigests, blobDigest{digest: configInfo.Digest, isConfig: true})
        }
    }

    tmpDir, err := ioutil.TempDir(".", "layers-")
    if err != nil {
        return err
    }
    tmpDirRef, err := directory.NewReference(tmpDir)
    if err != nil {
        return err
    }
    dest, err := tmpDirRef.NewImageDestination(ctx, nil)
    if err != nil {
        return err
    }

    defer func() {
        if err := dest.Close(); err != nil {
            retErr = errors.Wrapf(retErr, " (close error: %v)", err)
        }
    }()

    for _, bd := range blobDigests {
        r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
        if err != nil {
            if closeErr := rawSource.Close(); closeErr != nil {
                return err
            }
            if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
                if closeErr := r.Close(); closeErr != nil {
                    return errors.Wrapf(err, " (close error: %v)", closeErr)
                }

                return err
            }
            defer func() {
                if err := src.Close(); err != nil {
                    retErr = errors.Wrapf(retErr, " (close error: %v)", err)
                }
            }()
    }

    var blobDigests []digest.Digest
    for _, dString := range c.Args().Tail() {
        if !strings.HasPrefix(dString, "sha256:") {
            dString = "sha256:" + dString
        }
        d, err := digest.Parse(dString)
        if err != nil {
            return err
        }
        blobDigests = append(blobDigests, d)
    }
    manifest, _, err := src.Manifest(ctx)
    if err != nil {
        return err
    }
    if err := dest.PutManifest(ctx, manifest); err != nil {
        return err
    }

    if len(blobDigests) == 0 {
        layers := src.LayerInfos()
        seenLayers := map[digest.Digest]struct{}{}
        for _, info := range layers {
            if _, ok := seenLayers[info.Digest]; !ok {
                blobDigests = append(blobDigests, info.Digest)
                seenLayers[info.Digest] = struct{}{}
            }
        }
        configInfo := src.ConfigInfo()
        if configInfo.Digest != "" {
            blobDigests = append(blobDigests, configInfo.Digest)
        }
    }

    tmpDir, err := ioutil.TempDir(".", "layers-")
    if err != nil {
        return err
    }
    tmpDirRef, err := directory.NewReference(tmpDir)
    if err != nil {
        return err
    }
    dest, err := tmpDirRef.NewImageDestination(nil)
    if err != nil {
        return err
    }

    defer func() {
        if err := dest.Close(); err != nil {
            retErr = errors.Wrapf(retErr, " (close error: %v)", err)
        }
    }()

    for _, digest := range blobDigests {
        r, blobSize, err := rawSource.GetBlob(types.BlobInfo{Digest: digest, Size: -1})
        if err != nil {
            return err
        }
        if _, err := dest.PutBlob(r, types.BlobInfo{Digest: digest, Size: blobSize}); err != nil {
            if closeErr := r.Close(); closeErr != nil {
                return errors.Wrapf(err, " (close error: %v)", closeErr)
            }
            return err
        }
    }

    manifest, _, err := src.Manifest()
    if err != nil {
        return err
    }
    if err := dest.PutManifest(manifest); err != nil {
        return err
    }

    return dest.Commit()
    },
    return dest.Commit(ctx)
}

@@ -1,12 +1,14 @@
package main

import (
    "context"
    "fmt"
    "os"
    "time"

    "github.com/containers/image/signature"
    "github.com/containers/skopeo/version"
    "github.com/containers/storage/pkg/reexec"
    "github.com/projectatomic/skopeo/version"
    "github.com/sirupsen/logrus"
    "github.com/urfave/cli"
)
@@ -15,8 +17,22 @@ import (
// and will be populated by the Makefile
var gitCommit = ""

// createApp returns a cli.App to be run or tested.
func createApp() *cli.App {
type globalOptions struct {
    debug              bool          // Enable debug output
    tlsVerify          optionalBool  // Require HTTPS and verify certificates (for docker: and docker-daemon:)
    policyPath         string        // Path to a signature verification policy file
    insecurePolicy     bool          // Use an "allow everything" signature verification policy
    registriesDirPath  string        // Path to a "registries.d" registry configuration directory
    overrideArch       string        // Architecture to use for choosing images, instead of the runtime one
    overrideOS         string        // OS to use for choosing images, instead of the runtime one
    commandTimeout     time.Duration // Timeout for the command execution
    registriesConfPath string        // Path to the "registries.conf" file
}

// createApp returns a cli.App, and the underlying globalOptions object, to be run or tested.
func createApp() (*cli.App, *globalOptions) {
    opts := globalOptions{}

    app := cli.NewApp()
    app.EnableBashCompletion = true
    app.Name = "skopeo"
@@ -28,75 +44,112 @@ func createApp() *cli.App {
    app.Usage = "Various operations with container images and container image registries"
    app.Flags = []cli.Flag{
        cli.BoolFlag{
            Name:  "debug",
            Usage: "enable debug output",
            Name:        "debug",
            Usage:       "enable debug output",
            Destination: &opts.debug,
        },
        cli.BoolTFlag{
        cli.GenericFlag{
            Name:   "tls-verify",
            Usage:  "require HTTPS and verify certificates when talking to container registries (defaults to true)",
            Hidden: true,
            Value:  newOptionalBoolValue(&opts.tlsVerify),
        },
        cli.StringFlag{
            Name:  "policy",
            Value: "",
            Usage: "Path to a trust policy file",
            Name:        "policy",
            Usage:       "Path to a trust policy file",
            Destination: &opts.policyPath,
        },
        cli.BoolFlag{
            Name:  "insecure-policy",
            Usage: "run the tool without any policy check",
            Name:        "insecure-policy",
            Usage:       "run the tool without any policy check",
            Destination: &opts.insecurePolicy,
        },
        cli.StringFlag{
            Name:  "registries.d",
            Value: "",
            Usage: "use registry configuration files in `DIR` (e.g. for container signature storage)",
            Name:        "registries.d",
            Usage:       "use registry configuration files in `DIR` (e.g. for container signature storage)",
            Destination: &opts.registriesDirPath,
        },
        cli.StringFlag{
            Name:        "override-arch",
            Usage:       "use `ARCH` instead of the architecture of the machine for choosing images",
            Destination: &opts.overrideArch,
        },
        cli.StringFlag{
            Name:        "override-os",
            Usage:       "use `OS` instead of the running OS for choosing images",
            Destination: &opts.overrideOS,
        },
        cli.DurationFlag{
            Name:        "command-timeout",
            Usage:       "timeout for the command execution",
            Destination: &opts.commandTimeout,
        },
        cli.StringFlag{
            Name:        "registries-conf",
            Usage:       "path to the registries.conf file",
            Destination: &opts.registriesConfPath,
            Hidden:      true,
        },
    }
    app.Before = func(c *cli.Context) error {
        if c.GlobalBool("debug") {
            logrus.SetLevel(logrus.DebugLevel)
        }
        if c.GlobalIsSet("tls-verify") {
            logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
        }
        return nil
    }
    app.Before = opts.before
    app.Commands = []cli.Command{
        copyCmd,
        inspectCmd,
        layersCmd,
        deleteCmd,
        manifestDigestCmd,
        standaloneSignCmd,
        standaloneVerifyCmd,
        untrustedSignatureDumpCmd,
        copyCmd(&opts),
        inspectCmd(&opts),
        layersCmd(&opts),
        deleteCmd(&opts),
        manifestDigestCmd(),
        standaloneSignCmd(),
        standaloneVerifyCmd(),
        untrustedSignatureDumpCmd(),
    }
    return app
    return app, &opts
}

// before is run by the cli package for any command, before running the command-specific handler.
func (opts *globalOptions) before(ctx *cli.Context) error {
    if opts.debug {
        logrus.SetLevel(logrus.DebugLevel)
    }
    if opts.tlsVerify.present {
        logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
    }
    return nil
}

func main() {
    if reexec.Init() {
        return
    }
    app := createApp()
    app, _ := createApp()
    if err := app.Run(os.Args); err != nil {
        logrus.Fatal(err)
    }
}

// getPolicyContext handles the global "policy" flag.
func getPolicyContext(c *cli.Context) (*signature.PolicyContext, error) {
    policyPath := c.GlobalString("policy")
    var policy *signature.Policy // This could be cached across calls, if we had an application context.
// getPolicyContext returns a *signature.PolicyContext based on opts.
func (opts *globalOptions) getPolicyContext() (*signature.PolicyContext, error) {
    var policy *signature.Policy // This could be cached across calls in opts.
    var err error
    if c.GlobalBool("insecure-policy") {
    if opts.insecurePolicy {
        policy = &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
    } else if policyPath == "" {
    } else if opts.policyPath == "" {
        policy, err = signature.DefaultPolicy(nil)
    } else {
        policy, err = signature.NewPolicyFromFile(policyPath)
        policy, err = signature.NewPolicyFromFile(opts.policyPath)
    }
    if err != nil {
        return nil, err
    }
    return signature.NewPolicyContext(policy)
}

// commandTimeoutContext returns a context.Context and a cancellation callback based on opts.
// The caller should usually "defer cancel()" immediately after calling this.
func (opts *globalOptions) commandTimeoutContext() (context.Context, context.CancelFunc) {
    ctx := context.Background()
    var cancel context.CancelFunc = func() {}
    if opts.commandTimeout > 0 {
        ctx, cancel = context.WithTimeout(ctx, opts.commandTimeout)
    }
    return ctx, cancel
}

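The `commandTimeoutContext` helper above is the single place where `--command-timeout` takes effect; every subcommand derives its `context.Context` from it. A hedged usage sketch (the field name matches the struct above; the surrounding code is illustrative only):

```go
// Illustrative only: how a handler derives its context from the global options.
opts := &globalOptions{commandTimeout: 30 * time.Second}
ctx, cancel := opts.commandTimeoutContext()
defer cancel() // always cancel, even when no timeout was configured

// Any image operation using ctx now fails with context.DeadlineExceeded
// once the 30-second budget is exhausted.
_ = ctx
```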
@@ -5,7 +5,7 @@ import "bytes"
// runSkopeo creates an app object and runs it with args, with an implied first "skopeo".
// Returns output intended for stdout and the returned error, if any.
func runSkopeo(args ...string) (string, error) {
    app := createApp()
    app, _ := createApp()
    stdout := bytes.Buffer{}
    app.Writer = &stdout
    args = append([]string{"skopeo"}, args...)

@@ -3,17 +3,31 @@ package main

import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"

    "github.com/containers/image/manifest"
    "github.com/urfave/cli"
)

func manifestDigest(context *cli.Context) error {
    if len(context.Args()) != 1 {
type manifestDigestOptions struct {
}

func manifestDigestCmd() cli.Command {
    opts := manifestDigestOptions{}
    return cli.Command{
        Name:      "manifest-digest",
        Usage:     "Compute a manifest digest of a file",
        ArgsUsage: "MANIFEST",
        Action:    commandAction(opts.run),
    }
}

func (opts *manifestDigestOptions) run(args []string, stdout io.Writer) error {
    if len(args) != 1 {
        return errors.New("Usage: skopeo manifest-digest manifest")
    }
    manifestPath := context.Args()[0]
    manifestPath := args[0]

    man, err := ioutil.ReadFile(manifestPath)
    if err != nil {
@@ -23,13 +37,6 @@ func manifestDigest(context *cli.Context) error {
    if err != nil {
        return fmt.Errorf("Error computing digest: %v", err)
    }
    fmt.Fprintf(context.App.Writer, "%s\n", digest)
    fmt.Fprintf(stdout, "%s\n", digest)
    return nil
}

var manifestDigestCmd = cli.Command{
    Name:      "manifest-digest",
    Usage:     "Compute a manifest digest of a file",
    ArgsUsage: "MANIFEST",
    Action:    manifestDigest,
}

@@ -4,20 +4,41 @@ import (
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "io/ioutil"

    "github.com/containers/image/signature"
    "github.com/urfave/cli"
)

func standaloneSign(context *cli.Context) error {
    outputFile := context.String("output")
    if len(context.Args()) != 3 || outputFile == "" {
type standaloneSignOptions struct {
    output string // Output file path
}

func standaloneSignCmd() cli.Command {
    opts := standaloneSignOptions{}
    return cli.Command{
        Name:      "standalone-sign",
        Usage:     "Create a signature using local files",
        ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
        Action:    commandAction(opts.run),
        Flags: []cli.Flag{
            cli.StringFlag{
                Name:        "output, o",
                Usage:       "output the signature to `SIGNATURE`",
                Destination: &opts.output,
            },
        },
    }
}

func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
    if len(args) != 3 || opts.output == "" {
        return errors.New("Usage: skopeo standalone-sign manifest docker-reference key-fingerprint -o signature")
    }
    manifestPath := context.Args()[0]
    dockerReference := context.Args()[1]
    fingerprint := context.Args()[2]
    manifestPath := args[0]
    dockerReference := args[1]
    fingerprint := args[2]

    manifest, err := ioutil.ReadFile(manifestPath)
    if err != nil {
@@ -34,33 +55,33 @@ func standaloneSign(context *cli.Context) error {
        return fmt.Errorf("Error creating signature: %v", err)
    }

    if err := ioutil.WriteFile(outputFile, signature, 0644); err != nil {
        return fmt.Errorf("Error writing signature to %s: %v", outputFile, err)
    if err := ioutil.WriteFile(opts.output, signature, 0644); err != nil {
        return fmt.Errorf("Error writing signature to %s: %v", opts.output, err)
    }
    return nil
}

var standaloneSignCmd = cli.Command{
    Name:      "standalone-sign",
    Usage:     "Create a signature using local files",
    ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
    Action:    standaloneSign,
    Flags: []cli.Flag{
        cli.StringFlag{
            Name:  "output, o",
            Usage: "output the signature to `SIGNATURE`",
        },
    },
type standaloneVerifyOptions struct {
}

func standaloneVerify(context *cli.Context) error {
    if len(context.Args()) != 4 {
func standaloneVerifyCmd() cli.Command {
    opts := standaloneVerifyOptions{}
    return cli.Command{
        Name:      "standalone-verify",
        Usage:     "Verify a signature using local files",
        ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
        Action:    commandAction(opts.run),
    }
}

func (opts *standaloneVerifyOptions) run(args []string, stdout io.Writer) error {
    if len(args) != 4 {
        return errors.New("Usage: skopeo standalone-verify manifest docker-reference key-fingerprint signature")
    }
    manifestPath := context.Args()[0]
    expectedDockerReference := context.Args()[1]
    expectedFingerprint := context.Args()[2]
    signaturePath := context.Args()[3]
    manifestPath := args[0]
    expectedDockerReference := args[1]
    expectedFingerprint := args[2]
    signaturePath := args[3]

    unverifiedManifest, err := ioutil.ReadFile(manifestPath)
    if err != nil {
@@ -81,22 +102,35 @@ func standaloneVerify(context *cli.Context) error {
        return fmt.Errorf("Error verifying signature: %v", err)
    }

    fmt.Fprintf(context.App.Writer, "Signature verified, digest %s\n", sig.DockerManifestDigest)
    fmt.Fprintf(stdout, "Signature verified, digest %s\n", sig.DockerManifestDigest)
    return nil
}

var standaloneVerifyCmd = cli.Command{
    Name:      "standalone-verify",
    Usage:     "Verify a signature using local files",
    ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
    Action:    standaloneVerify,
// WARNING: Do not use the contents of this for ANY security decisions,
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
// There is NO REASON to expect the values to be correct, or not intentionally misleading
// (including things like “✅ Verified by $authority”)
//
// The subcommand is undocumented, and it may be renamed or entirely disappear in the future.
type untrustedSignatureDumpOptions struct {
}

func untrustedSignatureDump(context *cli.Context) error {
    if len(context.Args()) != 1 {
func untrustedSignatureDumpCmd() cli.Command {
    opts := untrustedSignatureDumpOptions{}
    return cli.Command{
        Name:      "untrusted-signature-dump-without-verification",
        Usage:     "Dump contents of a signature WITHOUT VERIFYING IT",
        ArgsUsage: "SIGNATURE",
        Hidden:    true,
        Action:    commandAction(opts.run),
    }
}

func (opts *untrustedSignatureDumpOptions) run(args []string, stdout io.Writer) error {
    if len(args) != 1 {
        return errors.New("Usage: skopeo untrusted-signature-dump-without-verification signature")
    }
    untrustedSignaturePath := context.Args()[0]
    untrustedSignaturePath := args[0]

    untrustedSignature, err := ioutil.ReadFile(untrustedSignaturePath)
    if err != nil {
@@ -111,20 +145,6 @@ func untrustedSignatureDump(context *cli.Context) error {
    if err != nil {
        return err
    }
    fmt.Fprintln(context.App.Writer, string(untrustedOut))
    fmt.Fprintln(stdout, string(untrustedOut))
    return nil
}

// WARNING: Do not use the contents of this for ANY security decisions,
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
// There is NO REASON to expect the values to be correct, or not intentionally misleading
// (including things like “✅ Verified by $authority”)
//
// The subcommand is undocumented, and it may be renamed or entirely disappear in the future.
var untrustedSignatureDumpCmd = cli.Command{
    Name:      "untrusted-signature-dump-without-verification",
    Usage:     "Dump contents of a signature WITHOUT VERIFYING IT",
    ArgsUsage: "SIGNATURE",
    Hidden:    true,
    Action:    untrustedSignatureDump,
}

11  cmd/skopeo/unshare.go  Normal file
@@ -0,0 +1,11 @@
// +build !linux

package main

func maybeReexec() error {
    return nil
}

func reexecIfNecessaryForImages(inputImageNames ...string) error {
    return nil
}
47  cmd/skopeo/unshare_linux.go  Normal file
@@ -0,0 +1,47 @@
package main

import (
    "github.com/containers/buildah/pkg/unshare"
    "github.com/containers/image/storage"
    "github.com/containers/image/transports/alltransports"
    "github.com/pkg/errors"
    "github.com/syndtr/gocapability/capability"
)

var neededCapabilities = []capability.Cap{
    capability.CAP_CHOWN,
    capability.CAP_DAC_OVERRIDE,
    capability.CAP_FOWNER,
    capability.CAP_FSETID,
    capability.CAP_MKNOD,
    capability.CAP_SETFCAP,
}

func maybeReexec() error {
    // With Skopeo we need only the subset of the root capabilities necessary
    // for pulling an image to the storage. Do not attempt to create a namespace
    // if we already have the capabilities we need.
    capabilities, err := capability.NewPid(0)
    if err != nil {
        return errors.Wrapf(err, "error reading the current capabilities sets")
    }
    for _, cap := range neededCapabilities {
        if !capabilities.Get(capability.EFFECTIVE, cap) {
            // We lack a capability we need; create a user namespace.
            unshare.MaybeReexecUsingUserNamespace(true)
            return nil
        }
    }
    return nil
}

func reexecIfNecessaryForImages(imageNames ...string) error {
    // Check whether containers-storage is used before doing unshare
    for _, imageName := range imageNames {
        transport := alltransports.TransportFromImageName(imageName)
        if transport != nil && transport.Name() == storage.Transport.Name() {
            return maybeReexec()
        }
    }
    return nil
}
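To make the gating concrete, here is an illustrative snippet (the image names are examples, not taken from the diff) showing which references would trigger the user-namespace re-exec path above; only names that parse to the containers-storage transport do:

```go
// Illustrative only: registry references need no extra capabilities,
// while containers-storage references may re-exec into a user namespace.
names := []string{
    "docker://docker.io/library/alpine:latest", // no re-exec
    "containers-storage:localhost/test:latest", // may re-exec
}
for _, name := range names {
    if err := reexecIfNecessaryForImages(name); err != nil {
        logrus.Fatal(err)
    }
}
```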
@@ -1,7 +1,9 @@
package main

import (
    "context"
    "errors"
    "io"
    "strings"

    "github.com/containers/image/transports/alltransports"
@@ -9,28 +11,184 @@ import (
    "github.com/urfave/cli"
)

func contextFromGlobalOptions(c *cli.Context, flagPrefix string) (*types.SystemContext, error) {
// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that cli.ShowSubcommandHelp should be called.
type errorShouldDisplayUsage struct {
    error
}

// commandAction intermediates between the cli.ActionFunc interface and the real handler,
// primarily to ensure that cli.Context is not available to the handler, which in turn
// makes sure that the cli.String() etc. flag access functions are not used,
// and everything is done using the *Options structures and the Destination: members of cli.Flag.
// handler may return errorShouldDisplayUsage to cause cli.ShowSubcommandHelp to be called.
func commandAction(handler func(args []string, stdout io.Writer) error) cli.ActionFunc {
    return func(c *cli.Context) error {
        err := handler(([]string)(c.Args()), c.App.Writer)
        if _, ok := err.(errorShouldDisplayUsage); ok {
            cli.ShowSubcommandHelp(c)
        }
        return err
    }
}

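The adapter above means each subcommand's handler is an ordinary function of `(args, stdout)`. A hedged sketch of how a new subcommand would plug into it (the names here are hypothetical, for illustration only):

```go
// Hypothetical subcommand, shown only to illustrate the commandAction wiring.
type exampleOptions struct{}

func (opts *exampleOptions) run(args []string, stdout io.Writer) error {
    if len(args) != 1 {
        // Returning errorShouldDisplayUsage makes the adapter print subcommand help.
        return errorShouldDisplayUsage{errors.New("exactly one argument expected")}
    }
    _, err := fmt.Fprintf(stdout, "processed %s\n", args[0])
    return err
}

func exampleCmd() cli.Command {
    opts := exampleOptions{}
    return cli.Command{
        Name:   "example",
        Action: commandAction(opts.run),
    }
}
```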
// sharedImageOptions collects CLI flags which are image-related, but do not change across images.
// This really should be a part of globalOptions, but that would break existing users of (skopeo copy --authfile=).
type sharedImageOptions struct {
    authFilePath string // Path to a */containers/auth.json
}

// sharedImageFlags prepares a collection of CLI flags writing into sharedImageOptions, and the managed sharedImageOptions structure.
func sharedImageFlags() ([]cli.Flag, *sharedImageOptions) {
    opts := sharedImageOptions{}
    return []cli.Flag{
        cli.StringFlag{
            Name:        "authfile",
            Usage:       "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
            Destination: &opts.authFilePath,
        },
    }, &opts
}

// imageOptions collects CLI flags which are the same across subcommands, but may be different for each image
// (e.g. may differ between the source and destination of a copy)
type imageOptions struct {
    global           *globalOptions      // May be shared across several imageOptions instances.
    shared           *sharedImageOptions // May be shared across several imageOptions instances.
    credsOption      optionalString      // username[:password] for accessing a registry
    dockerCertPath   string              // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
    tlsVerify        optionalBool        // Require HTTPS and verify certificates (for docker: and docker-daemon:)
    sharedBlobDir    string              // A directory to use for OCI blobs, shared across repositories
    dockerDaemonHost string              // docker-daemon: host to connect to
    noCreds          bool                // Access the registry anonymously
}

// imageFlags prepares a collection of CLI flags writing into imageOptions, and the managed imageOptions structure.
func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) ([]cli.Flag, *imageOptions) {
    opts := imageOptions{
        global: global,
        shared: shared,
    }

    // This is horribly ugly, but we need to support the old option forms of (skopeo copy) for compatibility.
    // Don't add any more cases like this.
    credsOptionExtra := ""
    if credsOptionAlias != "" {
        credsOptionExtra += "," + credsOptionAlias
    }

    return []cli.Flag{
        cli.GenericFlag{
            Name:  flagPrefix + "creds" + credsOptionExtra,
            Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
            Value: newOptionalStringValue(&opts.credsOption),
        },
        cli.StringFlag{
            Name:        flagPrefix + "cert-dir",
            Usage:       "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry or daemon",
            Destination: &opts.dockerCertPath,
        },
        cli.GenericFlag{
            Name:  flagPrefix + "tls-verify",
            Usage: "require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)",
            Value: newOptionalBoolValue(&opts.tlsVerify),
        },
        cli.StringFlag{
            Name:        flagPrefix + "shared-blob-dir",
            Usage:       "`DIRECTORY` to use to share blobs across OCI repositories",
            Destination: &opts.sharedBlobDir,
        },
        cli.StringFlag{
            Name:        flagPrefix + "daemon-host",
            Usage:       "use docker daemon host at `HOST` (docker-daemon: only)",
            Destination: &opts.dockerDaemonHost,
        },
        cli.BoolFlag{
            Name:        flagPrefix + "no-creds",
            Usage:       "Access the registry anonymously",
            Destination: &opts.noCreds,
        },
    }, &opts
}

// newSystemContext returns a *types.SystemContext corresponding to opts.
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
    ctx := &types.SystemContext{
        RegistriesDirPath: c.GlobalString("registries.d"),
        DockerCertPath:    c.String(flagPrefix + "cert-dir"),
        // DEPRECATED: keep this here for backward compatibility, but override
        // them if per subcommand flags are provided (see below).
        DockerInsecureSkipTLSVerify: !c.GlobalBoolT("tls-verify"),
        OSTreeTmpDirPath:            c.String(flagPrefix + "ostree-tmp-dir"),
        RegistriesDirPath:        opts.global.registriesDirPath,
        ArchitectureChoice:       opts.global.overrideArch,
        OSChoice:                 opts.global.overrideOS,
        DockerCertPath:           opts.dockerCertPath,
        OCISharedBlobDirPath:     opts.sharedBlobDir,
        AuthFilePath:             opts.shared.authFilePath,
        DockerDaemonHost:         opts.dockerDaemonHost,
        DockerDaemonCertPath:     opts.dockerCertPath,
        SystemRegistriesConfPath: opts.global.registriesConfPath,
    }
    if c.IsSet(flagPrefix + "tls-verify") {
        ctx.DockerInsecureSkipTLSVerify = !c.BoolT(flagPrefix + "tls-verify")
    if opts.tlsVerify.present {
        ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value
    }
    if c.IsSet(flagPrefix + "creds") {
    // DEPRECATED: We support this for backward compatibility, but override it if a per-image flag is provided.
    if opts.global.tlsVerify.present {
        ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.global.tlsVerify.value)
    }
    if opts.tlsVerify.present {
        ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
    }
    if opts.credsOption.present && opts.noCreds {
        return nil, errors.New("creds and no-creds cannot be specified at the same time")
    }
    if opts.credsOption.present {
        var err error
        ctx.DockerAuthConfig, err = getDockerAuth(c.String(flagPrefix + "creds"))
        ctx.DockerAuthConfig, err = getDockerAuth(opts.credsOption.value)
        if err != nil {
            return nil, err
        }
    }
    if opts.noCreds {
        ctx.DockerAuthConfig = &types.DockerAuthConfig{}
    }
    return ctx, nil
}

// imageDestOptions is a superset of imageOptions specialized for image destinations.
type imageDestOptions struct {
    *imageOptions
    osTreeTmpDir        string // A directory to use for OSTree temporary files
    dirForceCompression bool   // Compress layers when saving to the dir: transport
}

// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) ([]cli.Flag, *imageDestOptions) {
    genericFlags, genericOptions := imageFlags(global, shared, flagPrefix, credsOptionAlias)
    opts := imageDestOptions{imageOptions: genericOptions}

    return append(genericFlags, []cli.Flag{
        cli.StringFlag{
            Name:        flagPrefix + "ostree-tmp-dir",
            Usage:       "`DIRECTORY` to use for OSTree temporary files",
            Destination: &opts.osTreeTmpDir,
        },
        cli.BoolFlag{
            Name:        flagPrefix + "compress",
            Usage:       "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)",
            Destination: &opts.dirForceCompression,
        },
    }...), &opts
}

// newSystemContext returns a *types.SystemContext corresponding to opts.
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
    ctx, err := opts.imageOptions.newSystemContext()
    if err != nil {
        return nil, err
    }

    ctx.OSTreeTmpDirPath = opts.osTreeTmpDir
    ctx.DirForceCompress = opts.dirForceCompression
    return ctx, err
}

func parseCreds(creds string) (string, string, error) {
    if creds == "" {
        return "", "", errors.New("credentials can't be empty")
@@ -57,30 +215,29 @@ func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
}

// parseImage converts image URL-like string to an initialized handler for that image.
// The caller must call .Close() on the returned Image.
func parseImage(c *cli.Context) (types.Image, error) {
    imgName := c.Args().First()
    ref, err := alltransports.ParseImageName(imgName)
    if err != nil {
        return nil, err
    }
    ctx, err := contextFromGlobalOptions(c, "")
    if err != nil {
        return nil, err
    }
    return ref.NewImage(ctx)
}

// parseImageSource converts image URL-like string to an ImageSource.
// The caller must call .Close() on the returned ImageSource.
func parseImageSource(c *cli.Context, name string) (types.ImageSource, error) {
// The caller must call .Close() on the returned ImageCloser.
func parseImage(ctx context.Context, opts *imageOptions, name string) (types.ImageCloser, error) {
    ref, err := alltransports.ParseImageName(name)
    if err != nil {
        return nil, err
    }
    ctx, err := contextFromGlobalOptions(c, "")
    sys, err := opts.newSystemContext()
    if err != nil {
        return nil, err
    }
    return ref.NewImageSource(ctx)
    return ref.NewImage(ctx, sys)
}

// parseImageSource converts image URL-like string to an ImageSource.
// The caller must call .Close() on the returned ImageSource.
func parseImageSource(ctx context.Context, opts *imageOptions, name string) (types.ImageSource, error) {
    ref, err := alltransports.ParseImageName(name)
    if err != nil {
        return nil, err
    }
    sys, err := opts.newSystemContext()
    if err != nil {
        return nil, err
    }
    return ref.NewImageSource(ctx, sys)
}

184  cmd/skopeo/utils_test.go  Normal file
@@ -0,0 +1,184 @@
package main

import (
    "flag"
    "testing"

    "github.com/containers/image/types"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// fakeGlobalOptions creates globalOptions and sets it according to flags.
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
func fakeGlobalOptions(t *testing.T, flags []string) *globalOptions {
    app, opts := createApp()

    flagSet := flag.NewFlagSet(app.Name, flag.ContinueOnError)
    for _, f := range app.Flags {
        f.Apply(flagSet)
    }
    err := flagSet.Parse(flags)
    require.NoError(t, err)

    return opts
}

// fakeImageOptions creates imageOptions and sets it according to globalFlags/cmdFlags.
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
func fakeImageOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageOptions {
    globalOpts := fakeGlobalOptions(t, globalFlags)

    sharedFlags, sharedOpts := sharedImageFlags()
    imageFlags, imageOpts := imageFlags(globalOpts, sharedOpts, flagPrefix, "")
    flagSet := flag.NewFlagSet("fakeImageOptions", flag.ContinueOnError)
    for _, f := range append(sharedFlags, imageFlags...) {
        f.Apply(flagSet)
    }
    err := flagSet.Parse(cmdFlags)
    require.NoError(t, err)
    return imageOpts
}

func TestImageOptionsNewSystemContext(t *testing.T) {
    // Default state
    opts := fakeImageOptions(t, "dest-", []string{}, []string{})
    res, err := opts.newSystemContext()
    require.NoError(t, err)
    assert.Equal(t, &types.SystemContext{}, res)

    // Set everything to non-default values.
    opts = fakeImageOptions(t, "dest-", []string{
        "--registries.d", "/srv/registries.d",
        "--override-arch", "overridden-arch",
        "--override-os", "overridden-os",
    }, []string{
        "--authfile", "/srv/authfile",
        "--dest-cert-dir", "/srv/cert-dir",
        "--dest-shared-blob-dir", "/srv/shared-blob-dir",
        "--dest-daemon-host", "daemon-host.example.com",
        "--dest-tls-verify=false",
        "--dest-creds", "creds-user:creds-password",
    })
    res, err = opts.newSystemContext()
    require.NoError(t, err)
    assert.Equal(t, &types.SystemContext{
        RegistriesDirPath:                 "/srv/registries.d",
        AuthFilePath:                      "/srv/authfile",
        ArchitectureChoice:                "overridden-arch",
        OSChoice:                          "overridden-os",
        OCISharedBlobDirPath:              "/srv/shared-blob-dir",
        DockerCertPath:                    "/srv/cert-dir",
        DockerInsecureSkipTLSVerify:       types.OptionalBoolTrue,
        DockerAuthConfig:                  &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
        DockerDaemonCertPath:              "/srv/cert-dir",
        DockerDaemonHost:                  "daemon-host.example.com",
        DockerDaemonInsecureSkipTLSVerify: true,
    }, res)

    // Global/per-command tlsVerify behavior
    for _, c := range []struct {
        global, cmd          string
        expectedDocker       types.OptionalBool
        expectedDockerDaemon bool
    }{
        {"", "", types.OptionalBoolUndefined, false},
        {"", "false", types.OptionalBoolTrue, true},
        {"", "true", types.OptionalBoolFalse, false},
        {"false", "", types.OptionalBoolTrue, false},
        {"false", "false", types.OptionalBoolTrue, true},
        {"false", "true", types.OptionalBoolFalse, false},
        {"true", "", types.OptionalBoolFalse, false},
        {"true", "false", types.OptionalBoolTrue, true},
        {"true", "true", types.OptionalBoolFalse, false},
    } {
        globalFlags := []string{}
        if c.global != "" {
            globalFlags = append(globalFlags, "--tls-verify="+c.global)
        }
        cmdFlags := []string{}
        if c.cmd != "" {
            cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
        }
        opts := fakeImageOptions(t, "dest-", globalFlags, cmdFlags)
        res, err = opts.newSystemContext()
        require.NoError(t, err)
        assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
        assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
    }

    // Invalid option values
    opts = fakeImageOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
    _, err = opts.newSystemContext()
    assert.Error(t, err)
}

// fakeImageDestOptions creates imageDestOptions and sets it according to globalFlags/cmdFlags.
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
func fakeImageDestOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageDestOptions {
    globalOpts := fakeGlobalOptions(t, globalFlags)

    sharedFlags, sharedOpts := sharedImageFlags()
    imageFlags, imageOpts := imageDestFlags(globalOpts, sharedOpts, flagPrefix, "")
    flagSet := flag.NewFlagSet("fakeImageDestOptions", flag.ContinueOnError)
    for _, f := range append(sharedFlags, imageFlags...) {
        f.Apply(flagSet)
    }
    err := flagSet.Parse(cmdFlags)
    require.NoError(t, err)
    return imageOpts
}

func TestImageDestOptionsNewSystemContext(t *testing.T) {
    // Default state
    opts := fakeImageDestOptions(t, "dest-", []string{}, []string{})
    res, err := opts.newSystemContext()
    require.NoError(t, err)
    assert.Equal(t, &types.SystemContext{}, res)

    // Explicitly set everything to default, except for when the default is “not present”
    opts = fakeImageDestOptions(t, "dest-", []string{}, []string{
        "--dest-compress=false",
    })
    res, err = opts.newSystemContext()
    require.NoError(t, err)
    assert.Equal(t, &types.SystemContext{}, res)

    // Set everything to non-default values.
    opts = fakeImageDestOptions(t, "dest-", []string{
        "--registries.d", "/srv/registries.d",
        "--override-arch", "overridden-arch",
        "--override-os", "overridden-os",
    }, []string{
        "--authfile", "/srv/authfile",
        "--dest-cert-dir", "/srv/cert-dir",
        "--dest-ostree-tmp-dir", "/srv/ostree-tmp-dir",
        "--dest-shared-blob-dir", "/srv/shared-blob-dir",
        "--dest-compress=true",
        "--dest-daemon-host", "daemon-host.example.com",
        "--dest-tls-verify=false",
        "--dest-creds", "creds-user:creds-password",
    })
    res, err = opts.newSystemContext()
    require.NoError(t, err)
    assert.Equal(t, &types.SystemContext{
        RegistriesDirPath:                 "/srv/registries.d",
        AuthFilePath:                      "/srv/authfile",
        ArchitectureChoice:                "overridden-arch",
        OSChoice:                          "overridden-os",
        OCISharedBlobDirPath:              "/srv/shared-blob-dir",
        DockerCertPath:                    "/srv/cert-dir",
        DockerInsecureSkipTLSVerify:       types.OptionalBoolTrue,
        DockerAuthConfig:                  &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
        OSTreeTmpDirPath:                  "/srv/ostree-tmp-dir",
        DockerDaemonCertPath:              "/srv/cert-dir",
        DockerDaemonHost:                  "daemon-host.example.com",
        DockerDaemonInsecureSkipTLSVerify: true,
        DirForceCompress:                  true,
    }, res)

    // Invalid option values in imageOptions
    opts = fakeImageDestOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
    _, err = opts.newSystemContext()
    assert.Error(t, err)
}
@@ -5,52 +5,90 @@
|
||||
_complete_() {
|
||||
local options_with_args=$1
|
||||
local boolean_options="$2 -h --help"
|
||||
local transports=$3
|
||||
|
||||
case "$prev" in
|
||||
$options_with_args)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
local option_with_args
|
||||
for option_with_args in $options_with_args $transports
|
||||
do
|
||||
if [ "$option_with_args" == "$prev" -o "$option_with_args" == "$cur" ]
|
||||
then
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
|
||||
;;
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
if [ -n "$transports" ]
|
||||
then
|
||||
compopt -o nospace
|
||||
COMPREPLY=( $( compgen -W "$transports" -- "$cur" ) )
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_skopeo_supported_transports() {
|
||||
local subcommand=$1
|
||||
|
||||
${PROG} $subcommand --help | grep "Supported transports" -A 1 | tail -n 1 | sed -e 's/,/:/g' -e 's/$/:/'
|
||||
}
|
||||
|
||||
_skopeo_copy() {
|
||||
local options_with_args="
|
||||
--sign-by
|
||||
    --src-creds --screds
    --src-cert-dir
    --src-tls-verify
    --dest-creds --dcreds
    --dest-cert-dir
    --dest-ostree-tmp-dir
    --dest-tls-verify
    "
    local boolean_options="
    --remove-signatures
    "
    _complete_ "$options_with_args" "$boolean_options"
    local options_with_args="
    --authfile
    --format -f
    --sign-by
    --src-creds --screds
    --src-cert-dir
    --src-tls-verify
    --dest-creds --dcreds
    --dest-cert-dir
    --dest-ostree-tmp-dir
    --dest-tls-verify
    --src-daemon-host
    --dest-daemon-host
    "

    local boolean_options="
    --dest-compress
    --remove-signatures
    --src-no-creds
    --dest-no-creds
    "

    local transports="
    $(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
    "

    _complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_inspect() {
    local options_with_args="
    --creds
    --cert-dir
    --authfile
    --creds
    --cert-dir
    "
    local boolean_options="
    --raw
    --tls-verify
    --config
    --raw
    --tls-verify
    --no-creds
    "
    _complete_ "$options_with_args" "$boolean_options"

    local transports="
    $(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
    "

    _complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_standalone_sign() {
    local options_with_args="
    -o --output
    -o --output
    "
    local boolean_options="
    "
@@ -75,52 +113,62 @@ _skopeo_manifest_digest() {

_skopeo_delete() {
    local options_with_args="
    --creds
    --cert-dir
    --authfile
    --creds
    --cert-dir
    "
    local boolean_options="
    --tls-verify
    --tls-verify
    --no-creds
    "
    _complete_ "$options_with_args" "$boolean_options"

    local transports="
    $(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
    "

    _complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_layers() {
    local options_with_args="
    --creds
    --cert-dir
    --creds
    --cert-dir
    "
    local boolean_options="
    --tls-verify
    --tls-verify
    "
    _complete_ "$options_with_args" "$boolean_options"
}

_skopeo_skopeo() {
    local options_with_args="
    --policy
    --registries.d
    --policy
    --registries.d
    --override-arch
    --override-os
    --command-timeout
    "
    local boolean_options="
    --insecure-policy
    --debug
    --version -v
    --help -h
    --insecure-policy
    --debug
    --version -v
    --help -h
    "
    commands=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )

    case "$prev" in
        $main_options_with_args_glob )
            return
            ;;
        $main_options_with_args_glob )
            return
            ;;
    esac

    case "$cur" in
        -*)
            COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
            ;;
        *)
            COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
            ;;
        -*)
            COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
            ;;
        *)
            COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
            ;;
    esac
}

@@ -138,15 +186,17 @@ _cli_bash_autocomplete() {
    local counter=1
    counter=1
    while [ $counter -lt $cword ]; do
        case "${words[$counter]}" in
            *)
                command=$(echo "${words[$counter]}" | sed 's/-/_/g')
                cpos=$counter
                (( cpos++ ))
                break
                ;;
        esac
        (( counter++ ))
        case "${words[$counter]}" in
            -*)
                ;;
            *)
                command=$(echo "${words[$counter]}" | sed 's/-/_/g')
                cpos=$counter
                (( cpos++ ))
                break
                ;;
        esac
        (( counter++ ))
    done

    local completions_func=_skopeo_${command}
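The dispatcher above resolves the typed subcommand to one of the `_skopeo_*` functions and calls it to fill `COMPREPLY`. A minimal sketch of wiring the script into an interactive shell, assuming it is installed at an illustrative path:

```sh
# Load the completion script and bind its entry point; bash then invokes
# _cli_bash_autocomplete for every `skopeo <TAB>` press.
source /usr/share/bash-completion/completions/skopeo   # path is illustrative
complete -F _cli_bash_autocomplete skopeo
```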
60 contrib/containers-storage.conf.5.md Normal file
@@ -0,0 +1,60 @@
% storage.conf(5) Container Storage Configuration File
% Dan Walsh
% May 2017

# NAME
storage.conf - Syntax of Container Storage configuration file

# DESCRIPTION
The STORAGE configuration file specifies all of the available container storage options
for tools using shared container storage.

# FORMAT
The [TOML format][toml] is used as the encoding of the configuration file.
Every option and subtable listed here is nested under a global "storage" table.
No bare options are used. The format of TOML can be simplified to:

    [table]
    option = value

    [table.subtable1]
    option = value

    [table.subtable2]
    option = value

## STORAGE TABLE

The `storage` table supports the following options:

**graphroot**=""
container storage graph dir (default: "/var/lib/containers/storage")
Default directory to store all writable content created by container storage programs.

**runroot**=""
container storage run dir (default: "/var/run/containers/storage")
Default directory to store all temporary writable content created by container storage programs.

**driver**=""
container storage driver (default is "overlay")
Default Copy On Write (COW) container storage driver.

### STORAGE OPTIONS TABLE

The `storage.options` table supports the following options:

**additionalimagestores**=[]
Paths to additional container image stores. Usually these are read-only and stored on remote network shares.

**size**=""
Maximum size of a container image. Default is 10GB. This flag can be used to set a quota
on the size of container images.

**override_kernel_check**=""
Tell storage drivers to ignore kernel version checks. Some storage drivers assume that if a kernel is too
old, the driver is not supported. But for kernels that have had the drivers backported, this flag
allows users to override the checks.

# HISTORY
May 2017, Originally compiled by Dan Walsh <dwalsh@redhat.com>
Format copied from crio.conf man page created by Aleksa Sarai <asarai@suse.de>
28 contrib/storage.conf Normal file
@@ -0,0 +1,28 @@
# storage.conf is the configuration file for all tools
# that share the containers/storage libraries
# See man 5 containers-storage.conf for more information

# The "container storage" table contains all of the server options.
[storage]

# Default Storage Driver
driver = "overlay"

# Temporary storage location
runroot = "/var/run/containers/storage"

# Primary read-write location of container storage
graphroot = "/var/lib/containers/storage"

[storage.options]
# AdditionalImageStores is used to pass paths to additional read-only image stores
# Must be comma separated list.
additionalimagestores = [
]

# Size is used to set a maximum size of the container image. Only supported by
# certain container storage drivers (currently overlay, zfs, vfs, btrfs)
size = ""

# OverrideKernelCheck tells the driver to ignore kernel checks based on kernel version
override_kernel_check = "true"
83 docs/skopeo-copy.1.md Normal file
@@ -0,0 +1,83 @@
% skopeo-copy(1)

## NAME
skopeo\-copy - Copy an image (manifest, filesystem layers, signatures) from one location to another.

## SYNOPSIS
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_

## DESCRIPTION
Copy an image (manifest, filesystem layers, signatures) from one location to another.

Uses the system's trust policy to validate images, rejects images not trusted by the policy.

_source-image_ use the "image name" format described above

_destination-image_ use the "image name" format described above

## OPTIONS

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving an image to a directory using the 'dir:' transport (default is the manifest type of the source)

**--quiet, -q** suppress output information when copying images

**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.

**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_

**--src-creds** _username[:password]_ for accessing the source registry

**--dest-compress** _bool-value_ Compress tarball image layers when saving to a directory using the 'dir' transport. (default is the same compression type as the source)

**--dest-creds** _username[:password]_ for accessing the destination registry

**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon

**--src-no-creds** _bool-value_ Access the registry anonymously.

**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to the container source registry or daemon (defaults to true)

**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon

**--dest-no-creds** _bool-value_ Access the registry anonymously.

**--dest-ostree-tmp-dir** _path_ Directory to use for OSTree temporary files.

**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to the container destination registry or daemon (defaults to true)

**--src-daemon-host** _host_ Copy from the docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

**--dest-daemon-host** _host_ Copy to the docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

Existing signatures, if any, are preserved as well.

## EXAMPLES

To copy the layers of the docker.io busybox image to a local directory:
```sh
$ mkdir -p /var/lib/images/busybox
$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
$ ls /var/lib/images/busybox/*
/var/lib/images/busybox/2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749.tar
/var/lib/images/busybox/manifest.json
/var/lib/images/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
```

To copy and sign an image:

```sh
$ skopeo copy --sign-by dev@example.com atomic:example/busybox:streaming atomic:example/busybox:gold
```
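
Copying between two authenticated registries combines the source and destination flags documented above; a brief sketch (registry hosts and credentials are placeholders):

```sh
# Copy directly from one registry to another, authenticating on both ends.
$ skopeo copy --src-creds=testuser:testpassword --dest-creds=otheruser:otherpassword \
    docker://src.example.com/example/busybox:latest \
    docker://dest.example.com/example/busybox:latest
```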

## SEE ALSO
skopeo(1), podman-login(1), docker-login(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
52 docs/skopeo-delete.1.md Normal file
@@ -0,0 +1,52 @@
% skopeo-delete(1)

## NAME
skopeo\-delete - Mark _image-name_ for deletion.

## SYNOPSIS
**skopeo delete** _image-name_

Mark _image-name_ for deletion. To release the allocated disk space, you must log in to the container registry server and run the container registry garbage collector. E.g.,

```
/usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml

Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml

If you are running the container registry inside of a container, you would execute something like:

$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml

```

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

**--no-creds** _bool-value_ Access the registry anonymously.

Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
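
For a registry run from the standard `registry:2` image, the delete API can be enabled through that environment variable; a minimal sketch (container name and port are illustrative):

```sh
# Start a local registry with the delete API enabled, then delete a tag
# from it (plain HTTP here, so TLS verification is turned off).
$ docker run -d --name registry -p 5000:5000 \
    -e REGISTRY_STORAGE_DELETE_ENABLED=true registry:2
$ skopeo delete --tls-verify=false docker://localhost:5000/example/pause:latest
```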

## EXAMPLES

Mark image example/pause for deletion from the registry.example.com registry:
```sh
$ skopeo delete --force docker://registry.example.com/example/pause:latest
```
See above for additional details on using the command **delete**.

## SEE ALSO
skopeo(1), podman-login(1), docker-login(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
71 docs/skopeo-inspect.1.md Normal file
@@ -0,0 +1,71 @@
% skopeo-inspect(1)

## NAME
skopeo\-inspect - Return low-level information about _image-name_ in a registry

## SYNOPSIS
**skopeo inspect** [**--raw**] [**--config**] _image-name_

Return low-level information about _image-name_ in a registry

**--raw** output raw manifest, default is to format in JSON

_image-name_ name of image to retrieve information about

**--config** output configuration in OCI format, default is to format in JSON

_image-name_ name of image to retrieve configuration for

**--config** **--raw** output configuration in raw format

_image-name_ name of image to retrieve configuration for

**--authfile** _path_

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

**--no-creds** _bool-value_ Access the registry anonymously.

## EXAMPLES

To review information for the image fedora from the docker.io registry:
```sh
$ skopeo inspect docker://docker.io/fedora
{
    "Name": "docker.io/library/fedora",
    "Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
    "RepoTags": [
        "20",
        "21",
        "22",
        "23",
        "24",
        "heisenbug",
        "latest",
        "rawhide"
    ],
    "Created": "2016-06-20T19:33:43.220526898Z",
    "DockerVersion": "1.10.3",
    "Labels": {},
    "Architecture": "amd64",
    "Os": "linux",
    "Layers": [
        "sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
    ]
}
```
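
To fetch the image configuration instead of the manifest, combine the flags documented above; a brief sketch:

```sh
# OCI-format configuration of the same image; add --raw to get the
# unparsed configuration blob instead.
$ skopeo inspect --config docker://docker.io/fedora
```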

## SEE ALSO
skopeo(1), podman-login(1), docker-login(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
26 docs/skopeo-manifest-digest.1.md Normal file
@@ -0,0 +1,26 @@
% skopeo-manifest-digest(1)

## NAME
skopeo\-manifest\-digest - Compute a manifest digest of manifest-file and write it to standard output.

## SYNOPSIS
**skopeo manifest-digest** _manifest-file_

## DESCRIPTION

Compute a manifest digest of _manifest-file_ and write it to standard output.

## EXAMPLES

```sh
$ skopeo manifest-digest manifest.json
sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
```
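
Since the default manifest digest is the SHA-256 of the manifest file's bytes, the output can be cross-checked with coreutils; a sketch (assuming a sha256 digest):

```sh
# The hex portion of the digest should match the sha256 of the file.
$ sha256sum manifest.json
a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6  manifest.json
```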

## SEE ALSO
skopeo(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
34 docs/skopeo-standalone-sign.1.md Normal file
@@ -0,0 +1,34 @@
% skopeo-standalone-sign(1)

## NAME
skopeo\-standalone\-sign - Sign an image.

## SYNOPSIS
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_

## DESCRIPTION
This is primarily a debugging tool, or useful for special cases,
and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference to identify the image with

_key-fingerprint_ Key identity to use for signing

**--output**|**-o** output file

## EXAMPLES

```sh
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
$
```

## SEE ALSO
skopeo(1), skopeo-copy(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
36 docs/skopeo-standalone-verify.1.md Normal file
@@ -0,0 +1,36 @@
% skopeo-standalone-verify(1)

## NAME
skopeo\-standalone\-verify - Verify an image signature

## SYNOPSIS
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_

## DESCRIPTION

Verify a signature using local files; the digest will be printed on success.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference expected to identify the image in the signature

_key-fingerprint_ Expected identity of the signing key

_signature_ Path to the signature file

**Note:** If you do use this, make sure that the image can not be changed at the source location between the times of its verification and use.

## EXAMPLES

```sh
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```

## SEE ALSO
skopeo(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
240 docs/skopeo.1.md
@@ -1,14 +1,28 @@
% SKOPEO(1) Skopeo Man Pages
% Jhon Honce
% August 2016
# NAME
skopeo -- Various operations with container images and container image registries
# SYNOPSIS
**skopeo** [_global options_] _command_ [_command options_]
# DESCRIPTION
`skopeo` is a command line utility providing various operations with container images and container image registries. For example, it is able to inspect a repository on a container registry and fetch images. It fetches the repository's manifest and is able to show you a `docker inspect`-like json output about a whole repository or a tag. This tool, in contrast to `docker inspect`, helps you gather useful information about a repository or a tag without requiring you to run `docker pull` - e.g. - which tags are available for the given repository? which labels does the image have?
## NAME
skopeo -- Command line utility used to interact with local and remote container images and container image registries

## SYNOPSIS
**skopeo** [_global options_] _command_ [_command options_]

## DESCRIPTION
`skopeo` is a command line utility providing various operations with container images and container image registries.

`skopeo` can copy container images between various container image stores, converting them as necessary. For example, you can use `skopeo` to copy container images from one container registry to another.

`skopeo` can convert a Docker schema 2 or schema 1 container image to an OCI image.

`skopeo` can inspect a repository on a container registry without needlessly pulling the image. Pulling an image from a repository, especially a remote repository, is an expensive network and storage operation. Skopeo fetches the repository's manifest and displays a `docker inspect`-like json output about the repository or a tag. `skopeo`, in contrast to `docker inspect`, helps you gather useful information about a repository or a tag without requiring you to run `docker pull` - e.g. - Which tags are available for the given repository? Which labels does the image have?

`skopeo` can sign and verify container images.

`skopeo` can delete container images from a remote container registry.

Note: `skopeo` does not require a container runtime to be running for most of
its functionality. It also does not require root, unless you are copying images into a container runtime storage backend, like the docker daemon or github.com/containers/storage.

It also allows you to copy container images between various registries, possibly converting them as necessary, and to sign and verify images.
## IMAGE NAMES
Most commands refer to container images, using a _transport_`:`_details_ format. The following formats are supported:

@@ -19,13 +33,13 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

**docker://**_docker-reference_
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$HOME/.docker/config.json`, which is set e.g. using `(docker login)`.
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(podman login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.

**docker-archive:**_path_[**:**_docker-reference_]
An image is stored in the `docker save` formated file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.
An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.

**docker-daemon:**_docker-reference_
An image _docker-reference_ stored in the docker daemon internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can also be docker-daemon:algo:digest (an image ID).
An image _docker-reference_ stored in the docker daemon internal storage. _docker-reference_ must contain either a tag or a digest. Alternatively, when reading images, the format can be docker-daemon:algo:digest (an image ID).

**oci:**_path_**:**_tag_
An image _tag_ in a directory compliant with "Open Container Image Layout Specification" at _path_.
@@ -33,7 +47,7 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
**ostree:**_image_[**@**_/absolute/repo/path_]
An image in a local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_.
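
The transport prefixes above can name the same image in different stores; a short sketch (the directory paths are illustrative):

```sh
# One busybox image, addressed through several transports:
skopeo inspect docker://docker.io/library/busybox:latest   # remote registry
skopeo inspect docker-daemon:busybox:latest                # local docker daemon storage
skopeo inspect dir:/var/lib/images/busybox                 # local directory layout
skopeo inspect oci:/var/lib/images/busybox-oci:latest      # OCI image layout
```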

# OPTIONS
## OPTIONS

**--debug** enable debug output

@@ -43,201 +57,39 @@ Most commands refer to container images, using a _transport_`:`_details_ format.

**--registries.d** _dir_ use registry configuration files in _dir_ (e.g. for container signature storage), overriding the default path.

**--override-arch** _arch_ Use _arch_ instead of the architecture of the machine for choosing images.

**--override-os** _OS_ Use _OS_ instead of the running OS for choosing images.

**--command-timeout** _duration_ Timeout for the command execution.

**--help**|**-h** Show help

**--version**|**-v** print the version number
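
Global options precede the command name; a brief sketch combining a few of the options above (the image reference is a placeholder):

```sh
# Inspect the arm64 variant of an image with debug logging and a
# 60-second command timeout.
skopeo --debug --override-arch arm64 --command-timeout 60s \
    inspect docker://docker.io/library/busybox:latest
```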

# COMMANDS
## COMMANDS

## skopeo copy
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_
| Command | Description |
| ----------------------------------------- | ------------------------------------------------------------------------------ |
| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark image-name for deletion. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest of manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Sign an image. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md) | Verify an image. |

Copy an image (manifest, filesystem layers, signatures) from one location to another.

Uses the system's trust policy to validate images, rejects images not trusted by the policy.

_source-image_ use the "image name" format described above

_destination-image_ use the "image name" format described above

**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.

**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_

**--src-creds** _username[:password]_ for accessing the source registry

**--dest-creds** _username[:password]_ for accessing the destination registry

**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry

**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to the container source registry (defaults to true)

**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry

**--dest-ostree-tmp-dir** _path_ Directory to use for OSTree temporary files.

**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to the container destination registry (defaults to true)

Existing signatures, if any, are preserved as well.

## skopeo delete
**skopeo delete** _image-name_

Mark _image-name_ for deletion. To release the allocated disk space, you need to execute the container registry garbage collector. E.g.,

```sh
$ docker exec -it registry bin/registry garbage-collect /etc/docker/registry/config.yml
```

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.

## skopeo inspect
**skopeo inspect** [**--raw**] _image-name_

Return low-level information about _image-name_ in a registry

**--raw** output raw manifest, default is to format in JSON

_image-name_ name of image to retrieve information about

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

## skopeo manifest-digest
**skopeo manifest-digest** _manifest-file_

Compute a manifest digest of _manifest-file_ and write it to standard output.

## skopeo standalone-sign
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_

This is primarily a debugging tool, or useful for special cases,
and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference to identify the image with

_key-fingerprint_ Key identity to use for signing

**--output**|**-o** output file

## skopeo standalone-verify
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_

Verify a signature using local files; the digest will be printed on success.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference expected to identify the image in the signature

_key-fingerprint_ Expected identity of the signing key

_signature_ Path to signature file

**Note:** If you do use this, make sure that the image can not be changed at the source location between the times of its verification and use.

## skopeo help
show help for `skopeo`

# FILES
## FILES
**/etc/containers/policy.json**
Default trust policy file, if **--policy** is not specified.
The policy format is documented in https://github.com/containers/image/blob/master/docs/policy.json.md .
The policy format is documented in https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md .

**/etc/containers/registries.d**
Default directory containing registry configuration, if **--registries.d** is not specified.
The contents of this directory are documented in https://github.com/containers/image/blob/master/docs/registries.d.md .
The contents of this directory are documented in https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md .

# EXAMPLES
## SEE ALSO
podman-login(1), docker-login(1)

## skopeo copy
To copy the layers of the docker.io busybox image to a local directory:
```sh
$ mkdir -p /var/lib/images/busybox
$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
$ ls /var/lib/images/busybox/*
/var/lib/images/busybox/2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749.tar
/var/lib/images/busybox/manifest.json
/var/lib/images/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
```

To copy and sign an image:

```sh
$ skopeo copy --sign-by dev@example.com atomic:example/busybox:streaming atomic:example/busybox:gold
```
## skopeo delete
Mark image example/pause for deletion from the registry.example.com registry:
```sh
$ skopeo delete --force docker://registry.example.com/example/pause:latest
```
See above for additional details on using the command **delete**.

## skopeo inspect
To review information for the image fedora from the docker.io registry:
```sh
$ skopeo inspect docker://docker.io/fedora
{
    "Name": "docker.io/library/fedora",
    "Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
    "RepoTags": [
        "20",
        "21",
        "22",
        "23",
        "24",
        "heisenbug",
        "latest",
        "rawhide"
    ],
    "Created": "2016-06-20T19:33:43.220526898Z",
    "DockerVersion": "1.10.3",
    "Labels": {},
    "Architecture": "amd64",
    "Os": "linux",
    "Layers": [
        "sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
    ]
}
```
## skopeo layers
Another method to retrieve the layers for the busybox image from the docker.io registry:
```sh
$ skopeo layers docker://busybox
$ ls layers-500650331/
8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
manifest.json
a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4.tar
```
## skopeo manifest-digest
```sh
$ skopeo manifest-digest manifest.json
sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
```
## skopeo standalone-sign
```sh
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
$
```

See `skopeo copy` above for the preferred method of signing images.
## skopeo standalone-verify
```sh
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```

# AUTHORS
## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
546 docs/skopeo.svg Normal file
@@ -0,0 +1,546 @@
[docs/skopeo.svg: new 546-line SVG source for the skopeo logo, created with Inkscape (480×473 px, 24 KiB); the full XML markup is omitted here.]
7  hack/btrfs_installed_tag.sh  Executable file
@@ -0,0 +1,7 @@
#!/bin/bash
cc -E - > /dev/null 2> /dev/null << EOF
#include <btrfs/ioctl.h>
EOF
if test $? -ne 0 ; then
    echo exclude_graphdriver_btrfs
fi
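The probe compiles nothing: it only asks the C preprocessor whether `<btrfs/ioctl.h>` resolves, and on failure emits a tag that excludes the btrfs graph driver. A sketch of how such tag scripts are typically stitched into a build (the exact Makefile wiring below is an assumption, not taken from this diff):

```bash
# Hypothetical wiring: collect auto-detected build tags, then build.
AUTO_BUILDTAGS="$(hack/btrfs_installed_tag.sh) $(hack/ostree_tag.sh)"
make binary-local BUILDTAGS="$AUTO_BUILDTAGS"
```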
@@ -6,7 +6,7 @@ set -e
 #
 # Requirements:
 # - The current directory should be a checkout of the skopeo source code
-#   (https://github.com/projectatomic/skopeo). Whatever version is checked out
+#   (https://github.com/containers/skopeo). Whatever version is checked out
 #   will be built.
 # - The script is intended to be run inside the docker container specified
 #   in the Dockerfile at the root of the source. In other words:
@@ -19,7 +19,7 @@ set -e
 
 set -o pipefail
 
-export SKOPEO_PKG='github.com/projectatomic/skopeo'
+export SKOPEO_PKG='github.com/containers/skopeo'
 export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 export MAKEDIR="$SCRIPTDIR/make"
@@ -4,7 +4,7 @@ if [ -z "$VALIDATE_UPSTREAM" ]; then
 	# this is kind of an expensive check, so let's not do this twice if we
 	# are running more than one validate bundlescript
 
-	VALIDATE_REPO='https://github.com/projectatomic/skopeo.git'
+	VALIDATE_REPO='https://github.com/containers/skopeo.git'
 	VALIDATE_BRANCH='master'
 
 	if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
18  hack/make/test-system  Executable file
@@ -0,0 +1,18 @@
#!/bin/bash
set -e

# Before running podman for the first time, make sure
# to set storage to vfs (not overlay): podman-in-podman
# doesn't work with overlay. And, disable mountopt,
# which causes error with vfs.
sed -i \
    -e 's/^driver\s*=.*/driver = "vfs"/' \
    -e 's/^mountopt/#mountopt/' \
    /etc/containers/storage.conf

# Build skopeo, install into /usr/bin
make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
make install

# Run tests
SKOPEO_BINARY=/usr/bin/skopeo bats --tap systemtest
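The sed edit rewrites two keys of /etc/containers/storage.conf in place. Assuming a stock overlay-based config (the stock values below are illustrative), the effect is:

```bash
# Illustrative before/after of the two rewritten storage.conf lines:
#   driver = "overlay"             ->  driver = "vfs"
#   mountopt = "nodev,metacopy=on" ->  #mountopt = "nodev,metacopy=on"
grep -E '^#?(driver|mountopt)' /etc/containers/storage.conf
```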
@@ -1,28 +1,13 @@
 #!/bin/bash
 
 source "$(dirname "$BASH_SOURCE")/.validate"
+errors=$(go vet $(go list -e ./... | grep -v "$SKOPEO_PKG"/vendor))
 
-IFS=$'\n'
-files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )
-unset IFS
-
-errors=()
-for f in "${files[@]}"; do
-	failedVet=$(go vet "$f")
-	if [ "$failedVet" ]; then
-		errors+=( "$failedVet" )
-	fi
-done
-
-if [ ${#errors[@]} -eq 0 ]; then
+if [ -z "$errors" ]; then
 	echo 'Congratulations! All Go source files have been vetted.'
 else
 	{
 		echo "Errors from go vet:"
-		for err in "${errors[@]}"; do
-			echo " - $err"
-		done
+		echo "$errors"
 		echo
 		echo 'Please fix the above errors. You can test via "go vet" and commit the result.'
 		echo
6  hack/ostree_tag.sh  Executable file
@@ -0,0 +1,6 @@
#!/bin/bash
if pkg-config ostree-1 2> /dev/null ; then
    echo ostree
else
    echo containers_image_ostree_stub
fi
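pkg-config exits non-zero when the ostree-1 module is absent, so the script's output can be previewed directly on a given host (the distro package names below are an assumption):

```bash
# With the ostree development files installed (e.g. ostree-devel on Fedora,
# libostree-dev on Debian), the script emits "ostree"; otherwise the stub tag:
pkg-config --exists ostree-1 && echo "tag: ostree" \
                             || echo "tag: containers_image_ostree_stub"
```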
@@ -4,13 +4,14 @@ set -e
 export GOPATH=$(pwd)/_gopath
 export PATH=$GOPATH/bin:$PATH
 
-_projectatomic="${GOPATH}/src/github.com/projectatomic"
-mkdir -vp ${_projectatomic}
-ln -vsf $(pwd) ${_projectatomic}/skopeo
+_containers="${GOPATH}/src/github.com/containers"
+mkdir -vp ${_containers}
+ln -vsf $(pwd) ${_containers}/skopeo
 
-go get -u github.com/cpuguy83/go-md2man github.com/golang/lint/golint
 go version
+go get -u github.com/cpuguy83/go-md2man golang.org/x/lint/golint
 
-cd ${_projectatomic}/skopeo
+cd ${_containers}/skopeo
 make validate-local test-unit-local binary-local
 sudo make install
 skopeo -v
13  hack/tree_status.sh  Executable file
@@ -0,0 +1,13 @@
#!/bin/bash
set -e

STATUS=$(git status --porcelain)
if [[ -z $STATUS ]]
then
    echo "tree is clean"
else
    echo "tree is dirty, please commit all changes and sync the vendor.conf"
    echo ""
    echo "$STATUS"
    exit 1
fi
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-# This file is just wrapper around vndr (github.com/LK4D4/vndr) tool.
-# For updating dependencies you should change `vendor.conf` file in root of the
-# project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md for
-# vndr usage.
-
-set -e
-
-if ! hash vndr; then
-	echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH"
-	exit 1
-fi
-
-vndr "$@"
@@ -5,8 +5,8 @@ import (
 	"os/exec"
 	"testing"
 
+	"github.com/containers/skopeo/version"
 	"github.com/go-check/check"
-	"github.com/projectatomic/skopeo/version"
 )
 
 const (
@@ -87,3 +87,7 @@ func (s *SkopeoSuite) TestNoNeedAuthToPrivateRegistryV2ImageNotFound(c *check.C) {
 	wanted = ".*unauthorized: authentication required.*"
 	c.Assert(string(out), check.Not(check.Matches), "(?s)"+wanted) // (?s) : '.' will also match newlines
 }
+
+func (s *SkopeoSuite) TestInspectFailsWhenReferenceIsInvalid(c *check.C) {
+	assertSkopeoFails(c, `.*Invalid image name.*`, "inspect", "unknown")
+}
@@ -14,7 +14,8 @@ import (
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/signature"
 	"github.com/go-check/check"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/opencontainers/image-tools/image"
 )
@@ -63,7 +64,7 @@ func (s *CopySuite) SetUpSuite(c *check.C) {
 	os.Setenv("GNUPGHOME", s.gpgHome)
 
 	for _, key := range []string{"personal", "official"} {
-		batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%commit\n",
+		batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%no-protection\n%%commit\n",
 			key, key)
 		runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")
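The added %no-protection directive matters on GnuPG 2.1 and later, where batch key generation otherwise stops to ask for a passphrase; with it, the suite can mint throwaway, passphrase-less keys unattended. The generated batch input amounts to (sketch for one key):

```bash
gpg --batch --gen-key <<'EOF'
Key-Type: RSA
Name-Real: Test key - personal
Name-email: personal@example.com
%no-protection
%commit
EOF
```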
@@ -89,9 +90,11 @@ func (s *CopySuite) TearDownSuite(c *check.C) {
 	}
 }
 
-func (s *CopySuite) TestCopyFailsWithManifestList(c *check.C) {
-	c.ExpectFailure("manifest-list-hotfix sacrificed hotfixes for being able to copy images")
-	assertSkopeoFails(c, ".*can not copy docker://estesp/busybox:latest: manifest contains multiple images.*", "copy", "docker://estesp/busybox:latest", "dir:somedir")
+func (s *CopySuite) TestCopyWithManifestList(c *check.C) {
+	dir, err := ioutil.TempDir("", "copy-manifest-list")
+	c.Assert(err, check.IsNil)
+	defer os.RemoveAll(dir)
+	assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:latest", "dir:"+dir)
 }
 
 func (s *CopySuite) TestCopyFailsWhenImageOSDoesntMatchRuntimeOS(c *check.C) {
@@ -151,7 +154,10 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
 	// docker v2s2 -> OCI image layout without image name
 	ociDest = "busybox-latest-noimage"
 	defer os.RemoveAll(ociDest)
-	assertSkopeoFails(c, ".*Error initializing destination oci:busybox-latest-noimage:: cannot save image with empty image.ref.name.*", "copy", "docker://busybox:latest", "oci:"+ociDest)
+	assertSkopeoSucceeds(c, "", "copy", "docker://busybox:latest", "oci:"+ociDest)
+	_, err = os.Stat(ociDest)
+	c.Assert(err, check.IsNil)
 }
 
 // Check whether dir: images in dir1 and dir2 are equal, ignoring schema1 signatures.
@@ -376,7 +382,7 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
 
 // Compression during copy
 func (s *CopySuite) TestCopyCompression(c *check.C) {
-	const uncompresssedLayerFile = "160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710.tar"
+	const uncompresssedLayerFile = "160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"
 
 	topDir, err := ioutil.TempDir("", "compression-top")
 	c.Assert(err, check.IsNil)
@@ -408,9 +414,7 @@ func (s *CopySuite) TestCopyCompression(c *check.C) {
 	fis, err := dirf.Readdir(-1)
 	c.Assert(err, check.IsNil)
 	for _, fi := range fis {
-		if strings.HasSuffix(fi.Name(), ".tar") {
-			c.Assert(fi.Size() < 2048, check.Equals, true)
-		}
+		c.Assert(fi.Size() < 2048, check.Equals, true)
 	}
 	}
 }
@@ -591,6 +595,32 @@ func (s *CopySuite) TestCopySchemaConversion(c *check.C) {
 	s.testCopySchemaConversionRegistries(c, "docker://"+v2s1DockerRegistryURL+"/schema1", "docker://"+v2DockerRegistryURL+"/schema2")
 }
 
+func (s *CopySuite) TestCopyManifestConversion(c *check.C) {
+	topDir, err := ioutil.TempDir("", "manifest-conversion")
+	c.Assert(err, check.IsNil)
+	defer os.RemoveAll(topDir)
+	srcDir := filepath.Join(topDir, "source")
+	destDir1 := filepath.Join(topDir, "dest1")
+	destDir2 := filepath.Join(topDir, "dest2")
+
+	// oci to v2s1 and vice-versa not supported yet
+	// get v2s2 manifest type
+	assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "dir:"+srcDir)
+	verifyManifestMIMEType(c, srcDir, manifest.DockerV2Schema2MediaType)
+	// convert from v2s2 to oci
+	assertSkopeoSucceeds(c, "", "copy", "--format=oci", "dir:"+srcDir, "dir:"+destDir1)
+	verifyManifestMIMEType(c, destDir1, imgspecv1.MediaTypeImageManifest)
+	// convert from oci to v2s2
+	assertSkopeoSucceeds(c, "", "copy", "--format=v2s2", "dir:"+destDir1, "dir:"+destDir2)
+	verifyManifestMIMEType(c, destDir2, manifest.DockerV2Schema2MediaType)
+	// convert from v2s2 to v2s1
+	assertSkopeoSucceeds(c, "", "copy", "--format=v2s1", "dir:"+srcDir, "dir:"+destDir1)
+	verifyManifestMIMEType(c, destDir1, manifest.DockerV2Schema1SignedMediaType)
+	// convert from v2s1 to v2s2
+	assertSkopeoSucceeds(c, "", "copy", "--format=v2s2", "dir:"+destDir1, "dir:"+destDir2)
+	verifyManifestMIMEType(c, destDir2, manifest.DockerV2Schema2MediaType)
+}
+
 func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Registry, schema2Registry string) {
 	topDir, err := ioutil.TempDir("", "schema-conversion")
 	c.Assert(err, check.IsNil)
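The new test drives skopeo's --format flag through every supported conversion pair except OCI to v2s1 and back. The same round trips can be reproduced by hand; a sketch with scratch directories:

```bash
skopeo copy docker://busybox dir:/tmp/src              # v2s2 as served
skopeo copy --format=oci  dir:/tmp/src dir:/tmp/oci    # v2s2 -> OCI
skopeo copy --format=v2s2 dir:/tmp/oci dir:/tmp/back   # OCI  -> v2s2
skopeo copy --format=v2s1 dir:/tmp/src dir:/tmp/old    # v2s2 -> schema 1
```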
@@ -632,3 +662,41 @@ func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
 	mimeType := manifest.GuessMIMEType(manifestBlob)
 	c.Assert(mimeType, check.Equals, expectedMIMEType)
 }
+
+const regConfFixture = "./fixtures/registries.conf"
+
+func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) {
+	dir, err := ioutil.TempDir("", "copy-mirror")
+	c.Assert(err, check.IsNil)
+
+	assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
+		"docker://mirror.invalid/busybox", "dir:"+dir)
+}
+
+func (s *SkopeoSuite) TestFailureCopySrcWithMirrorsUnavailable(c *check.C) {
+	dir, err := ioutil.TempDir("", "copy-mirror")
+	c.Assert(err, check.IsNil)
+
+	assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
+		"docker://invalid.invalid/busybox", "dir:"+dir)
+}
+
+func (s *SkopeoSuite) TestSuccessCopySrcWithMirrorAndPrefix(c *check.C) {
+	dir, err := ioutil.TempDir("", "copy-mirror")
+	c.Assert(err, check.IsNil)
+
+	assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
+		"docker://gcr.invalid/foo/bar/busybox", "dir:"+dir)
+}
+
+func (s *SkopeoSuite) TestFailureCopySrcWithMirrorAndPrefixUnavailable(c *check.C) {
+	dir, err := ioutil.TempDir("", "copy-mirror")
+	c.Assert(err, check.IsNil)
+
+	assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
+		"docker://gcr.invalid/wrong/prefix/busybox", "dir:"+dir)
+}
+
+func (s *CopySuite) TestCopyFailsWhenReferenceIsInvalid(c *check.C) {
+	assertSkopeoFails(c, `.*Invalid image name.*`, "copy", "unknown:transport", "unknown:test")
+}
28  integration/fixtures/registries.conf  Normal file
@@ -0,0 +1,28 @@
[[registry]]
location = "mirror.invalid"
mirror = [
  { location = "mirror-0.invalid" },
  { location = "mirror-1.invalid" },
  { location = "gcr.io/google-containers" },
]

# This entry is currently unused and exists only to ensure
# that the mirror.invalid/busybox is not rewritten twice.
[[registry]]
location = "gcr.io"
prefix = "gcr.io/google-containers"

[[registry]]
location = "invalid.invalid"
mirror = [
  { location = "invalid-mirror-0.invalid" },
  { location = "invalid-mirror-1.invalid" },
]

[[registry]]
location = "gcr.invalid"
prefix = "gcr.invalid/foo/bar"
mirror = [
  { location = "wrong-mirror-0.invalid" },
  { location = "gcr.io/google-containers" },
]
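In this fixture the first mirror entries point at dead hosts on purpose: the client walks the mirror list in order and only succeeds once it reaches gcr.io/google-containers, while the prefix fields confine rewriting to matching repository paths. The integration tests above exercise it via the --registries-conf flag, roughly:

```bash
# mirror.invalid is never contacted directly; the pull falls through the
# two unreachable mirrors and lands on gcr.io/google-containers/busybox.
skopeo --registries-conf=integration/fixtures/registries.conf \
    copy docker://mirror.invalid/busybox dir:/tmp/busybox-via-mirror
```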
@@ -44,7 +44,7 @@ func (s *SigningSuite) SetUpSuite(c *check.C) {
 	c.Assert(err, check.IsNil)
 	os.Setenv("GNUPGHOME", s.gpgHome)
 
-	runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%commit\n", gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")
+	runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")
 
 	lines, err := exec.Command(gpgBinary, "--homedir", s.gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
 	c.Assert(err, check.IsNil)
19  systemtest/001-basic.bats  Normal file
@@ -0,0 +1,19 @@
#!/usr/bin/env bats
#
# Simplest set of skopeo tests. If any of these fail, we have serious problems.
#

load helpers

# Override standard setup! We don't yet trust anything
function setup() {
    :
}

@test "skopeo version emits reasonable output" {
    run_skopeo --version

    expect_output --substring "skopeo version [0-9.]+"
}

# vim: filetype=sh
67  systemtest/010-inspect.bats  Normal file
@@ -0,0 +1,67 @@
#!/usr/bin/env bats
#
# Simplest test for skopeo inspect
#

load helpers

@test "inspect: basic" {
    workdir=$TESTDIR/inspect

    remote_image=docker://quay.io/libpod/alpine_labels:latest
    # Inspect remote source, then pull it. There's a small race condition
    # in which the remote image can get updated between the inspect and
    # the copy; let's just not worry about it.
    run_skopeo inspect $remote_image
    inspect_remote=$output

    # Now pull it into a directory
    run_skopeo copy $remote_image dir:$workdir
    expect_output --substring "Getting image source signatures"
    expect_output --substring "Writing manifest to image destination"

    # Unpacked contents must include a manifest and version
    [ -e $workdir/manifest.json ]
    [ -e $workdir/version ]

    # Now run inspect locally
    run_skopeo inspect dir:$workdir
    inspect_local=$output

    # Each SHA-named file must be listed in the output of 'inspect'
    for sha in $(find $workdir -type f | xargs -l1 basename | egrep '^[0-9a-f]{64}$'); do
        expect_output --from="$inspect_local" --substring "sha256:$sha" \
            "Locally-extracted SHA file is present in 'inspect'"
    done

    # Simple sanity check on 'inspect' output.
    # For each of the given keys (LHS of the table below):
    #   1) Get local and remote values
    #   2) Sanity-check local value using simple expression
    #   3) Confirm that local and remote values match.
    #
    # The reason for (2) is to make sure that we don't compare bad results
    #
    # The reason for a hardcoded list, instead of 'jq keys', is that RepoTags
    # is always empty locally, but a list remotely.
    while read key expect; do
        local=$(echo "$inspect_local" | jq -r ".$key")
        remote=$(echo "$inspect_remote" | jq -r ".$key")

        expect_output --from="$local" --substring "$expect" \
            "local $key is sane"

        expect_output --from="$remote" "$local" \
            "local $key matches remote"
    done <<END_EXPECT
Architecture    amd64
Created         [0-9-]+T[0-9:]+\.[0-9]+Z
Digest          sha256:[0-9a-f]{64}
DockerVersion   [0-9]+\.[0-9][0-9.-]+
Labels          \\\{.*PODMAN.*podman.*\\\}
Layers          \\\[.*sha256:.*\\\]
Os              linux
END_EXPECT
}

# vim: filetype=sh
79  systemtest/020-copy.bats  Normal file
@@ -0,0 +1,79 @@
#!/usr/bin/env bats
#
# Copy tests
#

load helpers

function setup() {
    standard_setup

    start_registry reg
}

# From remote, to dir1, to local, to dir2;
# compare dir1 and dir2, expect no changes
@test "copy: dir, round trip" {
    local remote_image=docker://busybox:latest
    local localimg=docker://localhost:5000/busybox:unsigned

    local dir1=$TESTDIR/dir1
    local dir2=$TESTDIR/dir2

    run_skopeo copy $remote_image dir:$dir1
    run_skopeo copy --dest-tls-verify=false dir:$dir1 $localimg
    run_skopeo copy --src-tls-verify=false  $localimg dir:$dir2

    # Both extracted copies must be identical
    diff -urN $dir1 $dir2
}

# Same as above, but using 'oci:' instead of 'dir:' and with a :latest tag
@test "copy: oci, round trip" {
    local remote_image=docker://busybox:latest
    local localimg=docker://localhost:5000/busybox:unsigned

    local dir1=$TESTDIR/oci1
    local dir2=$TESTDIR/oci2

    run_skopeo copy $remote_image oci:$dir1:latest
    run_skopeo copy --dest-tls-verify=false oci:$dir1:latest $localimg
    run_skopeo copy --src-tls-verify=false  $localimg        oci:$dir2:latest

    # Both extracted copies must be identical
    diff -urN $dir1 $dir2
}

# Same image, extracted once with :tag and once without
@test "copy: oci w/ and w/o tags" {
    local remote_image=docker://busybox:latest

    local dir1=$TESTDIR/dir1
    local dir2=$TESTDIR/dir2

    run_skopeo copy $remote_image oci:$dir1
    run_skopeo copy $remote_image oci:$dir2:withtag

    # Both extracted copies must be identical, except for index.json
    diff -urN --exclude=index.json $dir1 $dir2

    # ...which should differ only in the tag. (But that's too hard to check)
    grep '"org.opencontainers.image.ref.name":"withtag"' $dir2/index.json
}

# This one seems unlikely to get fixed
@test "copy: bug 651" {
    skip "Enable this once skopeo issue #651 has been fixed"

    run_skopeo copy --dest-tls-verify=false \
        docker://quay.io/libpod/alpine_labels:latest \
        docker://localhost:5000/foo
}

teardown() {
    podman rm -f reg

    standard_teardown
}

# vim: filetype=sh
32  systemtest/030-local-registry-tls.bats  Normal file
@@ -0,0 +1,32 @@
#!/usr/bin/env bats
#
# Confirm that skopeo will push to and pull from a local
# registry with locally-created TLS certificates.
#
load helpers

function setup() {
    standard_setup

    start_registry --with-cert reg
}

@test "local registry, with cert" {
    # Push to local registry...
    run_skopeo copy --dest-cert-dir=$TESTDIR/client-auth \
        docker://busybox:latest \
        docker://localhost:5000/busybox:unsigned

    # ...and pull it back out
    run_skopeo copy --src-cert-dir=$TESTDIR/client-auth \
        docker://localhost:5000/busybox:unsigned \
        dir:$TESTDIR/extracted
}

teardown() {
    podman rm -f reg

    standard_teardown
}

# vim: filetype=sh
78  systemtest/040-local-registry-auth.bats  Normal file
@@ -0,0 +1,78 @@
#!/usr/bin/env bats
#
# Tests with a local registry with auth
#

load helpers

function setup() {
    standard_setup

    # Remove old/stale cred file
    _cred_dir=$TESTDIR/credentials
    export XDG_RUNTIME_DIR=$_cred_dir
    mkdir -p $_cred_dir/containers
    rm -f $_cred_dir/containers/auth.json

    # Start authenticated registry with random password
    testuser=testuser
    testpassword=$(random_string 15)

    start_registry --testuser=testuser --testpassword=$testpassword reg
}

@test "auth: credentials on command line" {
    # No creds
    run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/nonesuch
    expect_output --substring "unauthorized: authentication required"

    # Wrong user
    run_skopeo 1 inspect --tls-verify=false --creds=baduser:badpassword \
        docker://localhost:5000/nonesuch
    expect_output --substring "unauthorized: authentication required"

    # Wrong password
    run_skopeo 1 inspect --tls-verify=false --creds=$testuser:badpassword \
        docker://localhost:5000/nonesuch
    expect_output --substring "unauthorized: authentication required"

    # Correct creds, but no such image
    run_skopeo 1 inspect --tls-verify=false --creds=$testuser:$testpassword \
        docker://localhost:5000/nonesuch
    expect_output --substring "manifest unknown: manifest unknown"

    # These should pass
    run_skopeo copy --dest-tls-verify=false --dcreds=$testuser:$testpassword \
        docker://busybox:latest docker://localhost:5000/busybox:mine
    run_skopeo inspect --tls-verify=false --creds=$testuser:$testpassword \
        docker://localhost:5000/busybox:mine
    expect_output --substring "localhost:5000/busybox"
}

@test "auth: credentials via podman login" {
    # Logged in: skopeo should work
    podman login --tls-verify=false -u $testuser -p $testpassword localhost:5000

    run_skopeo copy --dest-tls-verify=false \
        docker://busybox:latest docker://localhost:5000/busybox:mine
    run_skopeo inspect --tls-verify=false docker://localhost:5000/busybox:mine
    expect_output --substring "localhost:5000/busybox"

    # Logged out: should fail
    podman logout localhost:5000

    run_skopeo 1 inspect --tls-verify=false docker://localhost:5000/busybox:mine
    expect_output --substring "unauthorized: authentication required"
}

teardown() {
    podman rm -f reg

    if [[ -n $_cred_dir ]]; then
        rm -rf $_cred_dir
    fi

    standard_teardown
}

# vim: filetype=sh
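The setup points XDG_RUNTIME_DIR at a scratch directory because that is where the containers stack keeps its credential store; podman writes it on login and skopeo reads it back, which is what the second test relies on. Roughly (path per the containers-auth convention; file contents illustrative):

```bash
podman login --tls-verify=false -u testuser -p "$testpassword" localhost:5000
# skopeo now authenticates without --creds, reading:
cat "$XDG_RUNTIME_DIR/containers/auth.json"
# { "auths": { "localhost:5000": { "auth": "dGVzdHVzZXI6..." } } }
```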
151  systemtest/050-signing.bats  Normal file
@@ -0,0 +1,151 @@
#!/usr/bin/env bats
#
# Tests with gpg signing
#

load helpers

function setup() {
    standard_setup

    # Create dummy gpg keys
    export GNUPGHOME=$TESTDIR/skopeo-gpg
    mkdir --mode=0700 $GNUPGHOME

    # gpg on f30 needs this, otherwise:
    #   gpg: agent_genkey failed: Inappropriate ioctl for device
    # ...but gpg on f29 (and, probably, Ubuntu) doesn't grok this
    GPGOPTS='--pinentry-mode loopback'
    if gpg --pinentry-mode asdf 2>&1 | grep -qi 'Invalid option'; then
        GPGOPTS=
    fi

    for k in alice bob;do
        gpg --batch $GPGOPTS --gen-key --passphrase '' <<END_GPG
Key-Type: RSA
Name-Real: Test key - $k
Name-email: $k@test.redhat.com
%commit
END_GPG

        gpg --armor --export $k@test.redhat.com >$GNUPGHOME/pubkey-$k.gpg
    done

    # Registries. The important part here seems to be sigstore,
    # because (I guess?) the registry itself has no mechanism
    # for storing or validating signatures.
    REGISTRIES_D=$TESTDIR/registries.d
    mkdir $REGISTRIES_D $TESTDIR/sigstore
    cat >$REGISTRIES_D/registries.yaml <<EOF
docker:
    localhost:5000:
        sigstore: file://$TESTDIR/sigstore
EOF

    # Policy file. Basically, require /myns/alice and /myns/bob
    # to be signed; allow /open; and reject anything else.
    POLICY_JSON=$TESTDIR/policy.json
    cat >$POLICY_JSON <<END_POLICY_JSON
{
    "default": [
        {
            "type": "reject"
        }
    ],
    "transports": {
        "docker": {
            "localhost:5000/myns/alice": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "$GNUPGHOME/pubkey-alice.gpg"
                }
            ],
            "localhost:5000/myns/bob": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "$GNUPGHOME/pubkey-bob.gpg"
                }
            ],
            "localhost:5000/open": [
                {
                    "type": "insecureAcceptAnything"
                }
            ]
        }
    }
}
END_POLICY_JSON

    start_registry reg
}

@test "signing" {
    run_skopeo '?' standalone-sign /dev/null busybox alice@test.redhat.com -o /dev/null
    if [[ "$output" =~ 'signing is not supported' ]]; then
        skip "skopeo built without support for creating signatures"
        return 1
    fi
    if [ "$status" -ne 0 ]; then
        die "exit code is $status; expected $expected_rc"
    fi

    # Cache local copy
    run_skopeo copy docker://busybox:latest dir:$TESTDIR/busybox

    # Push a bunch of images. Do so *without* --policy flag; this lets us
    # sign or not, creating images that will or won't conform to policy.
    while read path sig comments; do
        local sign_opt=
        if [[ $sig != '-' ]]; then
            sign_opt="--sign-by=${sig}@test.redhat.com"
        fi
        run_skopeo --registries.d $REGISTRIES_D \
            copy --dest-tls-verify=false \
            $sign_opt \
            dir:$TESTDIR/busybox \
            docker://localhost:5000$path
    done <<END_PUSH
/myns/alice:signed        alice   # Properly-signed image
/myns/alice:unsigned      -       # Unsigned image to path that requires signature
/myns/bob:signedbyalice   alice   # Bad signature: image under /bob
/myns/carol:latest        -       # No signature
/open/forall:latest       -       # No signature, but none needed
END_PUSH

    # Done pushing. Now try to fetch. From here on we use the --policy option.
    # The table below lists the paths to fetch, and the expected errors (or
    # none, if we expect them to pass).
    while read path expected_error; do
        expected_rc=
        if [[ -n $expected_error ]]; then
            expected_rc=1
        fi

        rm -rf $TESTDIR/d
        run_skopeo $expected_rc \
            --registries.d $REGISTRIES_D \
            --policy $POLICY_JSON \
            copy --src-tls-verify=false \
            docker://localhost:5000$path \
            dir:$TESTDIR/d
        if [[ -n $expected_error ]]; then
            expect_output --substring "Source image rejected: $expected_error"
        fi
    done <<END_TESTS
/myns/alice:signed
/myns/bob:signedbyalice   Invalid GPG signature
/myns/alice:unsigned      Signature for identity localhost:5000/myns/alice:signed is not accepted
/myns/carol:latest        Running image docker://localhost:5000/myns/carol:latest is rejected by policy.
/open/forall:latest
END_TESTS
}

teardown() {
    podman rm -f reg

    standard_teardown
}

# vim: filetype=sh
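Outside the harness, the same push-signed / pull-verified flow looks like this (fixture values from the setup above; a sketch, not a full transcript):

```bash
# Push with a detached GPG signature recorded in the configured sigstore...
skopeo --registries.d "$REGISTRIES_D" copy --dest-tls-verify=false \
    --sign-by=alice@test.redhat.com \
    dir:"$TESTDIR/busybox" docker://localhost:5000/myns/alice:signed

# ...then pull with the policy enforced; unsigned or mis-signed images fail.
skopeo --registries.d "$REGISTRIES_D" --policy "$POLICY_JSON" \
    copy --src-tls-verify=false \
    docker://localhost:5000/myns/alice:signed dir:"$TESTDIR/verified"
```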
344  systemtest/helpers.bash  Normal file
@@ -0,0 +1,344 @@
#!/bin/bash

SKOPEO_BINARY=${SKOPEO_BINARY:-$(dirname ${BASH_SOURCE})/../skopeo}

# Default timeout for a skopeo command.
SKOPEO_TIMEOUT=${SKOPEO_TIMEOUT:-300}

###############################################################################
# BEGIN setup/teardown

# Provide common setup and teardown functions, but do not name them such!
# That way individual tests can override with their own setup/teardown,
# while retaining the ability to include these if they so desire.

function standard_setup() {
    # Argh. Although BATS provides $BATS_TMPDIR, it's just /tmp!
    # That's bloody worthless. Let's make our own, in which subtests
    # can write whatever they like and trust that it'll be deleted
    # on cleanup.
    TESTDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-/tmp} skopeo_bats.XXXXXX)
}

function standard_teardown() {
    if [[ -n $TESTDIR ]]; then
        rm -rf $TESTDIR
    fi
}

# Individual .bats files may override or extend these
function setup() {
    standard_setup
}

function teardown() {
    standard_teardown
}

# END setup/teardown
###############################################################################
# BEGIN standard helpers for running skopeo and testing results

#################
#  run_skopeo  #  Invoke skopeo, with timeout, using BATS 'run'
#################
#
# This is the preferred mechanism for invoking skopeo:
#
#   * we use 'timeout' to abort (with a diagnostic) if something
#     takes too long; this is preferable to a CI hang.
#   * we log the command run and its output. This doesn't normally
#     appear in BATS output, but it will if there's an error.
#   * we check exit status. Since the normal desired code is 0,
#     that's the default; but the first argument can override:
#
#     run_skopeo 125 nonexistent-subcommand
#     run_skopeo '?' some-other-command    # let our caller check status
#
# Since we use the BATS 'run' mechanism, $output and $status will be
# defined for our caller.
#
function run_skopeo() {
    # Number as first argument = expected exit code; default 0
    expected_rc=0
    case "$1" in
        [0-9])           expected_rc=$1; shift;;
        [1-9][0-9])      expected_rc=$1; shift;;
        [12][0-9][0-9])  expected_rc=$1; shift;;
        '?')             expected_rc= ; shift;;  # ignore exit code
    esac

    # Remember command args, for possible use in later diagnostic messages
    MOST_RECENT_SKOPEO_COMMAND="skopeo $*"

    # stdout is only emitted upon error; this echo is to help a debugger
    echo "\$ $SKOPEO_BINARY $*"
    run timeout --foreground --kill=10 $SKOPEO_TIMEOUT ${SKOPEO_BINARY} "$@"
    # without "quotes", multiple lines are glommed together into one
    if [ -n "$output" ]; then
        echo "$output"
    fi
    if [ "$status" -ne 0 ]; then
        echo -n "[ rc=$status ";
        if [ -n "$expected_rc" ]; then
            if [ "$status" -eq "$expected_rc" ]; then
                echo -n "(expected) ";
            else
                echo -n "(** EXPECTED $expected_rc **) ";
            fi
        fi
        echo "]"
    fi

    if [ "$status" -eq 124 -o "$status" -eq 137 ]; then
        # FIXME: 'timeout -v' requires coreutils-8.29; travis seems to have
        # an older version. If/when travis updates, please add -v
        # to the 'timeout' command above, and un-comment this out:
        # if expr "$output" : ".*timeout: sending" >/dev/null; then
        echo "*** TIMED OUT ***"
        false
    fi

    if [ -n "$expected_rc" ]; then
        if [ "$status" -ne "$expected_rc" ]; then
            die "exit code is $status; expected $expected_rc"
        fi
    fi
}

#########
#  die  #  Abort with helpful message
#########
function die() {
    echo "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv" >&2
    echo "#| FAIL: $*" >&2
    echo "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^" >&2
    false
}

###################
#  expect_output  #  Compare actual vs expected string; fail if mismatch
###################
#
# Compares $output against the given string argument. Optional second
# argument is descriptive text to show as the error message (default:
# the command most recently run by 'run_skopeo'). This text can be
# useful to isolate a failure when there are multiple identical
# run_skopeo invocations, and the difference is solely in the
# config or setup; see, e.g., run.bats:run-cmd().
#
# By default we run an exact string comparison; use --substring to
# look for the given string anywhere in $output.
#
# By default we look in "$output", which is set in run_skopeo().
# To override, use --from="some-other-string" (e.g. "${lines[0]}")
#
# Examples:
#
#   expect_output "this is exactly what we expect"
#   expect_output "foo=bar"  "description of this particular test"
#   expect_output --from="${lines[0]}"  "expected first line"
#
function expect_output() {
    # By default we examine $output, the result of run_skopeo
    local actual="$output"
    local check_substring=

    # option processing: recognize --from="...", --substring
    local opt
    for opt; do
        local value=$(expr "$opt" : '[^=]*=\(.*\)')
        case "$opt" in
            --from=*)     actual="$value";   shift;;
            --substring)  check_substring=1; shift;;
            --)           shift; break;;
            -*)           die "Invalid option '$opt'" ;;
            *)            break;;
        esac
    done

    local expect="$1"
    local testname="${2:-${MOST_RECENT_SKOPEO_COMMAND:-[no test name given]}}"

    if [ -z "$expect" ]; then
        if [ -z "$actual" ]; then
            return
        fi
        expect='[no output]'
    elif [ "$actual" = "$expect" ]; then
        return
    elif [ -n "$check_substring" ]; then
        if [[ "$actual" =~ $expect ]]; then
            return
        fi
    fi

    # This is a multi-line message, which may in turn contain multi-line
    # output, so let's format it ourself, readably
    local -a actual_split
    readarray -t actual_split <<<"$actual"
    printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2
    printf "#| FAIL: $testname\n" >&2
    printf "#| expected: '%s'\n" "$expect" >&2
    printf "#|   actual: '%s'\n" "${actual_split[0]}" >&2
    local line
    for line in "${actual_split[@]:1}"; do
        printf "#|          > '%s'\n" "$line" >&2
    done
    printf "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" >&2
    false
}

#######################
#  expect_line_count  #  Check the expected number of output lines
#######################
#
# ...from the most recent run_skopeo command
#
function expect_line_count() {
    local expect="$1"
    local testname="${2:-${MOST_RECENT_SKOPEO_COMMAND:-[no test name given]}}"

    local actual="${#lines[@]}"
    if [ "$actual" -eq "$expect" ]; then
        return
    fi

    printf "#/vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n" >&2
    printf "#| FAIL: $testname\n" >&2
    printf "#| Expected %d lines of output, got %d\n" $expect $actual >&2
    printf "#| Output was:\n" >&2
    local line
    for line in "${lines[@]}"; do
        printf "#| >%s\n" "$line" >&2
    done
    printf "#\\^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" >&2
    false
}

# END standard helpers for running skopeo and testing results
###############################################################################
# BEGIN helpers for starting/stopping registries

####################
#  start_registry  #  Run a local registry container
####################
#
# Usage: start_registry [OPTIONS] NAME
#
#   OPTIONS
#     --port=NNNN         Port to listen on (default: 5000)
#     --testuser=XXX      Require authentication; this is the username
#     --testpassword=XXX  ...and the password (these two go together)
#     --with-cert         Create a cert for running with TLS (not working)
#
#   NAME is the container name to assign.
#
start_registry() {
    local port=5000
    local testuser=
    local testpassword=
    local create_cert=

    # option processing: recognize options for running the registry
    # in different modes.
    local opt
    for opt; do
        local value=$(expr "$opt" : '[^=]*=\(.*\)')
        case "$opt" in
            --port=*)          port="$value";         shift;;
            --testuser=*)      testuser="$value";     shift;;
            --testpassword=*)  testpassword="$value"; shift;;
            --with-cert)       create_cert=1;         shift;;
            -*)                die "Invalid option '$opt'" ;;
            *)                 break;;
        esac
    done

    local name=${1?start_registry() invoked without a NAME}

    # Temp directory must be defined and must exist
    [[ -n $TESTDIR && -d $TESTDIR ]]

    AUTHDIR=$TESTDIR/auth
    mkdir -p $AUTHDIR

    local -a reg_args=(-v $AUTHDIR:/auth:Z -p $port:5000)

    # cgroup option necessary under podman-in-podman (CI tests),
    # and doesn't seem to do any harm otherwise.
    PODMAN="podman --cgroup-manager=cgroupfs"

    # Called with --testuser? Create an htpasswd file
    if [[ -n $testuser ]]; then
        if [[ -z $testpassword ]]; then
            die "start_registry() invoked with testuser but no testpassword"
        fi

        if ! egrep -q "^$testuser:" $AUTHDIR/htpasswd; then
            $PODMAN run --rm --entrypoint htpasswd registry:2 \
                -Bbn $testuser $testpassword >> $AUTHDIR/htpasswd
        fi

        reg_args+=(
            -e REGISTRY_AUTH=htpasswd
            -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd
            -e REGISTRY_AUTH_HTPASSWD_REALM="Registry Realm"
        )
    fi

    # Called with --with-cert? Create certificates.
    if [[ -n $create_cert ]]; then
        CERT=$AUTHDIR/domain.crt
        if [ ! -e $CERT ]; then
            openssl req -newkey rsa:4096 -nodes -sha256 \
                -keyout $AUTHDIR/domain.key -x509 -days 2 \
                -out $CERT \
                -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost"
        fi

        reg_args+=(
            -e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt
            -e REGISTRY_HTTP_TLS_KEY=/auth/domain.key
        )

        # Copy .crt file to a directory *without* the .key one, so we can
        # test the client. (If client sees a matching .key file, it fails)
        # Thanks to Miloslav Trmac for this hint.
        mkdir -p $TESTDIR/client-auth
        cp $CERT $TESTDIR/client-auth/
    fi

    $PODMAN run -d --name $name "${reg_args[@]}" registry:2

    # Wait for registry to actually come up
    timeout=10
    while [[ $timeout -ge 1 ]]; do
        if curl localhost:$port/; then
            return
        fi

        timeout=$(expr $timeout - 1)
        sleep 1
    done
    die "Timed out waiting for registry container to respond on :$port"
}

# END helpers for starting/stopping registries
###############################################################################
# BEGIN miscellaneous tools

###################
#  random_string  #  Returns a pseudorandom human-readable string
###################
#
# Numeric argument, if present, is desired length of string
#
function random_string() {
    local length=${1:-10}

    head /dev/urandom | tr -dc a-zA-Z0-9 | head -c$length
}

# END miscellaneous tools
###############################################################################
16  systemtest/run-tests  Executable file
@@ -0,0 +1,16 @@
#!/bin/bash
#
# run-tests - simple wrapper allowing shortcuts on invocation
#

TEST_DIR=$(dirname $0)
TESTS=$TEST_DIR

for i; do
    case "$i" in
        *.bats)  TESTS=$i ;;
        *)       TESTS=$(echo $TEST_DIR/*$i*.bats) ;;
    esac
done

bats $TESTS
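Arguments are treated as substrings of test file names, so individual suites can be picked out without typing full paths:

```bash
systemtest/run-tests          # all systemtest/*.bats files
systemtest/run-tests copy     # expands to systemtest/*copy*.bats
systemtest/run-tests 040      # expands to systemtest/*040*.bats
```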
78  vendor.conf
@@ -1,51 +1,67 @@
-github.com/urfave/cli v1.17.0
-github.com/containers/image master
-github.com/opencontainers/go-digest master
-gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
-github.com/containers/storage master
+github.com/urfave/cli v1.20.0
+github.com/kr/pretty v0.1.0
+github.com/kr/text v0.1.0
+github.com/containers/image v2.0.0
+github.com/containers/buildah v1.8.4
+github.com/vbauerster/mpb v3.3.4
+github.com/mattn/go-isatty v0.0.4
+github.com/VividCortex/ewma v1.1.1
+golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
+github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
+github.com/containers/storage v1.12.10
 github.com/sirupsen/logrus v1.0.0
-github.com/Sirupsen/logrus v1.0.0
 github.com/go-check/check v1
 github.com/stretchr/testify v1.1.3
-github.com/davecgh/go-spew master
-github.com/pmezard/go-difflib master
-github.com/pkg/errors master
-golang.org/x/crypto master
+github.com/davecgh/go-spew v1.1.1
+github.com/pmezard/go-difflib 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
+github.com/pkg/errors v0.8.1
+golang.org/x/crypto a4c6cb3142f211c99e4bf4cd769535b29a9b616f
 github.com/ulikunitz/xz v0.5.4
+github.com/etcd-io/bbolt v1.3.2
 # docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
-github.com/docker/docker v1.13.0
-github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
-github.com/vbatts/tar-split v0.10.1
+github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341
+github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
+github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
+github.com/vbatts/tar-split v0.10.2
 github.com/gorilla/context 14f550f51a
 github.com/gorilla/mux e444e69cbd
 github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97
-golang.org/x/net master
+golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83
+github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
 # end docker deps
-golang.org/x/text master
-github.com/docker/distribution master
-github.com/docker/libtrust master
+golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d
+github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
+# docker/distributions dependencies
+# end of docker/distribution dependencies
+github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
+github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
-github.com/opencontainers/runc master
-github.com/opencontainers/image-spec v1.0.0
+github.com/opencontainers/runc v1.0.0-rc6
+github.com/opencontainers/image-spec 7b1e489870acb042978a3935d2fb76f8a79aff81
 # -- start OCI image validation requirements.
 github.com/opencontainers/runtime-spec v1.0.0
-github.com/opencontainers/image-tools da84dc9dddc823a32f543e60323f841d12429c51
-github.com/xeipuuv/gojsonschema master
-github.com/xeipuuv/gojsonreference master
-github.com/xeipuuv/gojsonpointer master
-go4.org master https://github.com/camlistore/go4
-github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
+github.com/opencontainers/image-tools 6d941547fa1df31900990b3fb47ec2468c9c6469
+github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
+github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
+github.com/xeipuuv/gojsonschema v1.1.0
+go4.org ce4c26f7be8eb27dc77f996b08d286dd80bc4a01 https://github.com/camlistore/go4
+github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce
 # -- end OCI image validation requirements
-github.com/mtrmac/gpgme master
+github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
 # openshift/origin' k8s dependencies as of OpenShift v1.1.5
 github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
-k8s.io/client-go master
+k8s.io/client-go kubernetes-1.10.13-beta.0
 github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
 gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4
 github.com/imdario/mergo 6633656539c1639d9d78127b7d47c622b5d7b6dc
 # containers/storage's dependencies that aren't already being pulled in
 github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
 github.com/pborman/uuid v1.0
-github.com/opencontainers/selinux master
-golang.org/x/sys master
+github.com/opencontainers/selinux v1.1
+golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
 github.com/tchap/go-patricia v2.2.6
-github.com/BurntSushi/toml master
+github.com/BurntSushi/toml v0.3.1
 github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
 github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
+github.com/klauspost/pgzip v1.2.1
+github.com/klauspost/compress v1.4.1
+github.com/klauspost/cpuid v1.2.0
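Each vendor.conf line names an import path, a pin (tag, branch, or commit SHA), and optionally an alternate clone URL for when the import path is not the canonical repository; vndr reads this file to populate vendor/. Adding a pin might look like this (hypothetical dependency, shown only to illustrate the field layout):

```bash
# fields: <import path> <pin> [alternate repo URL]
echo 'github.com/example/dep v1.2.3 https://github.com/fork/dep' >> vendor.conf
vndr github.com/example/dep   # re-vendor just this package
```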
501  vendor/github.com/Sirupsen/logrus/README.md  generated  vendored
@@ -1,501 +0,0 @@
|
||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/sirupsen/logrus) [](https://godoc.org/github.com/sirupsen/logrus)
|
||||
|
||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
|
||||
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
|
||||
many large deployments. The core API is unlikely to change much but please
|
||||
version control your Logrus to make sure you aren't fetching latest `master` on
|
||||
every build.**
|
||||
|
||||
**Seeing weird case-sensitive problems?** Unfortunately, the author failed to
|
||||
realize the consequences of renaming to lower-case. Due to the Go package
|
||||
environment, this caused issues. Regretfully, there's no turning back now.
|
||||
Everything using `logrus` will need to use the lower-case:
|
||||
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
|
||||
|
||||
I am terribly sorry for this inconvenience. Logrus strives hard for backwards
|
||||
compatibility, and the author failed to realize the cascading consequences of
|
||||
such a name-change. To fix Glide, see [these
|
||||
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
|
||||
|
||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||
plain text):
|
||||
|
||||

|
||||
|
||||
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
|
||||
or Splunk:
|
||||
|
||||
```json
|
||||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||
|
||||
{"level":"warning","msg":"The group's number increased tremendously!",
|
||||
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
||||
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
||||
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
||||
|
||||
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
||||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||
```
|
||||
|
||||
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
|
||||
attached, the output is compatible with the
|
||||
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
||||
|
||||
```text
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
||||
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
||||
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
||||
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
||||
exit status 1
|
||||
```
|
||||
|
||||
#### Case-sensitivity
|
||||
|
||||
The organization's name was changed to lower-case--and this will not be changed
|
||||
back. If you are getting import conflicts due to case sensitivity, please use
|
||||
the lower-case import: `github.com/sirupsen/logrus`.
|
||||
|
||||
#### Example
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
```
|
||||
|
||||
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
|
||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||
want:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Log as JSON instead of the default ASCII formatter.
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
|
||||
// Output to stdout instead of the default stderr
|
||||
// Can be any io.Writer, see below for File example
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
// Only log the warning severity or above.
|
||||
log.SetLevel(log.WarnLevel)
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
|
||||
// A common pattern is to re-use fields between logging statements by re-using
|
||||
// the logrus.Entry returned from WithFields()
|
||||
contextLogger := log.WithFields(log.Fields{
|
||||
"common": "this is a common field",
|
||||
"other": "I also should be logged always",
|
||||
})
|
||||
|
||||
contextLogger.Info("I'll be logged with common and other field")
|
||||
contextLogger.Info("Me too")
|
||||
}
|
||||
```
|
||||
|
||||
For more advanced usage such as logging to multiple locations from the same
|
||||
application, you can also create an instance of the `logrus` Logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Create a new instance of the logger. You can have any number of instances.
|
||||
var log = logrus.New()
|
||||
|
||||
func main() {
|
||||
// The API for setting attributes is a little different than the package level
|
||||
// exported logger. See Godoc.
|
||||
log.Out = os.Stdout
|
||||
|
||||
// You could set this to any `io.Writer` such as a file
|
||||
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
|
||||
// if err == nil {
|
||||
// log.Out = file
|
||||
// } else {
|
||||
// log.Info("Failed to log to file, using default stderr")
|
||||
// }
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
}
|
||||
```
|
||||
|
||||
#### Fields
|
||||
|
||||
Logrus encourages careful, structured logging through logging fields instead of
|
||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||
to send event %s to topic %s with key %d")`, you should log the much more
|
||||
discoverable:
|
||||
|
||||
```go
|
||||
log.WithFields(log.Fields{
|
||||
"event": event,
|
||||
"topic": topic,
|
||||
"key": key,
|
||||
}).Fatal("Failed to send event")
|
||||
```
|
||||
|
||||
We've found this API forces you to think about logging in a way that produces
|
||||
much more useful logging messages. We've been in countless situations where just
|
||||
a single added field to a log statement that was already there would've saved us
|
||||
hours. The `WithFields` call is optional.
|
||||
|
||||
In general, with Logrus using any of the `printf`-family functions should be
|
||||
seen as a hint you should add a field, however, you can still use the
|
||||
`printf`-family functions with Logrus.
|
||||
|
||||
#### Default Fields
|
||||
|
||||
Often it's helpful to have fields _always_ attached to log statements in an
|
||||
application or parts of one. For example, you may want to always log the
|
||||
`request_id` and `user_ip` in the context of a request. Instead of writing
|
||||
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
|
||||
every line, you can create a `logrus.Entry` to pass around instead:
|
||||
|
||||
```go
|
||||
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
|
||||
requestLogger.Info("something happened on that request") # will log request_id and user_ip
|
||||
requestLogger.Warn("something not great happened")
|
||||
```

#### Hooks

You can add hooks for logging levels. For example, to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD, or to log to
multiple places simultaneously, e.g. syslog.

Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:

```go
import (
  log "github.com/sirupsen/logrus"
  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
  logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
  "log/syslog"
)

func init() {

  // Use the Airbrake hook to report errors that have Error severity or above to
  // an exception tracker. You can create custom hooks, see the Hooks section.
  log.AddHook(airbrake.NewHook(123, "xyz", "production"))

  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
  if err != nil {
    log.Error("Unable to connect to local syslog daemon")
  } else {
    log.AddHook(hook)
  }
}
```
Note: the syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
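
You can also write a hook yourself: anything implementing the `Hook` interface (`Levels() []Level` and `Fire(*Entry) error`) can be passed to `AddHook`. A minimal sketch that mirrors error-and-above entries to a second writer (the `ErrorMirrorHook` type is illustrative, not a logrus API):

```go
import (
  "io"

  "github.com/sirupsen/logrus"
)

// ErrorMirrorHook is a hypothetical hook that copies serious entries to a
// secondary writer, e.g. a dedicated error log file.
type ErrorMirrorHook struct {
  Out io.Writer
}

// Levels reports which severities this hook fires on.
func (h *ErrorMirrorHook) Levels() []logrus.Level {
  return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

// Fire renders the entry with the logger's formatter and copies it out.
func (h *ErrorMirrorHook) Fire(entry *logrus.Entry) error {
  line, err := entry.String()
  if err != nil {
    return err
  }
  _, err = h.Out.Write([]byte(line))
  return err
}
```

Register it like any other hook: `logrus.AddHook(&ErrorMirrorHook{Out: errWriter})`.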

| Hook | Description |
| ----- | ----------- |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to an AMQP broker (like RabbitMQ) |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch |
| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to an ELK stack (through Redis) |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe) |
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/) |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |

#### Level logging

Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.

```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```

You can set the logging level on a `Logger`; it will then only log entries with
that severity or anything above it:

```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```

It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
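
One way to wire that up is `logrus.ParseLevel`, which converts a string such as `"debug"` into a `Level`. A sketch reading the level from an environment variable (the `LOG_LEVEL` name is a common convention, not something logrus reads by itself):

```go
import (
  "os"

  log "github.com/sirupsen/logrus"
)

func init() {
  // Keep the default level if the variable is unset or invalid.
  if lvl, err := log.ParseLevel(os.Getenv("LOG_LEVEL")); err == nil {
    log.SetLevel(lvl)
  }
}
```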

#### Entries

Besides the fields added with `WithField` or `WithFields`, some fields are
automatically added to all logging events:

1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
   the `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
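
For example, with the default `TextFormatter`, a call with one user field emits all three automatic fields alongside it (the timestamp below is illustrative):

```go
log.WithField("animal", "walrus").Info("A walrus appears")
// time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus
```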

#### Environments

Logrus has no notion of environment.

If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment, you
could do:

```go
import (
  log "github.com/sirupsen/logrus"
)

func init() {
  // do something here to set environment depending on an environment variable
  // or command-line flag
  if Environment == "production" {
    log.SetFormatter(&log.JSONFormatter{})
  } else {
    // The TextFormatter is default, you don't actually have to do this.
    log.SetFormatter(&log.TextFormatter{})
  }
}
```

This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.

#### Formatters

The built-in logging formatters are:

* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
  without colors.
  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
    field to `true`. To force no colored output even if there is a TTY, set the
    `DisableColors` field to `true`. For Windows, see
    [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).

Third party logging formatters:

* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.

You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):

```go
type MyJSONFormatter struct {
}

func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
  // Note this doesn't include Time, Level and Message which are available on
  // the Entry. Consult `godoc` on information about those fields or read the
  // source of the official loggers.
  serialized, err := json.Marshal(entry.Data)
  if err != nil {
    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
  }
  return append(serialized, '\n'), nil
}

log.SetFormatter(new(MyJSONFormatter))
```

#### Logger as an `io.Writer`

Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.

```go
w := logger.Writer()
defer w.Close()

srv := http.Server{
    // create a stdlib log.Logger that writes to
    // logrus.Logger.
    ErrorLog: log.New(w, "", 0),
}
```

Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.

This means that we can override the standard library logger easily:

```go
logger := logrus.New()
logger.Formatter = &logrus.JSONFormatter{}

// Use logrus for standard log output
// Note that `log` here references stdlib's log
// Not logrus imported under the name `log`.
log.SetOutput(logger.Writer())
```

#### Rotation

Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.

#### Tools

| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can configure a logger's level, hook and formatter via a config file, so the logger is generated with a different config in each environment.|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus to wrap with spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration by reusing some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |

#### Testing

Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:

* decorators for existing loggers (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):

```go
import (
  "testing"

  "github.com/sirupsen/logrus"
  "github.com/sirupsen/logrus/hooks/test"
  "github.com/stretchr/testify/assert"
)

func TestSomething(t *testing.T) {
  logger, hook := test.NewNullLogger()
  logger.Error("Helloerror")

  assert.Equal(t, 1, len(hook.Entries))
  assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
  assert.Equal(t, "Helloerror", hook.LastEntry().Message)

  hook.Reset()
  assert.Nil(t, hook.LastEntry())
}
```

#### Fatal handlers

Logrus can register one or more functions that will be called when any `fatal`
level message is logged. The registered handlers will be executed before
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.

```
...
handler := func() {
  // gracefully shutdown something...
}
logrus.RegisterExitHandler(handler)
...
```

#### Thread safety

By default, Logger is protected by a mutex for concurrent writes; this mutex is held when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.

Situations where locking is not needed include:

* You have no hooks registered, or calling hooks is already thread-safe.

* Writing to logger.Out is already thread-safe, for example:

  1) logger.Out is protected by locks.

  2) logger.Out is an os.File handler opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)

  (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
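
For example, when the logger writes short lines to a file opened with `O_APPEND`, the logger's own lock can be dropped. A sketch under those assumptions (the file name is illustrative):

```go
logger := logrus.New()

// Per the article above, O_APPEND writes smaller than 4k are atomic on
// Linux, so the logger's mutex adds nothing here.
file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
  logger.Fatal(err)
}
logger.Out = file
logger.SetNoLock()
```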

vendor/github.com/Sirupsen/logrus/alt_exit.go (64 changed lines, generated, vendored)
@@ -1,64 +0,0 @@
package logrus

// The following code was sourced and modified from the
// https://github.com/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import (
	"fmt"
	"os"
)

var handlers = []func(){}

func runHandler(handler func()) {
	defer func() {
		if err := recover(); err != nil {
			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
		}
	}()

	handler()
}

func runHandlers() {
	for _, handler := range handlers {
		runHandler(handler)
	}
}

// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
func Exit(code int) {
	runHandlers()
	os.Exit(code)
}

// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry is
// made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to gracefully shutdown. An example usecase could be
// closing database connections, or sending a alert that the application is
// closing.
func RegisterExitHandler(handler func()) {
	handlers = append(handlers, handler)
}

vendor/github.com/Sirupsen/logrus/doc.go (26 changed lines, generated, vendored)
@@ -1,26 +0,0 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.


The simplest way to use Logrus is simply the package-level exported logger:

  package main

  import (
    log "github.com/sirupsen/logrus"
  )

  func main() {
    log.WithFields(log.Fields{
      "animal": "walrus",
      "number": 1,
      "size":   10,
    }).Info("A walrus appears")
  }

Output:
  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10

For a full guide visit https://github.com/sirupsen/logrus
*/
package logrus

vendor/github.com/Sirupsen/logrus/entry.go (275 changed lines, generated, vendored)
@@ -1,275 +0,0 @@
package logrus

import (
	"bytes"
	"fmt"
	"os"
	"sync"
	"time"
)

var bufferPool *sync.Pool

func init() {
	bufferPool = &sync.Pool{
		New: func() interface{} {
			return new(bytes.Buffer)
		},
	}
}

// Defines the key when adding errors using WithError.
var ErrorKey = "error"

// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
	Logger *Logger

	// Contains all the fields set by the user.
	Data Fields

	// Time at which the log entry was created
	Time time.Time

	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
	Level Level

	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
	Message string

	// When formatter is called in entry.log(), an Buffer may be set to entry
	Buffer *bytes.Buffer
}

func NewEntry(logger *Logger) *Entry {
	return &Entry{
		Logger: logger,
		// Default is three fields, give a little extra room
		Data: make(Fields, 5),
	}
}

// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
	serialized, err := entry.Logger.Formatter.Format(entry)
	if err != nil {
		return "", err
	}
	str := string(serialized)
	return str, nil
}

// Add an error as single field (using the key defined in ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
	return entry.WithField(ErrorKey, err)
}

// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
	return entry.WithFields(Fields{key: value})
}

// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
	data := make(Fields, len(entry.Data)+len(fields))
	for k, v := range entry.Data {
		data[k] = v
	}
	for k, v := range fields {
		data[k] = v
	}
	return &Entry{Logger: entry.Logger, Data: data}
}

// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
	var buffer *bytes.Buffer
	entry.Time = time.Now()
	entry.Level = level
	entry.Message = msg

	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
		entry.Logger.mu.Lock()
		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
		entry.Logger.mu.Unlock()
	}
	buffer = bufferPool.Get().(*bytes.Buffer)
	buffer.Reset()
	defer bufferPool.Put(buffer)
	entry.Buffer = buffer
	serialized, err := entry.Logger.Formatter.Format(&entry)
	entry.Buffer = nil
	if err != nil {
		entry.Logger.mu.Lock()
		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
		entry.Logger.mu.Unlock()
	} else {
		entry.Logger.mu.Lock()
		_, err = entry.Logger.Out.Write(serialized)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
		}
		entry.Logger.mu.Unlock()
	}

	// To avoid Entry#log() returning a value that only would make sense for
	// panic() to use in Entry#Panic(), we avoid the allocation by checking
	// directly here.
	if level <= PanicLevel {
		panic(&entry)
	}
}

func (entry *Entry) Debug(args ...interface{}) {
	if entry.Logger.level() >= DebugLevel {
		entry.log(DebugLevel, fmt.Sprint(args...))
	}
}

func (entry *Entry) Print(args ...interface{}) {
	entry.Info(args...)
}

func (entry *Entry) Info(args ...interface{}) {
	if entry.Logger.level() >= InfoLevel {
		entry.log(InfoLevel, fmt.Sprint(args...))
	}
}

func (entry *Entry) Warn(args ...interface{}) {
	if entry.Logger.level() >= WarnLevel {
		entry.log(WarnLevel, fmt.Sprint(args...))
	}
}

func (entry *Entry) Warning(args ...interface{}) {
	entry.Warn(args...)
}

func (entry *Entry) Error(args ...interface{}) {
	if entry.Logger.level() >= ErrorLevel {
		entry.log(ErrorLevel, fmt.Sprint(args...))
	}
}

func (entry *Entry) Fatal(args ...interface{}) {
	if entry.Logger.level() >= FatalLevel {
		entry.log(FatalLevel, fmt.Sprint(args...))
	}
	Exit(1)
}

func (entry *Entry) Panic(args ...interface{}) {
	if entry.Logger.level() >= PanicLevel {
		entry.log(PanicLevel, fmt.Sprint(args...))
	}
	panic(fmt.Sprint(args...))
}

// Entry Printf family functions

func (entry *Entry) Debugf(format string, args ...interface{}) {
	if entry.Logger.level() >= DebugLevel {
		entry.Debug(fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Infof(format string, args ...interface{}) {
	if entry.Logger.level() >= InfoLevel {
		entry.Info(fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Printf(format string, args ...interface{}) {
	entry.Infof(format, args...)
}

func (entry *Entry) Warnf(format string, args ...interface{}) {
	if entry.Logger.level() >= WarnLevel {
		entry.Warn(fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Warningf(format string, args ...interface{}) {
	entry.Warnf(format, args...)
}

func (entry *Entry) Errorf(format string, args ...interface{}) {
	if entry.Logger.level() >= ErrorLevel {
		entry.Error(fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Fatalf(format string, args ...interface{}) {
	if entry.Logger.level() >= FatalLevel {
		entry.Fatal(fmt.Sprintf(format, args...))
	}
	Exit(1)
}

func (entry *Entry) Panicf(format string, args ...interface{}) {
	if entry.Logger.level() >= PanicLevel {
		entry.Panic(fmt.Sprintf(format, args...))
	}
}

// Entry Println family functions

func (entry *Entry) Debugln(args ...interface{}) {
	if entry.Logger.level() >= DebugLevel {
		entry.Debug(entry.sprintlnn(args...))
	}
}

func (entry *Entry) Infoln(args ...interface{}) {
	if entry.Logger.level() >= InfoLevel {
		entry.Info(entry.sprintlnn(args...))
	}
}

func (entry *Entry) Println(args ...interface{}) {
	entry.Infoln(args...)
}

func (entry *Entry) Warnln(args ...interface{}) {
	if entry.Logger.level() >= WarnLevel {
		entry.Warn(entry.sprintlnn(args...))
	}
}

func (entry *Entry) Warningln(args ...interface{}) {
	entry.Warnln(args...)
}

func (entry *Entry) Errorln(args ...interface{}) {
	if entry.Logger.level() >= ErrorLevel {
		entry.Error(entry.sprintlnn(args...))
	}
}

func (entry *Entry) Fatalln(args ...interface{}) {
	if entry.Logger.level() >= FatalLevel {
		entry.Fatal(entry.sprintlnn(args...))
	}
	Exit(1)
}

func (entry *Entry) Panicln(args ...interface{}) {
	if entry.Logger.level() >= PanicLevel {
		entry.Panic(entry.sprintlnn(args...))
	}
}

// Sprintlnn => Sprint no newline. This is to get the behavior of how
// fmt.Sprintln where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
	msg := fmt.Sprintln(args...)
	return msg[:len(msg)-1]
}

vendor/github.com/Sirupsen/logrus/exported.go (193 changed lines, generated, vendored)
@@ -1,193 +0,0 @@
package logrus

import (
	"io"
)

var (
	// std is the name of the standard logger in stdlib `log`
	std = New()
)

func StandardLogger() *Logger {
	return std
}

// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Out = out
}

// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Formatter = formatter
}

// SetLevel sets the standard logger level.
func SetLevel(level Level) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.setLevel(level)
}

// GetLevel returns the standard logger level.
func GetLevel() Level {
	std.mu.Lock()
	defer std.mu.Unlock()
	return std.level()
}

// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Hooks.Add(hook)
}

// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
	return std.WithField(ErrorKey, err)
}

// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
	return std.WithField(key, value)
}

// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
	return std.WithFields(fields)
}

// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
	std.Debug(args...)
}

// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
	std.Print(args...)
}

// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
	std.Info(args...)
}

// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
	std.Warn(args...)
}

// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
	std.Warning(args...)
}

// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
	std.Error(args...)
}

// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
	std.Panic(args...)
}

// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
	std.Fatal(args...)
}

// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
	std.Debugf(format, args...)
}

// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
	std.Printf(format, args...)
}

// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
	std.Infof(format, args...)
}

// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
	std.Warnf(format, args...)
}

// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
	std.Warningf(format, args...)
}

// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
	std.Errorf(format, args...)
}

// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
	std.Panicf(format, args...)
}

// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
	std.Fatalf(format, args...)
}

// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
	std.Debugln(args...)
}

// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
	std.Println(args...)
}

// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
	std.Infoln(args...)
}

// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
	std.Warnln(args...)
}

// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
	std.Warningln(args...)
}

// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
	std.Errorln(args...)
}

// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
	std.Panicln(args...)
}

// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
	std.Fatalln(args...)
}

vendor/github.com/Sirupsen/logrus/formatter.go (45 changed lines, generated, vendored)
@@ -1,45 +0,0 @@
package logrus

import "time"

const DefaultTimestampFormat = time.RFC3339

// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
	Format(*Entry) ([]byte, error)
}

// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// Would just silently drop the user provided level. Instead with this code
// it'll logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
	if t, ok := data["time"]; ok {
		data["fields.time"] = t
	}

	if m, ok := data["msg"]; ok {
		data["fields.msg"] = m
	}

	if l, ok := data["level"]; ok {
		data["fields.level"] = l
	}
}

vendor/github.com/Sirupsen/logrus/hooks.go (34 changed lines, generated, vendored)
@@ -1,34 +0,0 @@
package logrus

// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
	Levels() []Level
	Fire(*Entry) error
}

// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook

// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
	for _, level := range hook.Levels() {
		hooks[level] = append(hooks[level], hook)
	}
}

// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
	for _, hook := range hooks[level] {
		if err := hook.Fire(entry); err != nil {
			return err
		}
	}

	return nil
}

vendor/github.com/Sirupsen/logrus/json_formatter.go (74 changed lines, generated, vendored)
@@ -1,74 +0,0 @@
package logrus

import (
	"encoding/json"
	"fmt"
)

type fieldKey string
type FieldMap map[fieldKey]string

const (
	FieldKeyMsg   = "msg"
	FieldKeyLevel = "level"
	FieldKeyTime  = "time"
)

func (f FieldMap) resolve(key fieldKey) string {
	if k, ok := f[key]; ok {
		return k
	}

	return string(key)
}

type JSONFormatter struct {
	// TimestampFormat sets the format used for marshaling timestamps.
	TimestampFormat string

	// DisableTimestamp allows disabling automatic timestamps in output
	DisableTimestamp bool

	// FieldMap allows users to customize the names of keys for various fields.
	// As an example:
	// formatter := &JSONFormatter{
	//   FieldMap: FieldMap{
	//     FieldKeyTime:  "@timestamp",
	//     FieldKeyLevel: "@level",
	//     FieldKeyMsg:   "@message",
	//   },
	// }
	FieldMap FieldMap
}

func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
	data := make(Fields, len(entry.Data)+3)
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}
	prefixFieldClashes(data)

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = DefaultTimestampFormat
	}

	if !f.DisableTimestamp {
		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
	}
	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()

	serialized, err := json.Marshal(data)
	if err != nil {
		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
	}
	return append(serialized, '\n'), nil
}

vendor/github.com/Sirupsen/logrus/logger.go (317 changed lines, generated, vendored)
@@ -1,317 +0,0 @@
package logrus

import (
	"io"
	"os"
	"sync"
	"sync/atomic"
)

type Logger struct {
	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
	// file, or leave it default which is `os.Stderr`. You can also set this to
	// something more adventorous, such as logging to Kafka.
	Out io.Writer
	// Hooks for the logger instance. These allow firing events based on logging
	// levels and log entries. For example, to send errors to an error tracking
	// service, log to StatsD or dump the core on fatal errors.
	Hooks LevelHooks
	// All log entries pass through the formatter before logged to Out. The
	// included formatters are `TextFormatter` and `JSONFormatter` for which
	// TextFormatter is the default. In development (when a TTY is attached) it
	// logs with colors, but to a file it wouldn't. You can easily implement your
	// own that implements the `Formatter` interface, see the `README` or included
	// formatters for examples.
	Formatter Formatter
	// The logging level the logger should log at. This is typically (and defaults
	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
	// logged. `logrus.Debug` is useful in
	Level Level
	// Used to sync writing to the log. Locking is enabled by Default
	mu MutexWrap
	// Reusable empty entry
	entryPool sync.Pool
}

type MutexWrap struct {
	lock     sync.Mutex
	disabled bool
}

func (mw *MutexWrap) Lock() {
	if !mw.disabled {
		mw.lock.Lock()
	}
}

func (mw *MutexWrap) Unlock() {
	if !mw.disabled {
		mw.lock.Unlock()
	}
}

func (mw *MutexWrap) Disable() {
	mw.disabled = true
}

// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
//    var log = &Logger{
//      Out: os.Stderr,
//      Formatter: new(JSONFormatter),
//      Hooks: make(LevelHooks),
//      Level: logrus.DebugLevel,
//    }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
	return &Logger{
		Out:       os.Stderr,
		Formatter: new(TextFormatter),
		Hooks:     make(LevelHooks),
		Level:     InfoLevel,
	}
}

func (logger *Logger) newEntry() *Entry {
	entry, ok := logger.entryPool.Get().(*Entry)
	if ok {
		return entry
	}
	return NewEntry(logger)
}

func (logger *Logger) releaseEntry(entry *Entry) {
	logger.entryPool.Put(entry)
}

// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithField(key, value)
}

// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithFields(fields)
}

// Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
	entry := logger.newEntry()
	defer logger.releaseEntry(entry)
	return entry.WithError(err)
}

func (logger *Logger) Debugf(format string, args ...interface{}) {
	if logger.level() >= DebugLevel {
		entry := logger.newEntry()
		entry.Debugf(format, args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Infof(format string, args ...interface{}) {
	if logger.level() >= InfoLevel {
		entry := logger.newEntry()
		entry.Infof(format, args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Printf(format string, args ...interface{}) {
	entry := logger.newEntry()
	entry.Printf(format, args...)
	logger.releaseEntry(entry)
}

func (logger *Logger) Warnf(format string, args ...interface{}) {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warnf(format, args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Warningf(format string, args ...interface{}) {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warnf(format, args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Errorf(format string, args ...interface{}) {
	if logger.level() >= ErrorLevel {
		entry := logger.newEntry()
		entry.Errorf(format, args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Fatalf(format string, args ...interface{}) {
	if logger.level() >= FatalLevel {
		entry := logger.newEntry()
		entry.Fatalf(format, args...)
		logger.releaseEntry(entry)
	}
	Exit(1)
}

func (logger *Logger) Panicf(format string, args ...interface{}) {
	if logger.level() >= PanicLevel {
		entry := logger.newEntry()
		entry.Panicf(format, args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Debug(args ...interface{}) {
	if logger.level() >= DebugLevel {
		entry := logger.newEntry()
		entry.Debug(args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Info(args ...interface{}) {
	if logger.level() >= InfoLevel {
		entry := logger.newEntry()
		entry.Info(args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Print(args ...interface{}) {
	entry := logger.newEntry()
	entry.Info(args...)
	logger.releaseEntry(entry)
}

func (logger *Logger) Warn(args ...interface{}) {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warn(args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Warning(args ...interface{}) {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warn(args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Error(args ...interface{}) {
	if logger.level() >= ErrorLevel {
		entry := logger.newEntry()
		entry.Error(args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Fatal(args ...interface{}) {
	if logger.level() >= FatalLevel {
		entry := logger.newEntry()
		entry.Fatal(args...)
		logger.releaseEntry(entry)
	}
	Exit(1)
}

func (logger *Logger) Panic(args ...interface{}) {
	if logger.level() >= PanicLevel {
		entry := logger.newEntry()
		entry.Panic(args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Debugln(args ...interface{}) {
	if logger.level() >= DebugLevel {
		entry := logger.newEntry()
		entry.Debugln(args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Infoln(args ...interface{}) {
	if logger.level() >= InfoLevel {
		entry := logger.newEntry()
		entry.Infoln(args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Println(args ...interface{}) {
	entry := logger.newEntry()
	entry.Println(args...)
	logger.releaseEntry(entry)
}

func (logger *Logger) Warnln(args ...interface{}) {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warnln(args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Warningln(args ...interface{}) {
	if logger.level() >= WarnLevel {
		entry := logger.newEntry()
		entry.Warnln(args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Errorln(args ...interface{}) {
	if logger.level() >= ErrorLevel {
		entry := logger.newEntry()
		entry.Errorln(args...)
		logger.releaseEntry(entry)
	}
}

func (logger *Logger) Fatalln(args ...interface{}) {
	if logger.level() >= FatalLevel {
		entry := logger.newEntry()
		entry.Fatalln(args...)
		logger.releaseEntry(entry)
	}
	Exit(1)
}

func (logger *Logger) Panicln(args ...interface{}) {
	if logger.level() >= PanicLevel {
		entry := logger.newEntry()
		entry.Panicln(args...)
		logger.releaseEntry(entry)
	}
}

//When file is opened with appending mode, it's safe to
//write concurrently to a file (within 4k message on Linux).
//In these cases user can choose to disable the lock.
func (logger *Logger) SetNoLock() {
	logger.mu.Disable()
}

func (logger *Logger) level() Level {
	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}

func (logger *Logger) setLevel(level Level) {
	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}

vendor/github.com/Sirupsen/logrus/logrus.go (143 changed lines, generated, vendored)
@@ -1,143 +0,0 @@
package logrus

import (
	"fmt"
	"log"
	"strings"
)

// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}

// Level type
type Level uint32

// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
	switch level {
	case DebugLevel:
		return "debug"
	case InfoLevel:
		return "info"
	case WarnLevel:
		return "warning"
	case ErrorLevel:
		return "error"
	case FatalLevel:
		return "fatal"
	case PanicLevel:
		return "panic"
	}

	return "unknown"
}

// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
	switch strings.ToLower(lvl) {
	case "panic":
		return PanicLevel, nil
	case "fatal":
		return FatalLevel, nil
	case "error":
		return ErrorLevel, nil
	case "warn", "warning":
		return WarnLevel, nil
	case "info":
		return InfoLevel, nil
	case "debug":
		return DebugLevel, nil
	}

	var l Level
	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}

// A constant exposing all logging levels
var AllLevels = []Level{
	PanicLevel,
	FatalLevel,
	ErrorLevel,
	WarnLevel,
	InfoLevel,
	DebugLevel,
}

// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
	// PanicLevel level, highest level of severity. Logs and then calls panic with the
	// message passed to Debug, Info, ...
	PanicLevel Level = iota
	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
	// logging level is set to Panic.
	FatalLevel
	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
	// Commonly used for hooks to send errors to an error tracking service.
	ErrorLevel
	// WarnLevel level. Non-critical entries that deserve eyes.
	WarnLevel
	// InfoLevel level. General operational entries about what's going on inside the
	// application.
	InfoLevel
	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
	DebugLevel
)

// Won't compile if StdLogger can't be realized by a log.Logger
var (
	_ StdLogger = &log.Logger{}
	_ StdLogger = &Entry{}
	_ StdLogger = &Logger{}
)

// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
	Print(...interface{})
	Printf(string, ...interface{})
	Println(...interface{})

	Fatal(...interface{})
	Fatalf(string, ...interface{})
	Fatalln(...interface{})

	Panic(...interface{})
	Panicf(string, ...interface{})
	Panicln(...interface{})
}

// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
	WithField(key string, value interface{}) *Entry
	WithFields(fields Fields) *Entry
	WithError(err error) *Entry

	Debugf(format string, args ...interface{})
	Infof(format string, args ...interface{})
	Printf(format string, args ...interface{})
	Warnf(format string, args ...interface{})
	Warningf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Panicf(format string, args ...interface{})

	Debug(args ...interface{})
	Info(args ...interface{})
	Print(args ...interface{})
	Warn(args ...interface{})
	Warning(args ...interface{})
	Error(args ...interface{})
	Fatal(args ...interface{})
	Panic(args ...interface{})

	Debugln(args ...interface{})
	Infoln(args ...interface{})
	Println(args ...interface{})
	Warnln(args ...interface{})
	Warningln(args ...interface{})
	Errorln(args ...interface{})
	Fatalln(args ...interface{})
	Panicln(args ...interface{})
}

vendor/github.com/Sirupsen/logrus/terminal_appengine.go (10 changed lines, generated, vendored)
@@ -1,10 +0,0 @@
// +build appengine

package logrus

import "io"

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
	return true
}

vendor/github.com/Sirupsen/logrus/terminal_bsd.go (10 changed lines, generated, vendored)
@@ -1,10 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine

package logrus

import "syscall"

const ioctlReadTermios = syscall.TIOCGETA

type Termios syscall.Termios

vendor/github.com/Sirupsen/logrus/terminal_notwindows.go (28 changed lines, generated, vendored)
@@ -1,28 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux darwin freebsd openbsd netbsd dragonfly
// +build !appengine

package logrus

import (
	"io"
	"os"
	"syscall"
	"unsafe"
)

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
	var termios Termios
	switch v := f.(type) {
	case *os.File:
		_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
		return err == 0
	default:
		return false
	}
}

vendor/github.com/Sirupsen/logrus/terminal_solaris.go (21 changed lines, generated, vendored)
@@ -1,21 +0,0 @@
// +build solaris,!appengine

package logrus

import (
	"io"
	"os"

	"golang.org/x/sys/unix"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
	switch v := f.(type) {
	case *os.File:
		_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
		return err == nil
	default:
		return false
	}
}
82  vendor/github.com/Sirupsen/logrus/terminal_windows.go  generated vendored
@@ -1,82 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build windows,!appengine

package logrus

import (
	"bytes"
	"errors"
	"io"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"syscall"
	"unsafe"
)

var kernel32 = syscall.NewLazyDLL("kernel32.dll")

var (
	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
	procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
)

const (
	enableProcessedOutput           = 0x0001
	enableWrapAtEolOutput           = 0x0002
	enableVirtualTerminalProcessing = 0x0004
)

func getVersion() (float64, error) {
	stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
	cmd := exec.Command("cmd", "ver")
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	err := cmd.Run()
	if err != nil {
		return -1, err
	}

	// The output should be like "Microsoft Windows [Version XX.X.XXXXXX]"
	version := strings.Replace(stdout.String(), "\n", "", -1)
	version = strings.Replace(version, "\r\n", "", -1)

	x1 := strings.Index(version, "[Version")

	if x1 == -1 || strings.Index(version, "]") == -1 {
		return -1, errors.New("Can't determine Windows version")
	}

	return strconv.ParseFloat(version[x1+9:x1+13], 64)
}

func init() {
	ver, err := getVersion()
	if err != nil {
		return
	}

	// Activate Virtual Processing for Windows CMD
	// Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
	if ver >= 10 {
		handle := syscall.Handle(os.Stderr.Fd())
		procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing)
	}
}

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
	switch v := f.(type) {
	case *os.File:
		var st uint32
		r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
		return r != 0 && e == 0
	default:
		return false
	}
}
189  vendor/github.com/Sirupsen/logrus/text_formatter.go  generated vendored
@@ -1,189 +0,0 @@
package logrus

import (
	"bytes"
	"fmt"
	"sort"
	"strings"
	"sync"
	"time"
)

const (
	nocolor = 0
	red     = 31
	green   = 32
	yellow  = 33
	blue    = 34
	gray    = 37
)

var (
	baseTimestamp time.Time
)

func init() {
	baseTimestamp = time.Now()
}

type TextFormatter struct {
	// Set to true to bypass checking for a TTY before outputting colors.
	ForceColors bool

	// Force disabling colors.
	DisableColors bool

	// Disable timestamp logging. Useful when output is redirected to a logging
	// system that already adds timestamps.
	DisableTimestamp bool

	// Enable logging the full timestamp when a TTY is attached instead of just
	// the time passed since beginning of execution.
	FullTimestamp bool

	// TimestampFormat to use for display when a full timestamp is printed.
	TimestampFormat string

	// The fields are sorted by default for a consistent output. For applications
	// that log extremely frequently and don't use the JSON formatter this may not
	// be desired.
	DisableSorting bool

	// QuoteEmptyFields will wrap empty fields in quotes if true.
	QuoteEmptyFields bool

	// QuoteCharacter can be set to override the default quoting character "
	// with something else. For example: ', or `.
	QuoteCharacter string

	// Whether the logger's out is to a terminal.
	isTerminal bool

	sync.Once
}

func (f *TextFormatter) init(entry *Entry) {
	if len(f.QuoteCharacter) == 0 {
		f.QuoteCharacter = "\""
	}
	if entry.Logger != nil {
		f.isTerminal = IsTerminal(entry.Logger.Out)
	}
}

func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
	var b *bytes.Buffer
	keys := make([]string, 0, len(entry.Data))
	for k := range entry.Data {
		keys = append(keys, k)
	}

	if !f.DisableSorting {
		sort.Strings(keys)
	}
	if entry.Buffer != nil {
		b = entry.Buffer
	} else {
		b = &bytes.Buffer{}
	}

	prefixFieldClashes(entry.Data)

	f.Do(func() { f.init(entry) })

	isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = DefaultTimestampFormat
	}
	if isColored {
		f.printColored(b, entry, keys, timestampFormat)
	} else {
		if !f.DisableTimestamp {
			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
		}
		f.appendKeyValue(b, "level", entry.Level.String())
		if entry.Message != "" {
			f.appendKeyValue(b, "msg", entry.Message)
		}
		for _, key := range keys {
			f.appendKeyValue(b, key, entry.Data[key])
		}
	}

	b.WriteByte('\n')
	return b.Bytes(), nil
}

func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
	var levelColor int
	switch entry.Level {
	case DebugLevel:
		levelColor = gray
	case WarnLevel:
		levelColor = yellow
	case ErrorLevel, FatalLevel, PanicLevel:
		levelColor = red
	default:
		levelColor = blue
	}

	levelText := strings.ToUpper(entry.Level.String())[0:4]

	if f.DisableTimestamp {
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
	} else if !f.FullTimestamp {
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
	} else {
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
	}
	for _, k := range keys {
		v := entry.Data[k]
		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
		f.appendValue(b, v)
	}
}

func (f *TextFormatter) needsQuoting(text string) bool {
	if f.QuoteEmptyFields && len(text) == 0 {
		return true
	}
	for _, ch := range text {
		if !((ch >= 'a' && ch <= 'z') ||
			(ch >= 'A' && ch <= 'Z') ||
			(ch >= '0' && ch <= '9') ||
			ch == '-' || ch == '.') {
			return true
		}
	}
	return false
}

func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
	b.WriteString(key)
	b.WriteByte('=')
	f.appendValue(b, value)
	b.WriteByte(' ')
}

func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
	switch value := value.(type) {
	case string:
		if !f.needsQuoting(value) {
			b.WriteString(value)
		} else {
			fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
		}
	case error:
		errmsg := value.Error()
		if !f.needsQuoting(errmsg) {
			b.WriteString(errmsg)
		} else {
			fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
		}
	default:
		fmt.Fprint(b, value)
	}
}
62  vendor/github.com/Sirupsen/logrus/writer.go  generated vendored
@@ -1,62 +0,0 @@
package logrus

import (
	"bufio"
	"io"
	"runtime"
)

func (logger *Logger) Writer() *io.PipeWriter {
	return logger.WriterLevel(InfoLevel)
}

func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
	return NewEntry(logger).WriterLevel(level)
}

func (entry *Entry) Writer() *io.PipeWriter {
	return entry.WriterLevel(InfoLevel)
}

func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
	reader, writer := io.Pipe()

	var printFunc func(args ...interface{})

	switch level {
	case DebugLevel:
		printFunc = entry.Debug
	case InfoLevel:
		printFunc = entry.Info
	case WarnLevel:
		printFunc = entry.Warn
	case ErrorLevel:
		printFunc = entry.Error
	case FatalLevel:
		printFunc = entry.Fatal
	case PanicLevel:
		printFunc = entry.Panic
	default:
		printFunc = entry.Print
	}

	go entry.writerScanner(reader, printFunc)
	runtime.SetFinalizer(writer, writerFinalizer)

	return writer
}

func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		printFunc(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		entry.Errorf("Error while reading from Writer: %s", err)
	}
	reader.Close()
}

func writerFinalizer(writer *io.PipeWriter) {
	writer.Close()
}
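WriterLevel above turns a logger into an *io.PipeWriter whose lines are re-emitted, one per log entry, at the chosen level. A sketch of the intended use (my example, not part of this diff):

```go
package main

import (
	"os/exec"

	log "github.com/Sirupsen/logrus"
)

func main() {
	w := log.StandardLogger().WriterLevel(log.WarnLevel)
	defer w.Close() // closing the pipe stops the scanner goroutine

	cmd := exec.Command("ls", "-l")
	cmd.Stderr = w // each line the command writes becomes one Warn entry
	_ = cmd.Run()
}
```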
21  vendor/github.com/VividCortex/ewma/LICENSE  generated vendored  Normal file
@@ -0,0 +1,21 @@
The MIT License

Copyright (c) 2013 VividCortex

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
140  vendor/github.com/VividCortex/ewma/README.md  generated vendored  Normal file
@@ -0,0 +1,140 @@
# EWMA [](https://godoc.org/github.com/VividCortex/ewma)

This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our
Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/).

### Exponentially Weighted Moving Average

An exponentially weighted moving average is a way to continuously compute a type of
average for a series of numbers, as the numbers arrive. After a value in the series is
added to the average, its weight in the average decreases exponentially over time. This
biases the average towards more recent data. EWMAs are useful for several reasons, chiefly
their inexpensive computational and memory cost, as well as the fact that they represent
the recent central tendency of the series of values.

The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average
is biased towards recent history. The alpha must be between 0 and 1, and is typically
a fairly small number, such as 0.04. We will discuss the choice of alpha later.

The algorithm works thus, in pseudocode (the sketch just after this list restates the update rule in Go):

1. Multiply the next number in the series by alpha.
2. Multiply the current value of the average by 1 minus alpha.
3. Add the results of steps 1 and 2, and store it as the new current value of the average.
4. Repeat for each number in the series.
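A minimal Go rendering of that update rule (the names and the seed values are my own, not the package's):

```go
package main

import "fmt"

// ewmaUpdate applies one step of the EWMA recurrence: avg' = alpha*x + (1-alpha)*avg.
func ewmaUpdate(avg, x, alpha float64) float64 {
	return x*alpha + avg*(1-alpha)
}

func main() {
	avg := 300.0 // one initialization option: seed with the first value in the series
	for _, x := range []float64{280, 310, 305, 290} {
		avg = ewmaUpdate(avg, x, 0.5) // alpha = 0.50, as in the pictorial example below
	}
	fmt.Println(avg)
}
```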
There are special-case behaviors for how to initialize the current value, and these vary
between implementations. One approach is to start with the first value in the series;
another is to average the first 10 or so values in the series using an arithmetic average,
and then begin the incremental updating of the average. Each method has pros and cons.

It may help to look at it pictorially. Suppose the series has five numbers, and we choose
alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300.



Now let's take the moving average of those numbers. First we set the average to the value
of the first number.



Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add
them to generate a new value.



This continues until we are done.



Notice how each of the values in the series decays by half each time a new value
is added, and the top of the bars in the lower portion of the image represents the
size of the moving average. It is a smoothed, or low-pass, average of the original
series.

For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on Wikipedia.

### Choosing Alpha

Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average)
that averages over the previous N samples. What is the average age of each sample? It is N/2.

Now suppose that you wish to construct an EWMA whose samples have the same average age. The formula
to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book
"Production and Operations Analysis" by Steven Nahmias.

So, for example, if you have a time-series with samples once per second, and you want to get the
moving average over the previous minute, you should use an alpha of .032786885. This, by the way,
is the constant alpha used for this repository's SimpleEWMA.
|
||||
|
||||
This repository contains two implementations of the EWMA algorithm, with different properties.
|
||||
|
||||
The implementations all conform to the MovingAverage interface, and the constructor returns
|
||||
that type.
|
||||
|
||||
Current implementations assume an implicit time interval of 1.0 between every sample added.
|
||||
That is, the passage of time is treated as though it's the same as the arrival of samples.
|
||||
If you need time-based decay when samples are not arriving precisely at set intervals, then
|
||||
this package will not support your needs at present.
|
||||
|
||||
#### SimpleEWMA
|
||||
|
||||
A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA
|
||||
for multiple reasons. It has no warm-up period and it uses a constant
|
||||
decay. These properties let it use less memory. It will also behave
|
||||
differently when it's equal to zero, which is assumed to mean
|
||||
uninitialized, so if a value is likely to actually become zero over time,
|
||||
then any non-zero value will cause a sharp jump instead of a small change.
|
||||
|
||||
#### VariableEWMA
|
||||
|
||||
Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory.
|
||||
It also has a "warmup" time when you start adding values to it. It will report a value of 0.0
|
||||
until you have added the required number of samples to it. It uses some memory to store the
|
||||
number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA.
|
||||
|
||||
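A small sketch of that warm-up behavior (my example; it assumes the package default of 10 warm-up samples, per WARMUP_SAMPLES in ewma.go further down):

```go
package main

import (
	"fmt"

	"github.com/VividCortex/ewma"
)

func main() {
	a := ewma.NewMovingAverage(15) // a VariableEWMA with decay 2/(15+1)
	for i := 1; i <= 12; i++ {
		a.Add(100)
		// Value() reports 0.0 until the warm-up samples have been absorbed,
		// then the warmed-up average (here 100) from the 11th sample on.
		fmt.Printf("after %2d samples: %v\n", i, a.Value())
	}
}
```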
## Usage

### API Documentation

View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma).

```go
package main

import "github.com/VividCortex/ewma"

func main() {
	samples := [100]float64{
		4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
	}

	e := ewma.NewMovingAverage()  //=> Returns a SimpleEWMA if called without params
	a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)

	for _, f := range samples {
		e.Add(f)
		a.Add(f)
	}

	e.Value() //=> 13.577404704631077
	a.Value() //=> 1.5806140565521463e-12
}
```

## Contributing

We only accept pull requests for minor fixes or improvements. This includes:

* Small bug fixes
* Typos
* Documentation or comments

Please open issues to discuss new features. Pull requests for new features will be rejected,
so we recommend forking the repository and making changes in your fork for your use case.

## License

This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved.
It is licensed under the MIT license. Please see the LICENSE file for applicable license terms.
126  vendor/github.com/VividCortex/ewma/ewma.go  generated vendored  Normal file
@@ -0,0 +1,126 @@
// Package ewma implements exponentially weighted moving averages.
package ewma

// Copyright (c) 2013 VividCortex, Inc. All rights reserved.
// Please see the LICENSE file for applicable license terms.

const (
	// By default, we average over a one-minute period, which means the average
	// age of the metrics in the period is 30 seconds.
	AVG_METRIC_AGE float64 = 30.0

	// The formula for computing the decay factor from the average age comes
	// from "Production and Operations Analysis" by Steven Nahmias.
	DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1)

	// For best results, the moving average should not be initialized to the
	// samples it sees immediately. The book "Production and Operations
	// Analysis" by Steven Nahmias suggests initializing the moving average to
	// the mean of the first 10 samples. Until the VariableEWMA has seen this
	// many samples, it is not "ready" to be queried for the value of the
	// moving average. This adds some memory cost.
	WARMUP_SAMPLES uint8 = 10
)

// MovingAverage is the interface that computes a moving average over a time-
// series stream of numbers. The average may be over a window or exponentially
// decaying.
type MovingAverage interface {
	Add(float64)
	Value() float64
	Set(float64)
}

// NewMovingAverage constructs a MovingAverage that computes an average with the
// desired characteristics in the moving window or exponential decay. If no
// age is given, it constructs a default exponentially weighted implementation
// that consumes minimal memory. The age is related to the decay factor alpha
// by the formula given for the DECAY constant. It signifies the average age
// of the samples as time goes to infinity.
func NewMovingAverage(age ...float64) MovingAverage {
	if len(age) == 0 || age[0] == AVG_METRIC_AGE {
		return new(SimpleEWMA)
	}
	return &VariableEWMA{
		decay: 2 / (age[0] + 1),
	}
}

// A SimpleEWMA represents the exponentially weighted moving average of a
// series of numbers. It WILL have different behavior than the VariableEWMA
// for multiple reasons. It has no warm-up period and it uses a constant
// decay. These properties let it use less memory. It will also behave
// differently when it's equal to zero, which is assumed to mean
// uninitialized, so if a value is likely to actually become zero over time,
// then any non-zero value will cause a sharp jump instead of a small change.
// However, note that this takes a long time, and the value may just
// decay to a stable value that's close to zero, but which won't be mistaken
// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example.
type SimpleEWMA struct {
	// The current value of the average. After adding with Add(), this is
	// updated to reflect the average of all values seen thus far.
	value float64
}

// Add adds a value to the series and updates the moving average.
func (e *SimpleEWMA) Add(value float64) {
	if e.value == 0 { // this is a proxy for "uninitialized"
		e.value = value
	} else {
		e.value = (value * DECAY) + (e.value * (1 - DECAY))
	}
}

// Value returns the current value of the moving average.
func (e *SimpleEWMA) Value() float64 {
	return e.value
}

// Set sets the EWMA's value.
func (e *SimpleEWMA) Set(value float64) {
	e.value = value
}

// VariableEWMA represents the exponentially weighted moving average of a series of
// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory.
type VariableEWMA struct {
	// The multiplier factor by which the previous samples decay.
	decay float64
	// The current value of the average.
	value float64
	// The number of samples added to this instance.
	count uint8
}

// Add adds a value to the series and updates the moving average.
func (e *VariableEWMA) Add(value float64) {
	switch {
	case e.count < WARMUP_SAMPLES:
		e.count++
		e.value += value
	case e.count == WARMUP_SAMPLES:
		e.count++
		e.value = e.value / float64(WARMUP_SAMPLES)
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	default:
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	}
}

// Value returns the current value of the average, or 0.0 if the series hasn't
// warmed up yet.
func (e *VariableEWMA) Value() float64 {
	if e.count <= WARMUP_SAMPLES {
		return 0.0
	}

	return e.value
}

// Set sets the EWMA's value.
func (e *VariableEWMA) Set(value float64) {
	e.value = value
	if e.count <= WARMUP_SAMPLES {
		e.count = WARMUP_SAMPLES + 1
	}
}
202  vendor/github.com/containerd/continuity/LICENSE  generated vendored  Normal file
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
74  vendor/github.com/containerd/continuity/README.md  generated vendored  Normal file
@@ -0,0 +1,74 @@
# continuity

[](https://godoc.org/github.com/containerd/continuity)
[](https://travis-ci.org/containerd/continuity)

A transport-agnostic, filesystem metadata manifest system.

This project is a staging area for experiments in providing transport-agnostic
metadata storage.

Please see https://github.com/opencontainers/specs/issues/11 for more details.

## Manifest Format

A continuity manifest encodes filesystem metadata in Protocol Buffers.
Please refer to [proto/manifest.proto](proto/manifest.proto).

## Usage

Build:

```console
$ make
```

Create a manifest (of this repo itself):

```console
$ ./bin/continuity build . > /tmp/a.pb
```

Dump a manifest:

```console
$ ./bin/continuity ls /tmp/a.pb
...
-rw-rw-r--      270 B   /.gitignore
-rw-rw-r--       88 B   /.mailmap
-rw-rw-r--      187 B   /.travis.yml
-rw-rw-r--      359 B   /AUTHORS
-rw-rw-r--       11 kB  /LICENSE
-rw-rw-r--      1.5 kB  /Makefile
...
-rw-rw-r--      986 B   /testutil_test.go
drwxrwxr-x        0 B   /version
-rw-rw-r--      478 B   /version/version.go
```

Verify a manifest:

```console
$ ./bin/continuity verify . /tmp/a.pb
```

Break the directory and restore using the manifest:

```console
$ chmod 777 Makefile
$ ./bin/continuity verify . /tmp/a.pb
2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r--
$ ./bin/continuity apply . /tmp/a.pb
$ stat -c %a Makefile
664
$ ./bin/continuity verify . /tmp/a.pb
```

## Contribution Guide

### Building Proto Package

If you change the proto file you will need to rebuild the generated Go with `go generate`.

```console
$ go generate ./proto
```
85  vendor/github.com/containerd/continuity/pathdriver/path_driver.go  generated vendored  Normal file
@@ -0,0 +1,85 @@
package pathdriver

import (
	"path/filepath"
)

// PathDriver provides all of the path manipulation functions in a common
// interface. The context should call these and never use the `filepath`
// package or any other package to manipulate paths.
type PathDriver interface {
	Join(paths ...string) string
	IsAbs(path string) bool
	Rel(base, target string) (string, error)
	Base(path string) string
	Dir(path string) string
	Clean(path string) string
	Split(path string) (dir, file string)
	Separator() byte
	Abs(path string) (string, error)
	Walk(string, filepath.WalkFunc) error
	FromSlash(path string) string
	ToSlash(path string) string
	Match(pattern, name string) (matched bool, err error)
}

// pathDriver is a simple default implementation that calls the filepath package.
type pathDriver struct{}

// LocalPathDriver is the exported pathDriver struct for convenience.
var LocalPathDriver PathDriver = &pathDriver{}

func (*pathDriver) Join(paths ...string) string {
	return filepath.Join(paths...)
}

func (*pathDriver) IsAbs(path string) bool {
	return filepath.IsAbs(path)
}

func (*pathDriver) Rel(base, target string) (string, error) {
	return filepath.Rel(base, target)
}

func (*pathDriver) Base(path string) string {
	return filepath.Base(path)
}

func (*pathDriver) Dir(path string) string {
	return filepath.Dir(path)
}

func (*pathDriver) Clean(path string) string {
	return filepath.Clean(path)
}

func (*pathDriver) Split(path string) (dir, file string) {
	return filepath.Split(path)
}

func (*pathDriver) Separator() byte {
	return filepath.Separator
}

func (*pathDriver) Abs(path string) (string, error) {
	return filepath.Abs(path)
}

// Note that filepath.Walk calls os.Stat, so if the context wants to
// call Driver.Stat() for Walk, they need to create a new struct that
// overrides this method.
func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
	return filepath.Walk(root, walkFn)
}

func (*pathDriver) FromSlash(path string) string {
	return filepath.FromSlash(path)
}

func (*pathDriver) ToSlash(path string) string {
	return filepath.ToSlash(path)
}

func (*pathDriver) Match(pattern, name string) (bool, error) {
	return filepath.Match(pattern, name)
}
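The idea behind the interface above is that callers accept a PathDriver instead of calling `filepath` directly, so path semantics can be swapped per target filesystem. A quick sketch of that pattern (my example, built only on the API shown above):

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
)

// manifestPath resolves a user-supplied name against a root using whatever
// path semantics the driver implements.
func manifestPath(d pathdriver.PathDriver, root, name string) string {
	return d.Join(root, d.Clean(name))
}

func main() {
	// LocalPathDriver delegates to the standard filepath package.
	fmt.Println(manifestPath(pathdriver.LocalPathDriver, "/tmp", "./a.pb")) // /tmp/a.pb
}
```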
13  vendor/github.com/containerd/continuity/vendor.conf  generated vendored  Normal file
@@ -0,0 +1,13 @@
bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748
github.com/dustin/go-humanize bb3d318650d48840a39aa21a027c6630e198e626
github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf
github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265
github.com/sirupsen/logrus 89742aefa4b206dcf400792f3bd35b542998eb3b
github.com/spf13/cobra 2da4a54c5ceefcee7ca5dd0eea1e18a3b6366489
github.com/spf13/pflag 4c012f6dcd9546820e378d0bdda4d8fc772cdfea
golang.org/x/crypto 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94
golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
golang.org/x/sys 665f6529cca930e27b831a0d1dafffbe1c172924
201  vendor/github.com/containers/buildah/LICENSE  generated vendored  Normal file
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
129
vendor/github.com/containers/buildah/README.md
generated
vendored
Normal file
129
vendor/github.com/containers/buildah/README.md
generated
vendored
Normal file
@@ -0,0 +1,129 @@
# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images

[![Go Report Card](https://goreportcard.com/badge/github.com/containers/buildah)](https://goreportcard.com/report/github.com/containers/buildah)
[![Travis](https://travis-ci.org/containers/buildah.svg?branch=master)](https://travis-ci.org/containers/buildah)

The Buildah package provides a command line tool that can be used to
* create a working container, either from scratch or using an image as a starting point
* create an image, either from a working container or via the instructions in a Dockerfile
  * images can be built in either the OCI image format or the traditional upstream docker image format
* mount a working container's root filesystem for manipulation
* unmount a working container's root filesystem
* use the updated contents of a container's root filesystem as a filesystem layer to create a new image
* delete a working container or an image
* rename a local container

## Buildah Information for Developers

For blogs, release announcements and more, please check out the [buildah.io](https://buildah.io) website!

**[Buildah Demos](demos)**

**[Changelog](CHANGELOG.md)**

**[Contributing](CONTRIBUTING.md)**

**[Development Plan](developmentplan.md)**

**[Installation notes](install.md)**

**[Troubleshooting Guide](troubleshooting.md)**

**[Tutorials](docs/tutorials)**

## Buildah and Podman relationship

Buildah and Podman are two complementary open-source projects that are
available on most Linux platforms; both projects reside at
[GitHub.com](https://github.com), with Buildah
[here](https://github.com/containers/buildah) and Podman
[here](https://github.com/containers/libpod). Both Buildah and Podman are
command line tools that work on Open Container Initiative (OCI) images and
containers. The two projects differ in their specialization.

Buildah specializes in building OCI images. Buildah's commands replicate all
of the commands that are found in a Dockerfile. This allows building images
with and without Dockerfiles while not requiring any root privileges.
Buildah's ultimate goal is to provide a lower-level coreutils interface to
build images. The flexibility of building images without Dockerfiles allows
for the integration of other scripting languages into the build process.
Buildah follows a simple fork-exec model and does not run as a daemon,
but it is based on a comprehensive API in golang, which can be vendored
into other tools.

Podman specializes in the commands and functions that help you maintain and
modify OCI images, such as pulling and tagging. It also allows you to create,
run, and maintain containers created from those images.

A major difference between Podman and Buildah is their concept of a container.
Podman allows users to create "traditional containers" that are intended to be
long lived, while Buildah containers are created only so that content can be
added back to the container image. An easy way to think of it is that the
`buildah run` command emulates the RUN command in a Dockerfile, while the
`podman run` command emulates the `docker run` command in functionality.
Because of this and their underlying storage differences, you cannot see
Podman containers from within Buildah, or vice versa; a minimal illustration
follows below.

In short, Buildah is an efficient way to create OCI images, while Podman allows
you to manage and maintain those images and containers in a production
environment using familiar container CLI commands. For more details, see the
[Container Tools Guide](https://github.com/containers/buildah/tree/master/docs/containertools).
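To make that distinction concrete, here is a minimal illustrative pair of commands (the image and package names are arbitrary examples, not taken from this document):

```bash
# Buildah: run a command inside a *working container* as a build step,
# the moral equivalent of a RUN line in a Dockerfile.
ctr=$(buildah from fedora)
buildah run "$ctr" -- dnf install -y lighttpd

# Podman: run a long-lived container from a finished image,
# the moral equivalent of `docker run`.
podman run -d --name web -p 8080:80 my-lighttpd-image
```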
## Example

From [`./examples/lighttpd.sh`](examples/lighttpd.sh):

```bash
$ cat > lighttpd.sh <<"EOF"
#!/bin/bash -x

ctr1=$(buildah from "${1:-fedora}")

## Get all updates and install our minimal httpd server
buildah run "$ctr1" -- dnf update -y
buildah run "$ctr1" -- dnf install -y lighttpd

## Include some buildtime annotations
buildah config --annotation "com.example.build.host=$(uname -n)" "$ctr1"

## Run our server and expose the port
buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" "$ctr1"
buildah config --port 80 "$ctr1"

## Commit this container to an image name
buildah commit "$ctr1" "${2:-$USER/lighttpd}"
EOF

$ chmod +x lighttpd.sh
$ sudo ./lighttpd.sh
```

## Commands
| Command                                              | Description                                                                                            |
| ---------------------------------------------------- | ------------------------------------------------------------------------------------------------------ |
| [buildah-add(1)](/docs/buildah-add.md)               | Add the contents of a file, URL, or a directory to the container.                                      |
| [buildah-bud(1)](/docs/buildah-bud.md)               | Build an image using instructions from Dockerfiles.                                                    |
| [buildah-commit(1)](/docs/buildah-commit.md)         | Create an image from a working container.                                                              |
| [buildah-config(1)](/docs/buildah-config.md)         | Update image configuration settings.                                                                   |
| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images.                                                     |
| [buildah-copy(1)](/docs/buildah-copy.md)             | Copy the contents of a file, URL, or directory into a container's working directory.                   |
| [buildah-from(1)](/docs/buildah-from.md)             | Create a new working container, either from scratch or using a specified image as a starting point.    |
| [buildah-images(1)](/docs/buildah-images.md)         | List images in local storage.                                                                          |
| [buildah-info(1)](/docs/buildah-info.md)             | Display Buildah system information.                                                                    |
| [buildah-inspect(1)](/docs/buildah-inspect.md)       | Inspect the configuration of a container or image.                                                     |
| [buildah-mount(1)](/docs/buildah-mount.md)           | Mount the working container's root filesystem.                                                         |
| [buildah-pull(1)](/docs/buildah-pull.md)             | Pull an image from the specified location.                                                             |
| [buildah-push(1)](/docs/buildah-push.md)             | Push an image from local storage to elsewhere.                                                         |
| [buildah-rename(1)](/docs/buildah-rename.md)         | Rename a local container.                                                                              |
| [buildah-rm(1)](/docs/buildah-rm.md)                 | Remove one or more working containers.                                                                 |
| [buildah-rmi(1)](/docs/buildah-rmi.md)               | Remove one or more images.                                                                             |
| [buildah-run(1)](/docs/buildah-run.md)               | Run a command inside of the container.                                                                 |
| [buildah-tag(1)](/docs/buildah-tag.md)               | Add an additional name to a local image.                                                               |
| [buildah-umount(1)](/docs/buildah-umount.md)         | Unmount a working container's root file system.                                                        |
| [buildah-unshare(1)](/docs/buildah-unshare.md)       | Launch a command in a user namespace with modified ID mappings.                                        |
| [buildah-version(1)](/docs/buildah-version.md)       | Display the Buildah version information.                                                               |

**Future goals include:**
* more CI tests
* additional CLI commands (?)
vendor/github.com/containers/buildah/pkg/unshare/unshare.c (287 lines, generated, vendored, new file)
@@ -0,0 +1,287 @@
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <grp.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <errno.h>
#include <unistd.h>

/* Open Source projects like conda-forge want to package podman and are based
   on centos:6. conda-forge has minimal libc requirements and lacks the
   memfd.h header, so we use mman.h instead.
*/
#ifndef MFD_ALLOW_SEALING
#define MFD_ALLOW_SEALING 2U
#endif
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 1U
#endif

#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif
#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
#endif
#ifndef F_SEAL_SEAL
#define F_SEAL_SEAL 0x0001LU
#endif
#ifndef F_SEAL_SHRINK
#define F_SEAL_SHRINK 0x0002LU
#endif
#ifndef F_SEAL_GROW
#define F_SEAL_GROW 0x0004LU
#endif
#ifndef F_SEAL_WRITE
#define F_SEAL_WRITE 0x0008LU
#endif

#define BUFSTEP 1024

static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone";

/* Parse an integer setting from the named environment variable and unset it;
 * returns -1 if the variable is not set. */
static int _containers_unshare_parse_envint(const char *envname) {
	char *p, *q;
	long l;

	p = getenv(envname);
	if (p == NULL) {
		return -1;
	}
	q = NULL;
	l = strtol(p, &q, 10);
	if ((q == NULL) || (*q != '\0')) {
		fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
		_exit(1);
	}
	unsetenv(envname);
	return l;
}

/* Warn if the sysctl at the given path disables user namespaces. */
static void _check_proc_sys_file(const char *path)
{
	FILE *fp;
	char buf[32];
	size_t n_read;
	long r;

	fp = fopen(path, "r");
	if (fp == NULL) {
		if (errno != ENOENT)
			fprintf(stderr, "Error reading %s: %m\n", path);
	} else {
		memset(buf, 0, sizeof(buf));
		n_read = fread(buf, 1, sizeof(buf) - 1, fp);
		if (n_read > 0) {
			r = atoi(buf);
			if (r == 0) {
				fprintf(stderr, "User namespaces are not enabled in %s.\n", path);
			}
		} else {
			fprintf(stderr, "Error reading %s: no contents, should contain a number greater than 0.\n", path);
		}
		fclose(fp);
	}
}

/* Read a NUL-separated list (such as /proc/self/cmdline) into a
 * NULL-terminated array of strings. */
static char **parse_proc_stringlist(const char *list) {
	int fd, n, i, n_strings;
	char *buf, *new_buf, **ret;
	size_t size, new_size, used;

	fd = open(list, O_RDONLY);
	if (fd == -1) {
		return NULL;
	}
	buf = NULL;
	size = 0;
	used = 0;
	for (;;) {
		new_size = used + BUFSTEP;
		new_buf = realloc(buf, new_size);
		if (new_buf == NULL) {
			free(buf);
			fprintf(stderr, "realloc(%ld): out of memory\n", (long)(size + BUFSTEP));
			return NULL;
		}
		buf = new_buf;
		size = new_size;
		memset(buf + used, '\0', size - used);
		n = read(fd, buf + used, size - used - 1);
		if (n < 0) {
			fprintf(stderr, "read(): %m\n");
			return NULL;
		}
		if (n == 0) {
			break;
		}
		used += n;
	}
	close(fd);
	n_strings = 0;
	for (n = 0; n < used; n++) {
		if ((n == 0) || (buf[n-1] == '\0')) {
			n_strings++;
		}
	}
	ret = calloc(n_strings + 1, sizeof(char *));
	if (ret == NULL) {
		fprintf(stderr, "calloc(): out of memory\n");
		return NULL;
	}
	i = 0;
	for (n = 0; n < used; n++) {
		if ((n == 0) || (buf[n-1] == '\0')) {
			ret[i++] = &buf[n];
		}
	}
	ret[i] = NULL;
	return ret;
}

/* Copy /proc/self/exe into a sealed anonymous memory file descriptor and
 * re-exec it, so that the re-exec still works after we have changed
 * namespaces. */
static int containers_reexec(void) {
	char **argv, *exename;
	int fd, mmfd, n_read, n_written;
	struct stat st;
	char buf[2048];

	argv = parse_proc_stringlist("/proc/self/cmdline");
	if (argv == NULL) {
		return -1;
	}
	fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC);
	if (fd == -1) {
		fprintf(stderr, "open(\"/proc/self/exe\"): %m\n");
		return -1;
	}
	if (fstat(fd, &st) == -1) {
		fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n");
		return -1;
	}
	exename = basename(argv[0]);
	mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC);
	if (mmfd == -1) {
		fprintf(stderr, "memfd_create(): %m\n");
		return -1;
	}
	for (;;) {
		n_read = read(fd, buf, sizeof(buf));
		if (n_read < 0) {
			fprintf(stderr, "read(\"/proc/self/exe\"): %m\n");
			return -1;
		}
		if (n_read == 0) {
			break;
		}
		n_written = write(mmfd, buf, n_read);
		if (n_written < 0) {
			fprintf(stderr, "write(anonfd): %m\n");
			return -1;
		}
		if (n_written != n_read) {
			fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read);
			return -1;
		}
	}
	close(fd);
	if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) {
		close(mmfd);
		fprintf(stderr, "Error sealing memfd copy: %m\n");
		return -1;
	}
	if (fexecve(mmfd, argv, environ) == -1) {
		close(mmfd);
		fprintf(stderr, "Error during reexec(...): %m\n");
		return -1;
	}
	return 0;
}

/* Constructor-time entry point (see unshare_cgo.go): apply the
 * _Containers-* settings passed down from the Go parent before the Go
 * runtime starts. */
void _containers_unshare(void)
{
	int flags, pidfd, continuefd, n, pgrp, sid, ctty;
	char buf[2048];

	flags = _containers_unshare_parse_envint("_Containers-unshare");
	if (flags == -1) {
		return;
	}
	if ((flags & CLONE_NEWUSER) != 0) {
		if (unshare(CLONE_NEWUSER) == -1) {
			fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n");
			_check_proc_sys_file(_max_user_namespaces);
			_check_proc_sys_file(_unprivileged_user_namespaces);
			_exit(1);
		}
	}
	pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe");
	if (pidfd != -1) {
		snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
		size_t size = write(pidfd, buf, strlen(buf));
		if (size != strlen(buf)) {
			fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
			_exit(1);
		}
		close(pidfd);
	}
	continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe");
	if (continuefd != -1) {
		n = read(continuefd, buf, sizeof(buf));
		if (n > 0) {
			fprintf(stderr, "Error: %.*s\n", n, buf);
			_exit(1);
		}
		close(continuefd);
	}
	sid = _containers_unshare_parse_envint("_Containers-setsid");
	if (sid == 1) {
		if (setsid() == -1) {
			fprintf(stderr, "Error during setsid: %m\n");
			_exit(1);
		}
	}
	pgrp = _containers_unshare_parse_envint("_Containers-setpgrp");
	if (pgrp == 1) {
		if (setpgrp() == -1) {
			fprintf(stderr, "Error during setpgrp: %m\n");
			_exit(1);
		}
	}
	ctty = _containers_unshare_parse_envint("_Containers-ctty");
	if (ctty != -1) {
		if (ioctl(ctty, TIOCSCTTY, 0) == -1) {
			fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty);
			_exit(1);
		}
	}
	if ((flags & CLONE_NEWUSER) != 0) {
		if (setresgid(0, 0, 0) != 0) {
			fprintf(stderr, "Error during setresgid(0): %m\n");
			_exit(1);
		}
		if (setresuid(0, 0, 0) != 0) {
			fprintf(stderr, "Error during setresuid(0): %m\n");
			_exit(1);
		}
	}
	if ((flags & ~CLONE_NEWUSER) != 0) {
		if (unshare(flags & ~CLONE_NEWUSER) == -1) {
			fprintf(stderr, "Error during unshare(...): %m\n");
			_exit(1);
		}
	}
	if (containers_reexec() != 0) {
		_exit(1);
	}
	return;
}
vendor/github.com/containers/buildah/pkg/unshare/unshare.go (572 lines, generated, vendored, new file)
@@ -0,0 +1,572 @@
// +build linux

package unshare

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
	"os/user"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"

	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/pkg/reexec"
	"github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/syndtr/gocapability/capability"
)

// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and
// handles setting ID maps and other related settings by triggering
// initialization code in the child.
type Cmd struct {
	*exec.Cmd
	UnshareFlags               int
	UseNewuidmap               bool
	UidMappings                []specs.LinuxIDMapping
	UseNewgidmap               bool
	GidMappings                []specs.LinuxIDMapping
	GidMappingsEnableSetgroups bool
	Setsid                     bool
	Setpgrp                    bool
	Ctty                       *os.File
	OOMScoreAdj                *int
	Hook                       func(pid int) error
}

// Command creates a new Cmd which can be customized.
func Command(args ...string) *Cmd {
	cmd := reexec.Command(args...)
	return &Cmd{
		Cmd: cmd,
	}
}

func (c *Cmd) Start() error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Set an environment variable to tell the child to synchronize its startup.
	if c.Env == nil {
		c.Env = os.Environ()
	}
	c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags))

	// Set the environment variables that the libpod "rootless" package expects to find.
	if os.Geteuid() != 0 {
		c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done")
		c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", os.Geteuid()))
		c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_GID=%d", os.Getegid()))
	}

	// Create the pipe for reading the child's PID.
	pidRead, pidWrite, err := os.Pipe()
	if err != nil {
		return errors.Wrapf(err, "error creating pid pipe")
	}
	c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
	c.ExtraFiles = append(c.ExtraFiles, pidWrite)

	// Create the pipe for letting the child know to proceed.
	continueRead, continueWrite, err := os.Pipe()
	if err != nil {
		pidRead.Close()
		pidWrite.Close()
		return errors.Wrapf(err, "error creating continue pipe")
	}
	c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
	c.ExtraFiles = append(c.ExtraFiles, continueRead)

	// Pass along other instructions.
	if c.Setsid {
		c.Env = append(c.Env, "_Containers-setsid=1")
	}
	if c.Setpgrp {
		c.Env = append(c.Env, "_Containers-setpgrp=1")
	}
	if c.Ctty != nil {
		c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3))
		c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
	}

	// Make sure we clean up our pipes.
	defer func() {
		if pidRead != nil {
			pidRead.Close()
		}
		if pidWrite != nil {
			pidWrite.Close()
		}
		if continueRead != nil {
			continueRead.Close()
		}
		if continueWrite != nil {
			continueWrite.Close()
		}
	}()

	// Start the new process.
	err = c.Cmd.Start()
	if err != nil {
		return err
	}

	// Close the ends of the pipes that the parent doesn't need.
	continueRead.Close()
	continueRead = nil
	pidWrite.Close()
	pidWrite = nil

	// Read the child's PID from the pipe.
	pidString := ""
	b := new(bytes.Buffer)
	io.Copy(b, pidRead)
	pidString = b.String()
	pid, err := strconv.Atoi(pidString)
	if err != nil {
		fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
		return errors.Wrapf(err, "error parsing PID %q", pidString)
	}
	pidString = fmt.Sprintf("%d", pid)

	// If we created a new user namespace, set any specified mappings.
	if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 {
		// Always set "setgroups".
		setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
		if err != nil {
			fmt.Fprintf(continueWrite, "error opening setgroups: %v", err)
			return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
		}
		defer setgroups.Close()
		if c.GidMappingsEnableSetgroups {
			if _, err := fmt.Fprintf(setgroups, "allow"); err != nil {
				fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err)
				return errors.Wrapf(err, "error writing \"allow\" to /proc/%s/setgroups", pidString)
			}
		} else {
			if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
				fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err)
				return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString)
			}
		}

		if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
			uidmap, gidmap, err := GetHostIDMappings("")
			if err != nil {
				fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
				return errors.Wrapf(err, "error reading ID mappings in parent")
			}
			if len(c.UidMappings) == 0 {
				c.UidMappings = uidmap
				for i := range c.UidMappings {
					c.UidMappings[i].HostID = c.UidMappings[i].ContainerID
				}
			}
			if len(c.GidMappings) == 0 {
				c.GidMappings = gidmap
				for i := range c.GidMappings {
					c.GidMappings[i].HostID = c.GidMappings[i].ContainerID
				}
			}
		}

		if len(c.GidMappings) > 0 {
			// Build the GID map, since writing to the proc file has to be done all at once.
			g := new(bytes.Buffer)
			for _, m := range c.GidMappings {
				fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
			}
			gidmapSet := false
			// Set the GID map.
			if c.UseNewgidmap {
				cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
				g.Reset()
				cmd.Stdout = g
				cmd.Stderr = g
				err := cmd.Run()
				if err == nil {
					gidmapSet = true
				} else {
					logrus.Warnf("error running newgidmap: %v: %s", err, g.String())
					logrus.Warnf("falling back to single mapping")
					g.Reset()
					g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
				}
			}
			if !gidmapSet {
				if c.UseNewgidmap {
					setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
					if err != nil {
						fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err)
						return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
					}
					defer setgroups.Close()
					if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
						fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err)
						return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString)
					}
				}
				gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
				if err != nil {
					fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
					return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
				}
				defer gidmap.Close()
				if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
					fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
					return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString)
				}
			}
		}

		if len(c.UidMappings) > 0 {
			// Build the UID map, since writing to the proc file has to be done all at once.
			u := new(bytes.Buffer)
			for _, m := range c.UidMappings {
				fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
			}
			uidmapSet := false
			// Set the UID map.
			if c.UseNewuidmap {
				cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
				u.Reset()
				cmd.Stdout = u
				cmd.Stderr = u
				err := cmd.Run()
				if err == nil {
					uidmapSet = true
				} else {
					logrus.Warnf("error running newuidmap: %v: %s", err, u.String())
					logrus.Warnf("falling back to single mapping")
					u.Reset()
					u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid())))
				}
			}
			if !uidmapSet {
				uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
				if err != nil {
					fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
					return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
				}
				defer uidmap.Close()
				if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
					fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err)
					return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString)
				}
			}
		}
	}

	if c.OOMScoreAdj != nil {
		oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
		if err != nil {
			fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
			return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
		}
		defer oomScoreAdj.Close()
		if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil {
			fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err)
			return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", c.OOMScoreAdj, pidString)
		}
	}
	// Run any additional setup that we want to do before the child starts running proper.
	if c.Hook != nil {
		if err = c.Hook(pid); err != nil {
			fmt.Fprintf(continueWrite, "hook error: %v", err)
			return err
		}
	}

	return nil
}

func (c *Cmd) Run() error {
	if err := c.Start(); err != nil {
		return err
	}
	return c.Wait()
}

func (c *Cmd) CombinedOutput() ([]byte, error) {
	return nil, errors.New("unshare: CombinedOutput() not implemented")
}

func (c *Cmd) Output() ([]byte, error) {
	return nil, errors.New("unshare: Output() not implemented")
}

var (
	isRootlessOnce sync.Once
	isRootless     bool
)

const (
	// UsernsEnvName is the environment variable which, when set, indicates that we are running in rootless mode
	UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)

// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
	isRootlessOnce.Do(func() {
		isRootless = os.Geteuid() != 0 || os.Getenv(UsernsEnvName) != ""
	})
	return isRootless
}

// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
	if uidEnv != "" {
		u, _ := strconv.Atoi(uidEnv)
		return u
	}
	return os.Getuid()
}

// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
	return append(os.Environ(), UsernsEnvName+"=done")
}

type Runnable interface {
	Run() error
}

func bailOnError(err error, format string, a ...interface{}) {
	if err != nil {
		if format != "" {
			logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
		} else {
			logrus.Errorf("%v", err)
		}
		os.Exit(1)
	}
}

// MaybeReexecUsingUserNamespace re-execs the process in a new user namespace
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
	// If we've already been through this once, no need to try again.
	if os.Geteuid() == 0 && IsRootless() {
		return
	}

	var uidNum, gidNum uint64
	// Figure out who we are.
	me, err := user.Current()
	if !os.IsNotExist(err) {
		bailOnError(err, "error determining current user")
		uidNum, err = strconv.ParseUint(me.Uid, 10, 32)
		bailOnError(err, "error parsing current UID %s", me.Uid)
		gidNum, err = strconv.ParseUint(me.Gid, 10, 32)
		bailOnError(err, "error parsing current GID %s", me.Gid)
	}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// ID mappings to use to reexec ourselves.
	var uidmap, gidmap []specs.LinuxIDMapping
	if uidNum != 0 || evenForRoot {
		// Read the set of ID mappings that we're allowed to use. Each
		// range in the /etc/subuid and /etc/subgid files is a starting
		// host ID and a range size.
		uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
		if err != nil {
			logrus.Warnf("error reading allowed ID mappings: %v", err)
		}
		if len(uidmap) == 0 {
			logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
		}
		if len(gidmap) == 0 {
			logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username)
		}
		// Map our UID and GID, then the subuid and subgid ranges,
		// consecutively, starting at 0, to get the mappings to use for
		// a copy of ourselves.
		uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...)
		gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...)
		var rangeStart uint32
		for i := range uidmap {
			uidmap[i].ContainerID = rangeStart
			rangeStart += uidmap[i].Size
		}
		rangeStart = 0
		for i := range gidmap {
			gidmap[i].ContainerID = rangeStart
			rangeStart += gidmap[i].Size
		}
	} else {
		// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
		// to use unshare(), so don't bother creating a new user namespace at this point.
		capabilities, err := capability.NewPid(0)
		bailOnError(err, "error reading the current capabilities sets")
		if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
			return
		}
		// Read the set of ID mappings that we're currently using.
		uidmap, gidmap, err = GetHostIDMappings("")
		bailOnError(err, "error reading current ID mappings")
		// Just reuse them.
		for i := range uidmap {
			uidmap[i].HostID = uidmap[i].ContainerID
		}
		for i := range gidmap {
			gidmap[i].HostID = gidmap[i].ContainerID
		}
	}

	// Unlike most uses of reexec or unshare, we're using a name that
	// _won't_ be recognized as a registered reexec handler, since we
	// _want_ to fall through reexec.Init() to the normal main().
	cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...)

	// If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again.
	err = os.Setenv(UsernsEnvName, "1")
	bailOnError(err, "error setting %s=1 in environment", UsernsEnvName)

	// Set the default isolation type to use the "rootless" method.
	if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
		if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
			logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
			os.Exit(1)
		}
	}

	// Reuse our stdio.
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	// Set up a new user namespace with the ID mapping.
	cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS
	cmd.UseNewuidmap = uidNum != 0
	cmd.UidMappings = uidmap
	cmd.UseNewgidmap = uidNum != 0
	cmd.GidMappings = gidmap
	cmd.GidMappingsEnableSetgroups = true

	// Finish up.
	logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
	ExecRunnable(cmd)
}

// ExecRunnable runs the specified unshare command, captures its exit status,
// and exits with the same status.
func ExecRunnable(cmd Runnable) {
	if err := cmd.Run(); err != nil {
		if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
			if exitError.ProcessState.Exited() {
				if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
					if waitStatus.Exited() {
						logrus.Errorf("%v", exitError)
						os.Exit(waitStatus.ExitStatus())
					}
					if waitStatus.Signaled() {
						logrus.Errorf("%v", exitError)
						os.Exit(int(waitStatus.Signal()) + 128)
					}
				}
			}
		}
		logrus.Errorf("%v", err)
		logrus.Errorf("(unable to determine exit status)")
		os.Exit(1)
	}
	os.Exit(0)
}

// getHostIDMappings reads mappings from the named node under /proc.
func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
	var mappings []specs.LinuxIDMapping
	f, err := os.Open(path)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		fields := strings.Fields(line)
		if len(fields) != 3 {
			return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
		}
		cid, err := strconv.ParseUint(fields[0], 10, 32)
		if err != nil {
			return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
		}
		hid, err := strconv.ParseUint(fields[1], 10, 32)
		if err != nil {
			return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
		}
		size, err := strconv.ParseUint(fields[2], 10, 32)
		if err != nil {
			return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
		}
		mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
	}
	return mappings, nil
}

// GetHostIDMappings reads mappings for the specified process (or the current
// process if pid is "self" or an empty string) from the kernel.
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
	if pid == "" {
		pid = "self"
	}
	uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid))
	if err != nil {
		return nil, nil, err
	}
	gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid))
	if err != nil {
		return nil, nil, err
	}
	return uidmap, gidmap, nil
}

// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid.
func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
	mappings, err := idtools.NewIDMappings(user, group)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
	}
	var uidmap, gidmap []specs.LinuxIDMapping
	for _, m := range mappings.UIDs() {
		uidmap = append(uidmap, specs.LinuxIDMapping{
			ContainerID: uint32(m.ContainerID),
			HostID:      uint32(m.HostID),
			Size:        uint32(m.Size),
		})
	}
	for _, m := range mappings.GIDs() {
		gidmap = append(gidmap, specs.LinuxIDMapping{
			ContainerID: uint32(m.ContainerID),
			HostID:      uint32(m.HostID),
			Size:        uint32(m.Size),
		})
	}
	return uidmap, gidmap, nil
}

// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
	uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
	if err != nil {
		return nil, nil, err
	}
	gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
	if err != nil {
		return nil, nil, err
	}
	return uid, gid, nil
}
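The file above is dense; for orientation, a consumer of this vendored package (skopeo itself, or buildah) typically only calls the re-exec helper early in main(). A minimal sketch, assuming the vendored import path shown in the diff:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/buildah/pkg/unshare"
)

func main() {
	// When running as a non-root user, re-exec this binary inside a new
	// user namespace with the caller's /etc/subuid and /etc/subgid ranges
	// mapped in; the parent waits and exits with the child's status, while
	// the child falls through and continues here as (mapped) UID 0.
	unshare.MaybeReexecUsingUserNamespace(false)

	fmt.Printf("euid=%d rootless=%v\n", os.Geteuid(), unshare.IsRootless())
}
```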
vendor/github.com/containers/buildah/pkg/unshare/unshare_cgo.go (10 lines, generated, vendored, new file)
@@ -0,0 +1,10 @@
// +build linux,cgo,!gccgo

package unshare

// #cgo CFLAGS: -Wall
// extern void _containers_unshare(void);
// void __attribute__((constructor)) init(void) {
//	_containers_unshare();
// }
import "C"
vendor/github.com/containers/buildah/pkg/unshare/unshare_gccgo.go (25 lines, generated, vendored, new file)
@@ -0,0 +1,25 @@
// +build linux,cgo,gccgo

package unshare

// #cgo CFLAGS: -Wall -Wextra
// extern void _containers_unshare(void);
// void __attribute__((constructor)) init(void) {
//	_containers_unshare();
// }
import "C"

// This next bit is straight out of libcontainer.

// AlwaysFalse is here to stay false
// (and be exported so the compiler doesn't optimize out its reference)
var AlwaysFalse bool

func init() {
	if AlwaysFalse {
		// by referencing this C init() in a noop test, it will ensure the compiler
		// links in the C function.
		// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
		C.init()
	}
}
vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go (45 lines, generated, vendored, new file)
@@ -0,0 +1,45 @@
// +build !linux

package unshare

import (
	"os"

	"github.com/containers/storage/pkg/idtools"
	"github.com/opencontainers/runtime-spec/specs-go"
)

const (
	// UsernsEnvName is the environment variable which, when set, indicates that we are running in rootless mode
	UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)

// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
	return false
}

// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
	return os.Getuid()
}

// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
	return append(os.Environ(), UsernsEnvName+"=")
}

// MaybeReexecUsingUserNamespace re-execs the process in a new user namespace
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
}

// GetHostIDMappings reads mappings for the specified process (or the current
// process if pid is "self" or an empty string) from the kernel.
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
	return nil, nil, nil
}

// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
	return nil, nil, nil
}
vendor/github.com/containers/buildah/vendor.conf (69 lines, generated, vendored, new file)
@@ -0,0 +1,69 @@
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/blang/semver v3.5.0
github.com/BurntSushi/toml v0.2.0
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/containernetworking/cni v0.7.0-rc2
github.com/containers/image v2.0.0
github.com/cyphar/filepath-securejoin v0.2.1
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
github.com/containers/storage v1.12.10
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83
github.com/docker/docker-credential-helpers v0.6.1
github.com/docker/go-connections v0.4.0
github.com/docker/go-units v0.3.2
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
github.com/docker/libnetwork 1a06131fb8a047d919f7deaf02a4c414d7884b83
github.com/fsouza/go-dockerclient v1.3.0
github.com/ghodss/yaml v1.0.0
github.com/gogo/protobuf v1.2.0
github.com/gorilla/context v1.1.1
github.com/gorilla/mux v1.6.2
github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/go-multierror v1.0.0
github.com/imdario/mergo v0.3.6
github.com/mattn/go-shellwords v1.0.3
github.com/Microsoft/go-winio v0.4.11
github.com/Microsoft/hcsshim v0.8.3
github.com/mistifyio/go-zfs v2.1.1
github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
# TODO: Gotty has not been updated since 2012. Can we find a replacement?
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools v0.8.0
github.com/opencontainers/selinux v1.1
github.com/openshift/imagebuilder v1.1.0
github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b
github.com/pkg/errors v0.8.1
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
github.com/seccomp/libseccomp-golang v0.9.0
github.com/seccomp/containers-golang v0.1
github.com/sirupsen/logrus v1.0.0
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
github.com/tchap/go-patricia v2.2.6
github.com/ulikunitz/xz v0.5.5
github.com/vbatts/tar-split v0.10.2
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema v1.1.0
golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 https://github.com/golang/crypto
golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 https://github.com/golang/net
golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f https://github.com/golang/sync
golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba https://github.com/golang/sys
golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d https://github.com/golang/text
gopkg.in/yaml.v2 v2.2.2
k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
github.com/onsi/gomega v1.4.3
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3
github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
github.com/etcd-io/bbolt v1.3.2
vendor/github.com/containers/image/README.md (14 lines changed, generated, vendored)
@@ -25,7 +25,7 @@ them as necessary, and to sign and verify images.
 The containers/image project is only a library with no user interface;
 you can either incorporate it into your Go programs, or use the `skopeo` tool:
 
-The [skopeo](https://github.com/projectatomic/skopeo) tool uses the
+The [skopeo](https://github.com/containers/skopeo) tool uses the
 containers/image library and takes advantage of many of its features,
 e.g. `skopeo copy` exposes the `containers/image/copy.Image` functionality.
 
@@ -42,7 +42,7 @@ What this project tests against dependencies-wise is located
 ## Building
 
 If you want to see what the library can do, or an example of how it is called,
-consider starting with the [skopeo](https://github.com/projectatomic/skopeo) tool
+consider starting with the [skopeo](https://github.com/containers/skopeo) tool
 instead.
 
 To integrate this library into your project, put it into `$GOPATH` or use
@@ -53,7 +53,7 @@ are also available
 
 This library, by default, also depends on the GpgME and libostree C libraries. Either install them:
 ```sh
-Fedora$ dnf install gpgme-devel libassuan-devel libostree-devel
+Fedora$ dnf install gpgme-devel libassuan-devel ostree-devel
 macOS$ brew install gpgme
 ```
 or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`)
@@ -65,13 +65,17 @@ the primary downside is that creating new signatures with the Golang-only implem
 - `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled
 and impossible to import when this build tag is in use.
 
-## Contributing
+## [Contributing](CONTRIBUTING.md)
 
+Information about contributing to this project.
+
 When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.
 
 ## License
 
-ASL 2.0
+Apache License 2.0
+
+SPDX-License-Identifier: Apache-2.0
 
 ## Contact
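As a concrete illustration of the build-tag mechanism mentioned in the diff above, a consumer could build without the libostree dependency roughly like this (the program path is a placeholder; the tag name comes from the README text):

```sh
# Build a program that vendors containers/image without requiring the
# libostree development headers; the ostree: transport becomes a stub.
go build -tags containers_image_ostree_stub ./cmd/myprogram
```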
vendor/github.com/containers/image/copy/copy.go (650 lines changed, generated, vendored)
@@ -2,54 +2,50 @@ package copy
 
 import (
 	"bytes"
-	"compress/gzip"
+	"context"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"reflect"
 	"runtime"
 	"strings"
+	"sync"
 	"time"
 
-	pb "gopkg.in/cheggaaa/pb.v1"
-
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/image"
 	"github.com/containers/image/manifest"
+	"github.com/containers/image/pkg/blobinfocache"
 	"github.com/containers/image/pkg/compression"
 	"github.com/containers/image/signature"
 	"github.com/containers/image/transports"
 	"github.com/containers/image/types"
-	"github.com/opencontainers/go-digest"
+	"github.com/klauspost/pgzip"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"github.com/vbauerster/mpb"
+	"github.com/vbauerster/mpb/decor"
+	"golang.org/x/crypto/ssh/terminal"
+	"golang.org/x/sync/semaphore"
 )
 
 type digestingReader struct {
-	source           io.Reader
-	digester         digest.Digester
-	expectedDigest   digest.Digest
-	validationFailed bool
+	source              io.Reader
+	digester            digest.Digester
+	expectedDigest      digest.Digest
+	validationFailed    bool
+	validationSucceeded bool
 }
 
-// imageCopier allows us to keep track of diffID values for blobs, and other
-// data, that we're copying between images, and cache other information that
-// might allow us to take some shortcuts
-type imageCopier struct {
-	copiedBlobs       map[digest.Digest]digest.Digest
-	cachedDiffIDs     map[digest.Digest]digest.Digest
-	manifestUpdates   *types.ManifestUpdateOptions
-	dest              types.ImageDestination
-	src               types.Image
-	rawSource         types.ImageSource
-	diffIDsAreNeeded  bool
-	canModifyManifest bool
-	reportWriter      io.Writer
-	progressInterval  time.Duration
-	progress          chan types.ProgressProperties
-}
+// maxParallelDownloads is used to limit the maximum number of parallel
+// downloads. Let's follow Firefox by limiting it to 6.
+var maxParallelDownloads = 6
 
 // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
-// and set validationFailed to true if the source stream does not match expectedDigest.
+// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest
+// (neither is set if EOF is never reached).
 func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
 	if err := expectedDigest.Validate(); err != nil {
 		return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
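Aside: the `golang.org/x/sync/semaphore` import and `maxParallelDownloads` added above exist to bound the number of concurrent blob copies. The underlying pattern, sketched here with the real semaphore API but illustrative worker code (this is not the code of the diff, which continues below):

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	// Allow at most 6 concurrent "downloads", the same limit as above.
	sem := semaphore.NewWeighted(6)
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			break // Acquire only fails if ctx is cancelled.
		}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			defer sem.Release(1)
			fmt.Println("copying blob", i) // stand-in for the real per-blob copy
		}(i)
	}
	wg.Wait()
}
```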
@@ -82,10 +78,34 @@ func (d *digestingReader) Read(p []byte) (int, error) {
|
||||
d.validationFailed = true
|
||||
return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
|
||||
}
|
||||
d.validationSucceeded = true
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// copier allows us to keep track of diffID values for blobs, and other
|
||||
// data shared across one or more images in a possible manifest list.
|
||||
type copier struct {
|
||||
dest types.ImageDestination
|
||||
rawSource types.ImageSource
|
||||
reportWriter io.Writer
|
||||
progressOutput io.Writer
|
||||
progressInterval time.Duration
|
||||
progress chan types.ProgressProperties
|
||||
blobInfoCache types.BlobInfoCache
|
||||
copyInParallel bool
|
||||
}
|
||||
|
||||
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
|
||||
type imageCopier struct {
|
||||
c *copier
|
||||
manifestUpdates *types.ManifestUpdateOptions
|
||||
src types.Image
|
||||
diffIDsAreNeeded bool
|
||||
canModifyManifest bool
|
||||
canSubstituteBlobs bool
|
||||
}
|
||||
|
||||
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
|
||||
type Options struct {
|
||||
RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
|
||||
@@ -95,11 +115,14 @@ type Options struct {
|
||||
DestinationCtx *types.SystemContext
|
||||
ProgressInterval time.Duration // time to wait between reports to signal the progress channel
|
||||
Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
|
||||
// manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type
|
||||
ForceManifestMIMEType string
|
||||
}
|
||||
|
||||
// Image copies image from srcRef to destRef, using policyContext to validate
|
||||
// source image admissibility.
|
||||
func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (retErr error) {
|
||||
// source image admissibility. It returns the manifest which was written to
|
||||
// the new copy of the image.
|
||||
func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (manifest []byte, retErr error) {
|
||||
// NOTE this function uses an output parameter for the error return value.
|
||||
// Setting this and returning is the ideal way to return an error.
|
||||
//
|
||||
@@ -115,13 +138,9 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
|
||||
reportWriter = options.ReportWriter
|
||||
}
|
||||
|
||||
writeReport := func(f string, a ...interface{}) {
|
||||
fmt.Fprintf(reportWriter, f, a...)
|
||||
}
|
||||
|
||||
dest, err := destRef.NewImageDestination(options.DestinationCtx)
|
||||
dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
|
||||
return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
|
||||
}
|
||||
defer func() {
|
||||
if err := dest.Close(); err != nil {
|
||||
@@ -129,99 +148,176 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
|
||||
}
|
||||
}()
|
||||
|
||||
rawSource, err := srcRef.NewImageSource(options.SourceCtx)
|
||||
rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
|
||||
return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
|
||||
}
|
||||
unparsedImage := image.UnparsedFromSource(rawSource)
|
||||
defer func() {
|
||||
if unparsedImage != nil {
|
||||
if err := unparsedImage.Close(); err != nil {
|
||||
retErr = errors.Wrapf(retErr, " (unparsed: %v)", err)
|
||||
}
|
||||
if err := rawSource.Close(); err != nil {
|
||||
retErr = errors.Wrapf(retErr, " (src: %v)", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// If reportWriter is not a TTY (e.g., when piping to a file), do not
|
||||
// print the progress bars to avoid long and hard to parse output.
|
||||
// createProgressBar() will print a single line instead.
|
||||
progressOutput := reportWriter
|
||||
if !isTTY(reportWriter) {
|
||||
progressOutput = ioutil.Discard
|
||||
}
|
||||
copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
|
||||
c := &copier{
|
||||
dest: dest,
|
||||
rawSource: rawSource,
|
||||
reportWriter: reportWriter,
|
||||
progressOutput: progressOutput,
|
||||
progressInterval: options.ProgressInterval,
|
||||
progress: options.Progress,
|
||||
copyInParallel: copyInParallel,
|
||||
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
|
||||
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
|
||||
// we might want to add a separate CommonCtx — or would that be too confusing?
|
||||
blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
|
||||
}
|
||||
|
||||
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
|
||||
multiImage, err := isMultiImage(ctx, unparsedToplevel)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
|
||||
}
|
||||
|
||||
if !multiImage {
|
||||
// The simple case: Just copy a single image.
|
||||
if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// This is a manifest list. Choose a single image and copy it.
|
||||
// FIXME: Copy to destinations which support manifest lists, one image at a time.
|
||||
instanceDigest, err := image.ChooseManifestInstanceFromManifestList(ctx, options.SourceCtx, unparsedToplevel)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
|
||||
}
|
||||
logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest)
|
||||
unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
|
||||
|
||||
if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.dest.Commit(ctx); err != nil {
|
||||
return nil, errors.Wrap(err, "Error committing the finished image")
|
||||
}
|
||||
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
// Image copies a single (on-manifest-list) image unparsedImage, using policyContext to validate
|
||||
// source image admissibility.
|
||||
func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) {
	// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
	// Make sure we fail cleanly in such cases.
	multiImage, err := isMultiImage(ctx, unparsedImage)
	if err != nil {
		// FIXME FIXME: How to name a reference for the sub-image?
		return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
	}
	if multiImage {
		return nil, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
	}

	// Please keep this policy check BEFORE reading any other information about the image.
	if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
		return errors.Wrap(err, "Source image rejected")
	// (the multiImage check above only matches the MIME type, which we have received anyway.
	// Actual parsing of anything should be deferred.)
	if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
		return nil, errors.Wrap(err, "Source image rejected")
	}
	src, err := image.FromUnparsedImage(unparsedImage)
	src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
	if err != nil {
		return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef))
		return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
	}
	unparsedImage = nil
	defer func() {
		if err := src.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (source: %v)", err)

	// If the destination is a digested reference, make a note of that, determine what digest value we're
	// expecting, and check that the source manifest matches it.
	destIsDigestedReference := false
	if named := c.dest.Reference().DockerReference(); named != nil {
		if digested, ok := named.(reference.Digested); ok {
			destIsDigestedReference = true
			sourceManifest, _, err := src.Manifest(ctx)
			if err != nil {
				return nil, errors.Wrapf(err, "Error reading manifest from source image")
			}
			matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
			if err != nil {
				return nil, errors.Wrapf(err, "Error computing digest of source image's manifest")
			}
			if !matches {
				return nil, errors.New("Digest of source image's manifest would not match destination reference")
			}
		}
	}()

	if err := checkImageDestinationForCurrentRuntimeOS(src, dest); err != nil {
		return err
	}

	if src.IsMultiImage() {
		return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef))
	if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil {
		return nil, err
	}

	var sigs [][]byte
	if options.RemoveSignatures {
		sigs = [][]byte{}
	} else {
		writeReport("Getting image source signatures\n")
		s, err := src.Signatures(context.TODO())
		c.Printf("Getting image source signatures\n")
		s, err := src.Signatures(ctx)
		if err != nil {
			return errors.Wrap(err, "Error reading signatures")
			return nil, errors.Wrap(err, "Error reading signatures")
		}
		sigs = s
	}
	if len(sigs) != 0 {
		writeReport("Checking if image destination supports signatures\n")
		if err := dest.SupportsSignatures(); err != nil {
			return errors.Wrap(err, "Can not copy signatures")
		c.Printf("Checking if image destination supports signatures\n")
		if err := c.dest.SupportsSignatures(ctx); err != nil {
			return nil, errors.Wrap(err, "Can not copy signatures")
		}
	}

	canModifyManifest := len(sigs) == 0
	manifestUpdates := types.ManifestUpdateOptions{}
	manifestUpdates.InformationOnly.Destination = dest
	ic := imageCopier{
		c:               c,
		manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
		src:             src,
		// diffIDsAreNeeded is computed later
		canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
	}
	// Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
	// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
	// The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
	// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
	// that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
	// and we would reuse and sign it.
	ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == ""

	if err := updateEmbeddedDockerReference(&manifestUpdates, dest, src, canModifyManifest); err != nil {
		return err
	if err := ic.updateEmbeddedDockerReference(); err != nil {
		return nil, err
	}

	// We compute preferredManifestMIMEType only to show it in error messages.
	// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
	preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, dest.SupportedManifestMIMETypes(), canModifyManifest)
	preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType)
	if err != nil {
		return err
		return nil, err
	}

	// If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here.
	ic := imageCopier{
		copiedBlobs:       make(map[digest.Digest]digest.Digest),
		cachedDiffIDs:     make(map[digest.Digest]digest.Digest),
		manifestUpdates:   &manifestUpdates,
		dest:              dest,
		src:               src,
		rawSource:         rawSource,
		diffIDsAreNeeded:  src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates),
		canModifyManifest: canModifyManifest,
		reportWriter:      reportWriter,
		progressInterval:  options.ProgressInterval,
		progress:          options.Progress,
	}
	// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
	ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)

	if err := ic.copyLayers(); err != nil {
		return err
	if err := ic.copyLayers(ctx); err != nil {
		return nil, err
	}

	// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
	// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
	// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
	// So, try the preferred manifest MIME type. If the process succeeds, fine…
	manifest, err := ic.copyUpdatedConfigAndManifest()
	manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx)
	if err != nil {
		logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
		// … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
@@ -229,22 +325,22 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
			// We don’t have other options.
			// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
			// Don’t bother the user with MIME types if we have no choice.
			return err
			return nil, err
		}
		// If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
		// So if we are here, we will definitely be trying to convert the manifest.
		// With !canModifyManifest, that would just be a string of repeated failures for the same reason,
		// With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason,
		// so let’s bail out early and with a better error message.
		if !canModifyManifest {
			return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
		if !ic.canModifyManifest {
			return nil, errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
		}

		// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
		errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)}
		for _, manifestMIMEType := range otherManifestMIMETypeCandidates {
			logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
			manifestUpdates.ManifestMIMEType = manifestMIMEType
			attemptedManifest, err := ic.copyUpdatedConfigAndManifest()
			ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
			attemptedManifest, err := ic.copyUpdatedConfigAndManifest(ctx)
			if err != nil {
				logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
				errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
@@ -252,45 +348,54 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
			}

			// We have successfully uploaded a manifest.
			manifest = attemptedManifest
			manifestBytes = attemptedManifest
			errs = nil // Mark this as a success so that we don't abort below.
			break
		}
		if errs != nil {
			return fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
			return nil, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
		}
	}

	if options.SignBy != "" {
		newSig, err := createSignature(dest, manifest, options.SignBy, reportWriter)
		newSig, err := c.createSignature(manifestBytes, options.SignBy)
		if err != nil {
			return err
			return nil, err
		}
		sigs = append(sigs, newSig)
	}

	writeReport("Storing signatures\n")
	if err := dest.PutSignatures(sigs); err != nil {
		return errors.Wrap(err, "Error writing signatures")
	c.Printf("Storing signatures\n")
	if err := c.dest.PutSignatures(ctx, sigs); err != nil {
		return nil, errors.Wrap(err, "Error writing signatures")
	}

	if err := dest.Commit(); err != nil {
		return errors.Wrap(err, "Error committing the finished image")
	}

	return nil
	return manifestBytes, nil
}

func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageDestination) error {
// Printf writes a formatted string to c.reportWriter.
// Note that the method name Printf is not entirely arbitrary: (go tool vet)
// has a built-in list of functions/methods (whatever object they are for)
// which have their format strings checked; for other names we would have
// to pass a parameter to every (go tool vet) invocation.
func (c *copier) Printf(format string, a ...interface{}) {
	fmt.Fprintf(c.reportWriter, format, a...)
}

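The Printf comment above leans on a real go vet behavior: vet's printf checker recognizes functions and methods named like Printf and validates their format strings with no extra configuration. A minimal illustration with a stand-in type (not the vendored copier):

package main

import (
	"fmt"
	"io"
	"os"
)

type reporter struct{ w io.Writer }

// Because the method is named Printf, `go tool vet` applies its printf
// format-string check to every call site automatically.
func (r *reporter) Printf(format string, a ...interface{}) {
	fmt.Fprintf(r.w, format, a...)
}

func main() {
	r := &reporter{w: os.Stdout}
	r.Printf("copied %d layers\n", 3)
	// r.Printf("copied %d layers\n", "three") // vet would flag: wrong type for %d
}
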
func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
	if dest.MustMatchRuntimeOS() {
		c, err := src.OCIConfig()
		wantedOS := runtime.GOOS
		if sys != nil && sys.OSChoice != "" {
			wantedOS = sys.OSChoice
		}
		c, err := src.OCIConfig(ctx)
		if err != nil {
			return errors.Wrapf(err, "Error parsing image configuration")
		}
		osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, runtime.GOOS)
		if runtime.GOOS == "windows" && c.OS == "linux" {
		osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS)
		if wantedOS == "windows" && c.OS == "linux" {
			return osErr
		} else if runtime.GOOS != "windows" && c.OS == "windows" {
		} else if wantedOS != "windows" && c.OS == "windows" {
			return osErr
		}
	}
@@ -298,57 +403,119 @@ func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageD
}

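The rewritten check validates against a wanted OS that callers can override through SystemContext.OSChoice instead of being pinned to runtime.GOOS. A hedged sketch of a caller supplying that override (the field comes from the diff above; the rest is illustrative):

package main

import (
	"fmt"

	"github.com/containers/image/types"
)

func main() {
	// With OSChoice set, checkImageDestinationForCurrentRuntimeOS compares the
	// image config's OS against "linux" rather than runtime.GOOS.
	sys := &types.SystemContext{OSChoice: "linux"}
	fmt.Println("wanted OS:", sys.OSChoice)
}
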
// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
func updateEmbeddedDockerReference(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, canModifyManifest bool) error {
	destRef := dest.Reference().DockerReference()
func (ic *imageCopier) updateEmbeddedDockerReference() error {
	if ic.c.dest.IgnoresEmbeddedDockerReference() {
		return nil // Destination would prefer us not to update the embedded reference.
	}
	destRef := ic.c.dest.Reference().DockerReference()
	if destRef == nil {
		return nil // Destination does not care about Docker references
	}
	if !src.EmbeddedDockerReferenceConflicts(destRef) {
	if !ic.src.EmbeddedDockerReferenceConflicts(destRef) {
		return nil // No reference embedded in the manifest, or it matches destRef already.
	}

	if !canModifyManifest {
	if !ic.canModifyManifest {
		return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway",
			transports.ImageName(dest.Reference()), destRef.String())
			transports.ImageName(ic.c.dest.Reference()), destRef.String())
	}
	manifestUpdates.EmbeddedDockerReference = destRef
	ic.manifestUpdates.EmbeddedDockerReference = destRef
	return nil
}

// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
func (ic *imageCopier) copyLayers() error {
// isTTY returns true if the io.Writer is a file and a tty.
func isTTY(w io.Writer) bool {
	if f, ok := w.(*os.File); ok {
		return terminal.IsTerminal(int(f.Fd()))
	}
	return false
}

// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
func (ic *imageCopier) copyLayers(ctx context.Context) error {
	srcInfos := ic.src.LayerInfos()
	destInfos := []types.BlobInfo{}
	diffIDs := []digest.Digest{}
	for _, srcLayer := range srcInfos {
		var (
			destInfo types.BlobInfo
			diffID   digest.Digest
			err      error
		)
		if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
	numLayers := len(srcInfos)
	updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
	if err != nil {
		return err
	}
	srcInfosUpdated := false
	if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
		if !ic.canModifyManifest {
			return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden")
		}
		srcInfos = updatedSrcInfos
		srcInfosUpdated = true
	}

	type copyLayerData struct {
		destInfo types.BlobInfo
		diffID   digest.Digest
		err      error
	}

	// copyGroup is used to determine if all layers are copied
	copyGroup := sync.WaitGroup{}
	copyGroup.Add(numLayers)

	// copySemaphore is used to limit the number of parallel downloads to
	// avoid malicious images causing troubles and to be nice to servers.
	var copySemaphore *semaphore.Weighted
	if ic.c.copyInParallel {
		copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads))
	} else {
		copySemaphore = semaphore.NewWeighted(int64(1))
	}

	data := make([]copyLayerData, numLayers)
	copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) {
		defer copySemaphore.Release(1)
		defer copyGroup.Done()
		cld := copyLayerData{}
		if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
			// DiffIDs are, currently, needed only when converting from schema1.
			// In which case src.LayerInfos will not have URLs because schema1
			// does not support them.
			if ic.diffIDsAreNeeded {
				return errors.New("getting DiffID for foreign layers is unimplemented")
				cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
			} else {
				cld.destInfo = srcLayer
				logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
			}
			destInfo = srcLayer
			fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name())
		} else {
			destInfo, diffID, err = ic.copyLayer(srcLayer)
			if err != nil {
				return err
			}
			cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
		}
		destInfos = append(destInfos, destInfo)
		diffIDs = append(diffIDs, diffID)
		data[index] = cld
	}

	func() { // A scope for defer
		progressPool, progressCleanup := ic.c.newProgressPool(ctx)
		defer progressCleanup()

		for i, srcLayer := range srcInfos {
			copySemaphore.Acquire(ctx, 1)
			go copyLayerHelper(i, srcLayer, progressPool)
		}

		// Wait for all layers to be copied
		copyGroup.Wait()
	}()

	destInfos := make([]types.BlobInfo, numLayers)
	diffIDs := make([]digest.Digest, numLayers)
	for i, cld := range data {
		if cld.err != nil {
			return cld.err
		}
		destInfos[i] = cld.destInfo
		diffIDs[i] = cld.diffID
	}

	ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
	if ic.diffIDsAreNeeded {
		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
	}
	if layerDigestsDiffer(srcInfos, destInfos) {
	if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
		ic.manifestUpdates.LayerInfos = destInfos
	}
	return nil
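The new copyLayers above is the standard bounded-concurrency shape: semaphore.Weighted caps in-flight copies, a WaitGroup signals completion, and per-index writes into a pre-sized results slice keep layer order without locking. A self-contained sketch of the same shape, assuming golang.org/x/sync/semaphore (here the Acquire error is checked, which the diff omits):

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	const maxParallel = 6 // stand-in for maxParallelDownloads
	layers := []string{"a", "b", "c", "d", "e", "f", "g", "h"}

	ctx := context.Background()
	sem := semaphore.NewWeighted(maxParallel)
	var group sync.WaitGroup
	results := make([]string, len(layers)) // indexed writes: order preserved, no mutex

	for i, l := range layers {
		if err := sem.Acquire(ctx, 1); err != nil {
			break // ctx cancelled; nothing more to start
		}
		group.Add(1)
		go func(i int, l string) {
			defer sem.Release(1)
			defer group.Done()
			results[i] = "copied " + l // stand-in for ic.copyLayer
		}(i, l)
	}
	group.Wait()
	fmt.Println(results)
}
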
@@ -369,7 +536,7 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool {

// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
// stores the resulting config and manifest to the destination, and returns the stored manifest.
func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) {
func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte, error) {
	pendingImage := ic.src
	if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) {
		if !ic.canModifyManifest {
@@ -379,45 +546,94 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) {
		// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
		// So, this can only happen if we are trying to upload using one of the other MIME type candidates.
		// Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
		// when ic.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
		// when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
		// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
		// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
		return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
		}
		pi, err := ic.src.UpdatedImage(*ic.manifestUpdates)
		pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
		if err != nil {
			return nil, errors.Wrap(err, "Error creating an updated image manifest")
		}
		pendingImage = pi
	}
	manifest, _, err := pendingImage.Manifest()
	manifest, _, err := pendingImage.Manifest(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "Error reading manifest")
	}

	if err := ic.copyConfig(pendingImage); err != nil {
	if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
		return nil, err
	}

	fmt.Fprintf(ic.reportWriter, "Writing manifest to image destination\n")
	if err := ic.dest.PutManifest(manifest); err != nil {
	ic.c.Printf("Writing manifest to image destination\n")
	if err := ic.c.dest.PutManifest(ctx, manifest); err != nil {
		return nil, errors.Wrap(err, "Error writing manifest")
	}
	return manifest, nil
}

// newProgressPool creates a *mpb.Progress and a cleanup function.
// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
	ctx, cancel := context.WithCancel(ctx)
	pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
	return pool, func() {
		cancel()
		pool.Wait()
	}
}

// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
// is ioutil.Discard, the progress bar's output will be discarded.
func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
	// shortDigestLen is the length of the digest used for blobs.
	const shortDigestLen = 12

	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
	// Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
	maxPrefixLen := len("Copying blob ") + shortDigestLen
	if len(prefix) > maxPrefixLen {
		prefix = prefix[:maxPrefixLen]
	}

	bar := pool.AddBar(info.Size,
		mpb.BarClearOnComplete(),
		mpb.PrependDecorators(
			decor.Name(prefix),
		),
		mpb.AppendDecorators(
			decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
		),
	)
	if c.progressOutput == ioutil.Discard {
		c.Printf("Copying %s %s\n", kind, info.Digest)
	}
	return bar
}

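newProgressPool and createProgressBar together define the mpb usage pattern: one pool owning the render loop, one bar per blob advanced via ProxyReader, and shutdown by cancelling the pool's context and calling Wait. A minimal sketch using only the calls visible in the diff (import paths assumed to match the vendored mpb version):

package main

import (
	"context"
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(os.Stderr), mpb.WithContext(ctx))

	src := strings.NewReader(strings.Repeat("x", 1<<16))
	bar := pool.AddBar(int64(src.Len()),
		mpb.BarClearOnComplete(),
		mpb.PrependDecorators(decor.Name("Copying blob 000000000000")),
		mpb.AppendDecorators(decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " done")),
	)

	// The bar advances as the wrapped reader is drained, as in copyBlobFromStream.
	if _, err := io.Copy(ioutil.Discard, bar.ProxyReader(src)); err != nil {
		panic(err)
	}

	cancel()    // stop feeding the pool…
	pool.Wait() // …then wait for the final render, mirroring newProgressPool's cleanup.
}
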
// copyConfig copies config.json, if any, from src to dest.
func (ic *imageCopier) copyConfig(src types.Image) error {
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
	srcInfo := src.ConfigInfo()
	if srcInfo.Digest != "" {
		fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest)
		configBlob, err := src.ConfigBlob()
		configBlob, err := src.ConfigBlob(ctx)
		if err != nil {
			return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
		}
		destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false)

		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
			progressPool, progressCleanup := c.newProgressPool(ctx)
			defer progressCleanup()
			bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
			destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
			if err != nil {
				return types.BlobInfo{}, err
			}
			bar.SetTotal(int64(len(configBlob)), true)
			return destInfo, nil
		}()
		if err != nil {
			return err
			return nil
		}
		if destInfo.Digest != srcInfo.Digest {
			return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
@@ -435,61 +651,65 @@ type diffIDResult struct {

// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
	// Check if we already have a blob with this digest
	haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo)
	if err != nil {
		return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
	}
	// If we already have a cached diffID for this blob, we don't need to compute it
	diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "")
	// If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
	if haveBlob && !diffIDIsNeeded {
		// Check the blob sizes match, if we were given a size this time
		if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
			return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
		}
		srcInfo.Size = extantBlobSize
		// Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
		blobinfo, err := ic.dest.ReapplyBlob(srcInfo)
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""

	// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
	if !diffIDIsNeeded {
		reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
		if err != nil {
			return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
			return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
		}
		if reused {
			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
			bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists")
			bar.SetTotal(0, true)
			return blobInfo, cachedDiffID, nil
		}
		fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest)
		return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err
	}

	// Fallback: copy the layer, computing the diffID if we need to do so
	fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest)
	srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo)
	srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
	if err != nil {
		return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
	}
	defer srcStream.Close()

	blobInfo, diffIDChan, err := ic.copyLayerFromStream(srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
		diffIDIsNeeded)
	bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")

	blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar)
	if err != nil {
		return types.BlobInfo{}, "", err
	}
	var diffIDResult diffIDResult // = {digest:""}

	diffID := cachedDiffID
	if diffIDIsNeeded {
		diffIDResult = <-diffIDChan
		if diffIDResult.err != nil {
			return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
		select {
		case <-ctx.Done():
			return types.BlobInfo{}, "", ctx.Err()
		case diffIDResult := <-diffIDChan:
			if diffIDResult.err != nil {
				return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
			}
			logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
			// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
			// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
			ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
			diffID = diffIDResult.digest
		}
		logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
		ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
	}
	return blobInfo, diffIDResult.digest, nil

	bar.SetTotal(srcInfo.Size, true)
	return blobInfo, diffID, nil
}

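The fast path in copyLayer depends on the TryReusingBlob contract: (false, _, nil) means merely "not present, go copy it", while a non-nil error is an actual failure. A hedged sketch of a caller honoring that contract (the helper and its name are mine, not vendored code):

package copyexample

import (
	"context"

	"github.com/containers/image/types"
)

// tryReuse wraps TryReusingBlob: ok=false with a nil error simply means the
// caller must fall back to GetBlob/PutBlob; only a non-nil error is a failure.
func tryReuse(ctx context.Context, dest types.ImageDestination, info types.BlobInfo,
	cache types.BlobInfoCache, canSubstitute bool) (types.BlobInfo, bool, error) {
	reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, canSubstitute)
	if err != nil {
		return types.BlobInfo{}, false, err // unexpected failure, not "absent"
	}
	if !reused {
		return types.BlobInfo{}, false, nil // absent: stream the blob as usual
	}
	// With canSubstitute, reusedInfo may name an equivalent blob, not the input one.
	return reusedInfo, true, nil
}
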
// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
// it copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest,
// perhaps compressing the stream if canCompress,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
	diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) {
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
	diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
	var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
	var diffIDChan chan diffIDResult

@@ -513,7 +733,7 @@ func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.Bl
			return pipeWriter
		}
	}
	blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success
	blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success
	return blobInfo, diffIDChan, err
	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}

@@ -537,6 +757,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
		if err != nil {
			return "", err
		}
		defer s.Close()
		stream = s
	}

@@ -547,16 +768,16 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
// perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied blob.
func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
	getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
	canCompress bool) (types.BlobInfo, error) {
	canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
	// The copying happens through a pipeline of connected io.Readers.
	// === Input: srcStream

	// === Process input through digestingReader to validate against the expected digest.
	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
	// use a separate validation failure indicator.
	// Note that we don't use a stronger "validationSucceeded" indicator, because
	// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
	// dest.PutBlob may detect that the layer already exists, in which case we don't
	// read stream to the end, and validation does not happen.
	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
@@ -572,16 +793,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
		return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
	}
	isCompressed := decompressor != nil

	// === Report progress using a pb.Reader.
	bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
	bar.Output = ic.reportWriter
	bar.SetMaxWidth(80)
	bar.ShowTimeLeft = false
	bar.ShowPercent = false
	bar.Start()
	destStream = bar.NewProxyReader(destStream)
	defer bar.Finish()
	destStream = bar.ProxyReader(destStream)

	// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
	var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
@@ -590,13 +802,12 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
		originalLayerReader = destStream
	}

	// === Compress the layer if it is uncompressed and compression is desired
	// === Deal with layer compression/decompression if necessary
	var inputInfo types.BlobInfo
	if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() {
		logrus.Debugf("Using original blob without modification")
		inputInfo = srcInfo
	} else {
	var compressionOperation types.LayerCompression
	if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
		logrus.Debugf("Compressing blob on the fly")
		compressionOperation = types.Compress
		pipeReader, pipeWriter := io.Pipe()
		defer pipeReader.Close()

@@ -607,21 +818,36 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
		destStream = pipeReader
		inputInfo.Digest = ""
		inputInfo.Size = -1
	} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
		logrus.Debugf("Blob will be decompressed")
		compressionOperation = types.Decompress
		s, err := decompressor(destStream)
		if err != nil {
			return types.BlobInfo{}, err
		}
		defer s.Close()
		destStream = s
		inputInfo.Digest = ""
		inputInfo.Size = -1
	} else {
		logrus.Debugf("Using original blob without modification")
		compressionOperation = types.PreserveOriginal
		inputInfo = srcInfo
	}

	// === Report progress using the ic.progress channel, if required.
	if ic.progress != nil && ic.progressInterval > 0 {
	// === Report progress using the c.progress channel, if required.
	if c.progress != nil && c.progressInterval > 0 {
		destStream = &progressReader{
			source:   destStream,
			channel:  ic.progress,
			interval: ic.progressInterval,
			channel:  c.progress,
			interval: c.progressInterval,
			artifact: srcInfo,
			lastTime: time.Now(),
		}
	}

	// === Finally, send the layer stream to dest.
	uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
	uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
	if err != nil {
		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
	}
@@ -644,6 +870,22 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
	}
	if digestingReader.validationSucceeded {
		// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
		// srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
		// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
		// (because inputInfo.Digest == "", this must have been computed afresh).
		switch compressionOperation {
		case types.PreserveOriginal:
			break // Do nothing, we have only one digest and we might not have even verified it.
		case types.Compress:
			c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
		case types.Decompress:
			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
		default:
			return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
		}
	}
	return uploadedInfo, nil
}

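copyBlobFromStream anchors its security story in newDigestingReader: a wrapper that hashes bytes as they flow and only marks validation as succeeded at a clean, digest-matching EOF. The vendored implementation is not part of this hunk; the following is a simplified sketch of the same idea, not the vendored code:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/opencontainers/go-digest"
)

// checkedReader hashes everything read through it; at EOF it verifies the
// stream against the expected digest and records whether validation ran.
type checkedReader struct {
	src                 io.Reader
	digester            digest.Digester
	expected            digest.Digest
	validationSucceeded bool
}

func newCheckedReader(src io.Reader, expected digest.Digest) *checkedReader {
	return &checkedReader{src: src, digester: expected.Algorithm().Digester(), expected: expected}
}

func (c *checkedReader) Read(p []byte) (int, error) {
	n, err := c.src.Read(p)
	if n > 0 {
		c.digester.Hash().Write(p[:n]) // hash.Hash.Write never returns an error
	}
	if err == io.EOF {
		if c.digester.Digest() != c.expected {
			return n, fmt.Errorf("digest mismatch: got %s, want %s", c.digester.Digest(), c.expected)
		}
		c.validationSucceeded = true // only set on a clean, matching EOF
	}
	return n, err
}

func main() {
	payload := "hello"
	r := newCheckedReader(strings.NewReader(payload), digest.FromString(payload))
	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		panic(err)
	}
	fmt.Println("validated:", r.validationSucceeded)
}
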
@@ -654,7 +896,7 @@ func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
	}()

	zipper := gzip.NewWriter(dest)
	zipper := pgzip.NewWriter(dest)
	defer zipper.Close()

	_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()

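The gzip → pgzip switch in compressGoroutine works because klauspost/pgzip mirrors the standard gzip.Writer API while compressing blocks on multiple goroutines. A sketch of the drop-in usage (assuming the upstream github.com/klauspost/pgzip package):

package main

import (
	"io"
	"os"

	pgzip "github.com/klauspost/pgzip"
)

func main() {
	// Same shape as compressGoroutine: stream src through a writer into dest.
	zipper := pgzip.NewWriter(os.Stdout) // API-compatible with gzip.NewWriter
	defer zipper.Close()

	if _, err := io.Copy(zipper, os.Stdin); err != nil {
		panic(err)
	}
}
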
35  vendor/github.com/containers/image/copy/manifest.go  (generated, vendored)
@@ -1,6 +1,7 @@
package copy

import (
	"context"
	"strings"

	"github.com/containers/image/manifest"
@@ -37,15 +38,24 @@ func (os *orderedSet) append(s string) {
	}
}

// determineManifestConversion updates manifestUpdates to convert manifest to a supported MIME type, if necessary and canModifyManifest.
// Note that the conversion will only happen later, through src.UpdatedImage
// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest.
// Note that the conversion will only happen later, through ic.src.UpdatedImage
// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
// and a list of other possible alternatives, in order.
func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, src types.Image, destSupportedManifestMIMETypes []string, canModifyManifest bool) (string, []string, error) {
	_, srcType, err := src.Manifest()
func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) {
	_, srcType, err := ic.src.Manifest(ctx)
	if err != nil { // This should have been cached?!
		return "", nil, errors.Wrap(err, "Error reading manifest")
	}
	normalizedSrcType := manifest.NormalizedMIMEType(srcType)
	if srcType != normalizedSrcType {
		logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
		srcType = normalizedSrcType
	}

	if forceManifestMIMEType != "" {
		destSupportedManifestMIMETypes = []string{forceManifestMIMEType}
	}

	if len(destSupportedManifestMIMETypes) == 0 {
		return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
@@ -67,10 +77,10 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s
	if _, ok := supportedByDest[srcType]; ok {
		prioritizedTypes.append(srcType)
	}
	if !canModifyManifest {
		// We could also drop the !canModifyManifest parameter and have the caller
	if !ic.canModifyManifest {
		// We could also drop the !ic.canModifyManifest check and have the caller
		// make the choice; it is already doing that to an extent, to improve error
		// messages. But it is nice to hide the “if !canModifyManifest, do no conversion”
		// messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion”
		// special case in here; the caller can then worry (or not) only about a good UI.
		logrus.Debugf("We can't modify the manifest, hoping for the best...")
		return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying?
@@ -94,9 +104,18 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s
	}
	preferredType := prioritizedTypes.list[0]
	if preferredType != srcType {
		manifestUpdates.ManifestMIMEType = preferredType
		ic.manifestUpdates.ManifestMIMEType = preferredType
	} else {
		logrus.Debugf("... will first try using the original manifest unmodified")
	}
	return preferredType, prioritizedTypes.list[1:], nil
}

// isMultiImage returns true if img is a list of images
func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) {
	_, mt, err := img.Manifest(ctx)
	if err != nil {
		return false, err
	}
	return manifest.MIMETypeIsMultiImage(mt), nil
}

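determineManifestConversion reduces to an ordered, de-duplicated preference list: the source MIME type leads whenever the destination supports it, followed by the destination's own priorities. A simplified, self-contained sketch of that selection (the vendored orderedSet is replaced by an inline helper):

package main

import "fmt"

// pickManifestType returns the preferred MIME type plus ordered fallbacks:
// the source type leads when the destination accepts it, then the
// destination's supported types in their own order, without duplicates.
func pickManifestType(srcType string, destSupported []string) (string, []string) {
	if len(destSupported) == 0 {
		return srcType, nil // anything goes; use the original as is
	}
	seen := map[string]bool{}
	ordered := []string{}
	appendOnce := func(t string) {
		if !seen[t] {
			seen[t] = true
			ordered = append(ordered, t)
		}
	}
	for _, t := range destSupported {
		if t == srcType {
			appendOnce(srcType) // source type first when acceptable
		}
	}
	for _, t := range destSupported {
		appendOnce(t)
	}
	return ordered[0], ordered[1:]
}

func main() {
	pref, rest := pickManifestType(
		"application/vnd.docker.distribution.manifest.v2+json",
		[]string{
			"application/vnd.docker.distribution.manifest.v1+json",
			"application/vnd.docker.distribution.manifest.v2+json",
		})
	fmt.Println(pref, rest)
}
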
14  vendor/github.com/containers/image/copy/sign.go  (generated, vendored)
@@ -1,17 +1,13 @@
package copy

import (
	"fmt"
	"io"

	"github.com/containers/image/signature"
	"github.com/containers/image/transports"
	"github.com/containers/image/types"
	"github.com/pkg/errors"
)

// createSignature creates a new signature of manifest at (identified by) dest using keyIdentity.
func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity string, reportWriter io.Writer) ([]byte, error) {
// createSignature creates a new signature of manifest using keyIdentity.
func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		return nil, errors.Wrap(err, "Error initializing GPG")
@@ -21,12 +17,12 @@ func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity s
		return nil, errors.Wrap(err, "Signing not supported")
	}

	dockerReference := dest.Reference().DockerReference()
	dockerReference := c.dest.Reference().DockerReference()
	if dockerReference == nil {
		return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
		return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
	}

	fmt.Fprintf(reportWriter, "Signing manifest\n")
	c.Printf("Signing manifest\n")
	newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity)
	if err != nil {
		return nil, errors.Wrap(err, "Error creating signature")

159  vendor/github.com/containers/image/directory/directory_dest.go  (generated, vendored)
@@ -1,22 +1,81 @@
package directory

import (
	"context"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const version = "Directory Transport Version: 1.1\n"

// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created
// using the 'dir' transport
var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data")

type dirImageDestination struct {
	ref dirReference
	ref      dirReference
	compress bool
}

// newImageDestination returns an ImageDestination for writing to an existing directory.
func newImageDestination(ref dirReference) types.ImageDestination {
	return &dirImageDestination{ref}
// newImageDestination returns an ImageDestination for writing to a directory.
func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) {
	d := &dirImageDestination{ref: ref, compress: compress}

	// If directory exists check if it is empty
	// if not empty, check whether the contents match that of a container image directory and overwrite the contents
	// if the contents don't match throw an error
	dirExists, err := pathExists(d.ref.resolvedPath)
	if err != nil {
		return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath)
	}
	if dirExists {
		isEmpty, err := isDirEmpty(d.ref.resolvedPath)
		if err != nil {
			return nil, err
		}

		if !isEmpty {
			versionExists, err := pathExists(d.ref.versionPath())
			if err != nil {
				return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath())
			}
			if versionExists {
				contents, err := ioutil.ReadFile(d.ref.versionPath())
				if err != nil {
					return nil, err
				}
				// check if contents of version file is what we expect it to be
				if string(contents) != version {
					return nil, ErrNotContainerImageDir
				}
			} else {
				return nil, ErrNotContainerImageDir
			}
			// delete directory contents so that only one image is in the directory at a time
			if err = removeDirContents(d.ref.resolvedPath); err != nil {
				return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath)
			}
			logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
		}
	} else {
		// create directory if it doesn't exist
		if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil {
			return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath)
		}
	}
	// create version file
	err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
	if err != nil {
		return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath())
	}
	return d, nil
}

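newImageDestination's directory checks condense to one rule: reuse a non-empty directory only if it carries the exact version marker this transport wrote earlier; otherwise refuse rather than overwrite unknown data. A condensed sketch of that decision with the vendored helpers inlined (error handling compressed, and the marker file name is assumed to be what versionPath() resolves to):

package direxample

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// canOverwrite reports whether dir may be wiped and reused as an image
// directory: it must not exist yet, be empty, or carry the exact version marker.
func canOverwrite(dir, version string) (bool, error) {
	entries, err := ioutil.ReadDir(dir)
	if os.IsNotExist(err) {
		return true, nil // directory will be created fresh
	}
	if err != nil {
		return false, err
	}
	if len(entries) == 0 {
		return true, nil
	}
	contents, err := ioutil.ReadFile(filepath.Join(dir, "version"))
	if err != nil {
		return false, nil // no readable marker: refuse to overwrite important data
	}
	return string(contents) == version, nil
}
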
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -36,13 +95,15 @@ func (d *dirImageDestination) SupportedManifestMIMETypes() []string {

// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *dirImageDestination) SupportsSignatures() error {
func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
	return nil
}

// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
func (d *dirImageDestination) ShouldCompressLayers() bool {
	return false
func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
	if d.compress {
		return types.Compress
	}
	return types.PreserveOriginal
}

// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
@@ -56,13 +117,26 @@ func (d *dirImageDestination) MustMatchRuntimeOS() bool {
	return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
	return false // N/A, DockerReference() returns nil.
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *dirImageDestination) HasThreadSafePutBlob() bool {
	return false
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
	blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
	if err != nil {
		return types.BlobInfo{}, err
@@ -78,6 +152,7 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
	digester := digest.Canonical.Digester()
	tee := io.TeeReader(stream, digester.Hash())

	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
	size, err := io.Copy(blobFile, tee)
	if err != nil {
		return types.BlobInfo{}, err
@@ -100,38 +175,38 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
	if info.Digest == "" {
		return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
		return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
	}
	blobPath := d.ref.layerPath(info.Digest)
	finfo, err := os.Stat(blobPath)
	if err != nil && os.IsNotExist(err) {
		return false, -1, nil
		return false, types.BlobInfo{}, nil
	}
	if err != nil {
		return false, -1, err
		return false, types.BlobInfo{}, err
	}
	return true, finfo.Size(), nil
}
	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil

func (d *dirImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
	return info, nil
}

// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dirImageDestination) PutManifest(manifest []byte) error {
func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte) error {
	return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
}

func (d *dirImageDestination) PutSignatures(signatures [][]byte) error {
func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
	for i, sig := range signatures {
		if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil {
			return err
@@ -144,6 +219,42 @@ func (d *dirImageDestination) PutSignatures(signatures [][]byte) error {
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *dirImageDestination) Commit() error {
func (d *dirImageDestination) Commit(ctx context.Context) error {
	return nil
}

// returns true if path exists
func pathExists(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return true, nil
	}
	if err != nil && os.IsNotExist(err) {
		return false, nil
	}
	return false, err
}

// returns true if directory is empty
func isDirEmpty(path string) (bool, error) {
	files, err := ioutil.ReadDir(path)
	if err != nil {
		return false, err
	}
	return len(files) == 0, nil
}

// deletes the contents of a directory
func removeDirContents(path string) error {
	files, err := ioutil.ReadDir(path)
	if err != nil {
		return err
	}

	for _, file := range files {
		if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil {
			return err
		}
	}
	return nil
}

34  vendor/github.com/containers/image/directory/directory_src.go  (generated, vendored)
@@ -35,7 +35,12 @@ func (s *dirImageSource) Close() error {
|
||||
|
||||
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
|
||||
// It may use a remote (= slow) service.
|
||||
func (s *dirImageSource) GetManifest() ([]byte, string, error) {
|
||||
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
|
||||
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
|
||||
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
|
||||
if instanceDigest != nil {
|
||||
return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`)
|
||||
}
|
||||
m, err := ioutil.ReadFile(s.ref.manifestPath())
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
@@ -43,24 +48,34 @@ func (s *dirImageSource) GetManifest() ([]byte, string, error) {
|
||||
return m, manifest.GuessMIMEType(m), err
|
||||
}
|
||||
|
||||
func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
|
||||
return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`)
|
||||
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
|
||||
func (s *dirImageSource) HasThreadSafeGetBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||
func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
|
||||
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
r, err := os.Open(s.ref.layerPath(info.Digest))
|
||||
if err != nil {
|
||||
return nil, 0, nil
|
||||
return nil, -1, err
|
||||
}
|
||||
fi, err := r.Stat()
|
||||
if err != nil {
|
||||
return nil, 0, nil
|
||||
return nil, -1, err
|
||||
}
|
||||
return r, fi.Size(), nil
|
||||
}
|
||||
|
||||
func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
|
||||
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
|
||||
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
|
||||
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
|
||||
// (e.g. if the source never returns manifest lists).
|
||||
func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
|
||||
if instanceDigest != nil {
|
||||
return nil, errors.Errorf(`Manifests lists are not supported by "dir:"`)
|
||||
}
|
||||
signatures := [][]byte{}
|
||||
for i := 0; ; i++ {
|
||||
signature, err := ioutil.ReadFile(s.ref.signaturePath(i))
|
||||
@@ -74,3 +89,8 @@ func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
|
||||
}
|
||||
return signatures, nil
|
||||
}
|
||||
|
||||
// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified.
|
||||
func (s *dirImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
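
Beyond the new signature, the GetBlob change above also fixes the error contract: a failed open used to return (nil, 0, nil), silently swallowing the error. A small standalone sketch of the corrected convention; openBlob and the path are hypothetical names, not the vendored API:

package main

import (
	"fmt"
	"io"
	"os"
)

// openBlob returns a reader for a file-backed blob plus its size, or
// (nil, -1, err) on failure, matching the corrected contract above.
func openBlob(path string) (io.ReadCloser, int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, -1, err // not (nil, 0, nil): the caller must see the failure
	}
	fi, err := f.Stat()
	if err != nil {
		f.Close() // don't leak the handle on the error path
		return nil, -1, err
	}
	return f, fi.Size(), nil
}

func main() {
	r, size, err := openBlob("/tmp/dir-image/blob") // illustrative path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer r.Close()
	fmt.Println("blob size:", size)
}
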
32 vendor/github.com/containers/image/directory/directory_transport.go generated vendored
@@ -1,18 +1,18 @@
 package directory
 
 import (
+	"context"
 	"fmt"
 	"path/filepath"
 	"strings"
 
-	"github.com/pkg/errors"
-
 	"github.com/containers/image/directory/explicitfilepath"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/image"
 	"github.com/containers/image/transports"
 	"github.com/containers/image/types"
 	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
 )
 
 func init() {
@@ -134,29 +134,34 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string {
 	return res
 }
 
-// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned Image.
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
 // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
 // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref dirReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
 	src := newImageSource(ref)
-	return image.FromSource(src)
+	return image.FromSource(ctx, sys, src)
 }
 
 // NewImageSource returns a types.ImageSource for this reference.
 // The caller must call .Close() on the returned ImageSource.
-func (ref dirReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
 	return newImageSource(ref), nil
 }
 
 // NewImageDestination returns a types.ImageDestination for this reference.
 // The caller must call .Close() on the returned ImageDestination.
-func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
-	return newImageDestination(ref), nil
+func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	compress := false
+	if sys != nil {
+		compress = sys.DirForceCompress
+	}
+	return newImageDestination(ref, compress)
 }
 
 // DeleteImage deletes the named image from the registry, if supported.
-func (ref dirReference) DeleteImage(ctx *types.SystemContext) error {
+func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
 	return errors.Errorf("Deleting images not implemented for dir: images")
 }
 
@@ -168,10 +173,15 @@ func (ref dirReference) manifestPath() string {
 // layerPath returns a path for a layer tarball within a directory using our conventions.
 func (ref dirReference) layerPath(digest digest.Digest) string {
 	// FIXME: Should we keep the digest identification?
-	return filepath.Join(ref.path, digest.Hex()+".tar")
+	return filepath.Join(ref.path, digest.Hex())
 }
 
 // signaturePath returns a path for a signature within a directory using our conventions.
 func (ref dirReference) signaturePath(index int) string {
	return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
 }
+
+// versionPath returns a path for the version file within a directory using our conventions.
+func (ref dirReference) versionPath() string {
+	return filepath.Join(ref.path, "version")
+}
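
NewImageDestination now threads sys.DirForceCompress down to the dir destination. A hedged sketch of how a caller could opt in, assuming the containers/image revision vendored in this diff; the /tmp path is illustrative:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/image/directory"
	"github.com/containers/image/types"
)

func main() {
	dir := "/tmp/dir-image" // must be an absolute path for the dir transport
	if err := os.MkdirAll(dir, 0755); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	ref, err := directory.Transport.ParseReference(dir)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// DirForceCompress asks the dir destination to gzip layers on write.
	sys := &types.SystemContext{DirForceCompress: true}
	dest, err := ref.NewImageDestination(context.Background(), sys)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer dest.Close()
	fmt.Println("layer compression:", dest.DesiredLayerCompression())
}
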
22 vendor/github.com/containers/image/docker/archive/dest.go generated vendored
@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"context"
 	"io"
 	"os"
 
@@ -15,11 +16,7 @@ type archiveImageDestination struct {
 	writer io.Closer
 }
 
-func newImageDestination(ctx *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
-	if ref.destinationRef == nil {
-		return nil, errors.Errorf("docker-archive: destination reference not supplied (must be of form <path>:<reference:tag>)")
-	}
-
+func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
 	// ref.path can be either a pipe or a regular file
 	// in the case of a pipe, we require that we can open it for write
 	// in the case of a regular file, we don't want to overwrite any pre-existing file
@@ -39,13 +36,22 @@ func newImageDestination(ctx *types.SystemContext, ref archiveReference) (types.
 		return nil, errors.New("docker-archive doesn't support modifying existing images")
 	}
 
+	tarDest := tarfile.NewDestination(fh, ref.destinationRef)
+	if sys != nil && sys.DockerArchiveAdditionalTags != nil {
+		tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
+	}
 	return &archiveImageDestination{
-		Destination: tarfile.NewDestination(fh, ref.destinationRef),
+		Destination: tarDest,
 		ref:         ref,
 		writer:      fh,
 	}, nil
 }
 
+// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
+func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression {
+	return types.Decompress
+}
+
 // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
 // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
 func (d *archiveImageDestination) Reference() types.ImageReference {
@@ -61,6 +67,6 @@ func (d *archiveImageDestination) Close() error {
 // WARNING: This does not have any transactional semantics:
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *archiveImageDestination) Commit() error {
-	return d.Destination.Commit()
+func (d *archiveImageDestination) Commit(ctx context.Context) error {
+	return d.Destination.Commit(ctx)
 }
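
The AddRepoTags wiring above lets callers attach extra repo tags to a docker-archive destination through SystemContext. A sketch under the same assumption about the vendored API; the archive path and tag names are made up:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/image/docker/archive"
	"github.com/containers/image/docker/reference"
	"github.com/containers/image/types"
)

func main() {
	ref, err := archive.ParseReference("/tmp/app.tar:example.com/app:latest")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Build one extra NamedTagged tag to be written into the archive.
	named, err := reference.ParseNormalizedNamed("example.com/app:stable")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	tagged, ok := named.(reference.NamedTagged)
	if !ok {
		fmt.Fprintln(os.Stderr, "reference is not tagged")
		os.Exit(1)
	}
	sys := &types.SystemContext{
		DockerArchiveAdditionalTags: []reference.NamedTagged{tagged},
	}
	dest, err := ref.NewImageDestination(context.Background(), sys)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer dest.Close()
	fmt.Println("destination ready:", dest.Reference().StringWithinTransport())
}
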
16 vendor/github.com/containers/image/docker/archive/src.go generated vendored
@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"context"
 	"github.com/containers/image/docker/tarfile"
 	"github.com/containers/image/types"
 	"github.com/sirupsen/logrus"
@@ -13,15 +14,18 @@ type archiveImageSource struct {
 
 // newImageSource returns a types.ImageSource for the specified image reference.
 // The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx *types.SystemContext, ref archiveReference) types.ImageSource {
+func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSource, error) {
 	if ref.destinationRef != nil {
 		logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
 	}
-	src := tarfile.NewSource(ref.path)
+	src, err := tarfile.NewSourceFromFile(ref.path)
+	if err != nil {
+		return nil, err
+	}
 	return &archiveImageSource{
 		Source: src,
 		ref:    ref,
-	}
+	}, nil
 }
 
 // Reference returns the reference used to set up this source, _as specified by the user_
@@ -30,7 +34,7 @@ func (s *archiveImageSource) Reference() types.ImageReference {
 	return s.ref
 }
 
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *archiveImageSource) Close() error {
-	return nil
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *archiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
 }
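
Because NewSourceFromFile may need to inspect (and potentially decompress) the archive up front, the constructor can now fail, and the embedded tarfile.Source takes over resource cleanup, which is why archiveImageSource lost its own Close() above. A caller-side sketch, assuming the vendored tarfile package; the path is illustrative:

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/docker/tarfile"
)

func main() {
	// The constructor returns an error now, and the returned Source
	// owns any temporary state, so it must be Close()d when done.
	src, err := tarfile.NewSourceFromFile("/tmp/app.tar")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer src.Close()
	fmt.Println("archive opened")
}
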
29 vendor/github.com/containers/image/docker/archive/transport.go generated vendored
@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"context"
 	"fmt"
 	"strings"
 
@@ -40,7 +41,9 @@ func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
 
 // archiveReference is an ImageReference for Docker images.
 type archiveReference struct {
-	destinationRef reference.NamedTagged // only used for destinations
+	// only used for destinations
+	// archiveReference.destinationRef is optional and can be nil for destinations as well.
+	destinationRef reference.NamedTagged
 	path           string
 }
 
@@ -125,29 +128,33 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string {
 	return []string{}
 }
 
-// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned Image.
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
 // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
 // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
-	src := newImageSource(ctx, ref)
-	return ctrImage.FromSource(src)
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src, err := newImageSource(ctx, ref)
+	if err != nil {
+		return nil, err
+	}
+	return ctrImage.FromSource(ctx, sys, src)
 }
 
 // NewImageSource returns a types.ImageSource for this reference.
 // The caller must call .Close() on the returned ImageSource.
-func (ref archiveReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
-	return newImageSource(ctx, ref), nil
+func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, ref)
 }
 
 // NewImageDestination returns a types.ImageDestination for this reference.
 // The caller must call .Close() on the returned ImageDestination.
-func (ref archiveReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
-	return newImageDestination(ctx, ref)
+func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
	return newImageDestination(sys, ref)
 }
 
 // DeleteImage deletes the named image from the registry, if supported.
-func (ref archiveReference) DeleteImage(ctx *types.SystemContext) error {
+func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
 	// Not really supported, for safety reasons.
 	return errors.New("Deleting images not implemented for docker-archive: images")
 }
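
With destinationRef now optional (see the struct comment change above), a docker-archive reference no longer has to carry a <reference:tag> suffix. A hedged sketch of both accepted forms, assuming ParseReference from the vendored package; the paths and tag are illustrative:

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/docker/archive"
)

func main() {
	for _, s := range []string{
		"/tmp/app.tar:example.com/app:latest", // explicit destination reference
		"/tmp/app.tar",                        // bare path: destinationRef stays nil
	} {
		ref, err := archive.ParseReference(s)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		fmt.Println("parsed:", ref.StringWithinTransport())
	}
}
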
23 vendor/github.com/containers/image/docker/cache.go generated vendored Normal file
@@ -0,0 +1,23 @@
+package docker
+
+import (
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/types"
+)
+
+// bicTransportScope returns a BICTransportScope appropriate for ref.
+func bicTransportScope(ref dockerReference) types.BICTransportScope {
+	// Blobs can be reused across the whole registry.
+	return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
+}
+
+// newBICLocationReference returns a BICLocationReference appropriate for ref.
+func newBICLocationReference(ref dockerReference) types.BICLocationReference {
+	// Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
+	return types.BICLocationReference{Opaque: ref.ref.Name()}
+}
+
+// parseBICLocationReference returns a repository for encoded lr.
+func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
+	return reference.ParseNormalizedNamed(lr.Opaque)
+}
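
The two scopes in the new cache.go encode the registry's blob-reuse rules: a blob may be reused anywhere on the same registry (transport scope = domain), while a concrete location is a repository, with tag and digest irrelevant to reuse. A sketch of what they evaluate to for a concrete name, reimplementing the unexported helpers with the public reference API (the image name is illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/types"
)

func main() {
	named, err := reference.ParseNormalizedNamed("quay.io/skopeo/stable:latest")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Transport scope: blobs can be reused anywhere on the same registry.
	scope := types.BICTransportScope{Opaque: reference.Domain(named)}
	// Location: a blob is addressed by repository, not by tag or digest.
	loc := types.BICLocationReference{Opaque: named.Name()}
	fmt.Println(scope.Opaque) // quay.io
	fmt.Println(loc.Opaque)   // quay.io/skopeo/stable
}
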
Some files were not shown because too many files have changed in this diff.