Mirror of https://github.com/containers/skopeo.git, synced 2026-02-02 15:29:34 +00:00

Compare commits (138 commits)
SHA1:

41991bab70, 2b5086167f, b46d16f48c, 9fef0eb3f3, 30b0a1741e, 945b9dc08f, 904b064da4, 7ae62af073, 7525a79c93, 07287b5783,
0a2a62ac20, 5581c62a3a, 6b5bdb7563, 2bdffc89c2, 65e6449c95, 2829f7da9e, ece44c2842, 0fa335c149, 5c0ad57c2c, b2934e7cf6,
2af7114ea1, 0e1cc9203e, e255ccc145, 9447a55b61, 9fdceeb2b2, 18ee5f8119, ab6a17059c, 81c5e94850, 99dc83062a, 4d8ea6729f,
ac85091ecd, ffa640c2b0, c73bcba7e6, 329e1cf61c, 854f766dc7, 5c73fdbfdc, 097549748a, 032309941b, d93a581fb8, 52075ab386,
d65ae4b1d7, c32d27f59e, 883d65a54a, 94728fb73f, 520f0e5ddb, fa39b49a5c, 0490018903, b434c8f424, 79de2d9f09, 2031e17b3c,
5a050c1383, 404c5bd341, 2134209960, 1e8c029562, 932b037d66, 26a48586a0, 683f4263ef, ebfa1e936b, 509782e78b, 776b408f76,
fee5981ebf, d9e9604979, 3606380bdb, 640b967463, b8b9913695, 9e2720dfcc, b329dd0d4e, 1b10352591, bba2874451, 0322441640,
8868d2ebe4, f19acc1c90, 47f24b4097, c2597aab22, 47065938da, 790620024e, 42b01df89e, aafae2bc50, e5b9ea5ee6, 1c2ff140cb,
f7c608e65e, ec810c91fe, 17bea86e89, 3e0026d907, 3e98377bf2, 0658bc80f3, e96a9b0e1b, 08c30b8f06, 05212df1c5, 7ec68dd463,
6eb5131b85, 736cd7641d, 78bd5dd3df, ecd675e0a6, 5675895460, 0f8f870bd3, a51e38e60d, 8fe1595f92, 2497f500d5, afa92d58f6,
958cafb2c0, 1d1bf0d393, 3094320203, 39de98777d, 8084f6f4e2, 6ef45e5cf1, 444b90a9cf, 72a3dc17ee, 88c748f47a, 7e8c89d619,
694f915003, a77b409619, 1faff791ce, 8b8afe0fda, 09a120a59b, c769c7789e, 3ea3965e5e, ee8391db34, e1cc97d9d7, f30756a9bb,
33b474b224, 485a7aa330, 59117e6e3d, 8ee3ead743, bc39e4f9b6, 3017d87ade, d8f1d4572b, 41d8dd8b80, bcf3dbbb93, bfc0c5e531,
013ebac8d8, fbc2e4f70f, 72468d6817, 5dec940523, 761a6811c1, b3a023f9dd, 5aa217fe0d, 737438d026
.gitignore (vendored): 2 changes

@@ -1,3 +1,3 @@
-/docs/skopeo.1
+*.1
 /layers-*
 /skopeo
.travis.yml

@@ -1,3 +1,4 @@
 language: go

 matrix:
   include:
@@ -21,4 +22,4 @@ install:

 script:
 - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then hack/travis_osx.sh ; fi
-- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make check ; fi
+- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make vendor && ./hack/tree_status.sh && make check ; fi
CONTRIBUTING.md

@@ -115,6 +115,35 @@ Use your real name (sorry, no pseudonyms or anonymous contributions.)
 If you set your `user.name` and `user.email` git configs, you can sign your
 commit automatically with `git commit -s`.

+### Dependencies management
+
+Make sure [`vndr`](https://github.com/LK4D4/vndr) is installed.
+
+In order to add a new dependency to this project:
+
+- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
+- run `make vendor`
+
+In order to update an existing dependency:
+
+- update the relevant dependency line in `vendor.conf`
+- run `make vendor`
+
+When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):
+
+- create a new branch in your `skopeo` checkout and switch to it
+- update `vendor.conf`. Find the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
+- run `make vendor`
+- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for changed `containers/image` API)
+- optionally add new integration tests to the skopeo repo
+- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
+- iterate until tests pass and the PR is reviewed
+- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
+- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
+- run `make vendor`
+- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
+- after tests complete successfully again, merge the skopeo PR

 ## Communications

 For general questions, or discussions, please use the
Dockerfile

@@ -1,4 +1,4 @@
-FROM ubuntu:17.10
+FROM ubuntu:18.10

 RUN apt-get update && apt-get install -y \
 	golang \
Makefile: 55 changes

@@ -1,4 +1,4 @@
-.PHONY: all binary build-container docs build-local clean install install-binary install-completions shell test-integration vendor
+.PHONY: all binary build-container docs docs-in-container build-local clean install install-binary install-completions shell test-integration .install.vndr vendor

 export GO15VENDOREXPERIMENT=1

@@ -22,15 +22,15 @@ CONTAINERSSYSCONFIGDIR=${DESTDIR}/etc/containers
 REGISTRIESDDIR=${CONTAINERSSYSCONFIGDIR}/registries.d
 SIGSTOREDIR=${DESTDIR}/var/lib/atomic/sigstore
 BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
-GO_MD2MAN ?= go-md2man
 GO ?= go
 CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
+GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')

 ifeq ($(DEBUG), 1)
 override GOGCFLAGS += -N -l
 endif

-ifeq ($(shell go env GOOS), linux)
+ifeq ($(shell $(GO) env GOOS), linux)
 GO_DYN_FLAGS="-buildmode=pie"
 endif

@@ -50,10 +50,12 @@ CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"
 GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)

 MANPAGES_MD = $(wildcard docs/*.md)
+MANPAGES ?= $(MANPAGES_MD:%.md=%)

-BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh)
+BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh) $(shell hack/btrfs_installed_tag.sh)
 LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
-LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(DARWIN_BUILD_TAG)
+OSTREE_BUILD_TAG = $(shell hack/ostree_tag.sh)
+LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(OSTREE_BUILD_TAG) $(DARWIN_BUILD_TAG)
 BUILDTAGS += $(LOCAL_BUILD_TAGS)

 ifeq ($(DISABLE_CGO), 1)
@@ -64,18 +66,31 @@ endif
 # Note: Uses the -N -l go compiler options to disable compiler optimizations
 # and inlining. Using these build options allows you to subsequently
 # use source debugging tools like delve.
-all: binary docs
+all: binary docs-in-container
+
+help:
+	@echo "Usage: make <target>"
+	@echo
+	@echo " * 'install' - Install binaries and documents to system locations"
+	@echo " * 'binary' - Build skopeo with a container"
+	@echo " * 'binary-local' - Build skopeo locally"
+	@echo " * 'test-unit' - Execute unit tests"
+	@echo " * 'test-integration' - Execute integration tests"
+	@echo " * 'validate' - Verify whether there is no conflict and all Go source files have been formatted, linted and vetted"
+	@echo " * 'check' - Including above validate, test-integration and test-unit"
+	@echo " * 'shell' - Run the built image and attach to a shell"
+	@echo " * 'clean' - Clean artifacts"

 # Build a container image (skopeobuild) that has everything we need to build.
 # Then do the build and the output (skopeo) should appear in current dir
 binary: cmd/skopeo
 	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
-	${CONTAINER_RUNTIME} run --rm --security-opt label:disable -v $$(pwd):/src/github.com/containers/skopeo \
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
 		skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'

 binary-static: cmd/skopeo
 	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
-	${CONTAINER_RUNTIME} run --rm --security-opt label:disable -v $$(pwd):/src/github.com/containers/skopeo \
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
 		skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'

 # Build w/o using containers
@@ -88,10 +103,15 @@ binary-local-static:
 build-container:
 	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .

-docs/%.1: docs/%.1.md
-	$(GO_MD2MAN) -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@
+$(MANPAGES): %: %.md
+	@sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@

-docs: $(MANPAGES_MD:%.md=%)
+docs: $(MANPAGES)
+
+docs-in-container:
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
+		skopeobuildimage make docs $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'

 clean:
 	rm -f skopeo docs/*.1

@@ -107,9 +127,9 @@ install-binary: ./skopeo
 	install -d -m 755 ${INSTALLDIR}
 	install -m 755 skopeo ${INSTALLDIR}/skopeo

-install-docs: docs/skopeo.1
+install-docs: docs
 	install -d -m 755 ${MANINSTALLDIR}/man1
-	install -m 644 docs/skopeo.1 ${MANINSTALLDIR}/man1/skopeo.1
+	install -m 644 docs/*.1 ${MANINSTALLDIR}/man1/

 install-completions:
 	install -m 755 -d ${BASHINSTALLDIR}

@@ -140,5 +160,10 @@ validate-local:
 test-unit-local:
 	$(GPGME_ENV) $(GO) test -tags "$(BUILDTAGS)" $$($(GO) list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')

-vendor: vendor.conf
-	vndr -whitelist '^github.com/containers/image/docs/.*'
+.install.vndr:
+	$(GO) get -u github.com/LK4D4/vndr
+
+vendor: vendor.conf .install.vndr
+	$(GOPATH)/bin/vndr \
+		-whitelist '^github.com/containers/image/docs/.*' \
+		-whitelist '^github.com/containers/image/registries.conf'
README.md: 29 changes

@@ -229,34 +229,7 @@ NOT TODO
 CONTRIBUTING
 -

-### Dependencies management
-
-Make sure [`vndr`](https://github.com/LK4D4/vndr) is installed.
-
-In order to add a new dependency to this project:
-
-- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
-- run `make vendor`
-
-In order to update an existing dependency:
-
-- update the relevant dependency line in `vendor.conf`
-- run `make vendor`
-
-When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):
-
-- create a new branch in your `skopeo` checkout and switch to it
-- update `vendor.conf`. Find the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
-- run `make vendor`
-- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for changed `containers/image` API)
-- optionally add new integration tests to the skopeo repo
-- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
-- iterate until tests pass and the PR is reviewed
-- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
-- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
-- run `make vendor`
-- update the skopeo PR with the result, drop the “DO NOT MERGE” marking
-- after tests complete successfully again, merge the skopeo PR
 Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate in the project.

 License
 -
cmd/skopeo/copy.go

@@ -3,7 +3,7 @@ package main
import (
	"errors"
	"fmt"
	"os"
	"io"
	"strings"

	"github.com/containers/image/copy"
@@ -11,100 +11,35 @@ import (
	"github.com/containers/image/manifest"
	"github.com/containers/image/transports"
	"github.com/containers/image/transports/alltransports"
	"github.com/containers/image/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/urfave/cli"
)

// contextsFromGlobalOptions returns source and destination types.SystemContext depending on c.
func contextsFromGlobalOptions(c *cli.Context) (*types.SystemContext, *types.SystemContext, error) {
	sourceCtx, err := contextFromGlobalOptions(c, "src-")
	if err != nil {
		return nil, nil, err
	}
type copyOptions struct {
	global            *globalOptions
	srcImage          *imageOptions
	destImage         *imageDestOptions
	additionalTags    cli.StringSlice // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
	removeSignatures  bool            // Do not copy signatures from the source image
	signByFingerprint string          // Sign the image using a GPG key with the specified fingerprint
	format            optionalString  // Force conversion of the image to a specified format
	quiet             bool            // Suppress output information when copying images

	destinationCtx, err := contextFromGlobalOptions(c, "dest-")
	if err != nil {
		return nil, nil, err
	}

	return sourceCtx, destinationCtx, nil
}

func copyHandler(c *cli.Context) error {
	if len(c.Args()) != 2 {
		cli.ShowCommandHelp(c, "copy")
		return errors.New("Exactly two arguments expected")
func copyCmd(global *globalOptions) cli.Command {
	sharedFlags, sharedOpts := sharedImageFlags()
	srcFlags, srcOpts := imageFlags(global, sharedOpts, "src-", "screds")
	destFlags, destOpts := imageDestFlags(global, sharedOpts, "dest-", "dcreds")
	opts := copyOptions{global: global,
		srcImage:  srcOpts,
		destImage: destOpts,
	}

	policyContext, err := getPolicyContext(c)
	if err != nil {
		return fmt.Errorf("Error loading trust policy: %v", err)
	}
	defer policyContext.Destroy()

	srcRef, err := alltransports.ParseImageName(c.Args()[0])
	if err != nil {
		return fmt.Errorf("Invalid source name %s: %v", c.Args()[0], err)
	}
	destRef, err := alltransports.ParseImageName(c.Args()[1])
	if err != nil {
		return fmt.Errorf("Invalid destination name %s: %v", c.Args()[1], err)
	}
	signBy := c.String("sign-by")
	removeSignatures := c.Bool("remove-signatures")

	sourceCtx, destinationCtx, err := contextsFromGlobalOptions(c)
	if err != nil {
		return err
	}

	var manifestType string
	if c.IsSet("format") {
		switch c.String("format") {
		case "oci":
			manifestType = imgspecv1.MediaTypeImageManifest
		case "v2s1":
			manifestType = manifest.DockerV2Schema1SignedMediaType
		case "v2s2":
			manifestType = manifest.DockerV2Schema2MediaType
		default:
			return fmt.Errorf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", c.String("format"))
		}
	}

	if c.IsSet("additional-tag") {
		for _, image := range c.StringSlice("additional-tag") {
			ref, err := reference.ParseNormalizedNamed(image)
			if err != nil {
				return fmt.Errorf("error parsing additional-tag '%s': %v", image, err)
			}
			namedTagged, isNamedTagged := ref.(reference.NamedTagged)
			if !isNamedTagged {
				return fmt.Errorf("additional-tag '%s' must be a tagged reference", image)
			}
			destinationCtx.DockerArchiveAdditionalTags = append(destinationCtx.DockerArchiveAdditionalTags, namedTagged)
		}
	}

	ctx, cancel := commandTimeoutContextFromGlobalOptions(c)
	defer cancel()

	_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
		RemoveSignatures:      removeSignatures,
		SignBy:                signBy,
		ReportWriter:          os.Stdout,
		SourceCtx:             sourceCtx,
		DestinationCtx:        destinationCtx,
		ForceManifestMIMEType: manifestType,
	})
	return err
}

var copyCmd = cli.Command{
	Name:  "copy",
	Usage: "Copy an IMAGE-NAME from one location to another",
	Description: fmt.Sprintf(`
	return cli.Command{
		Name:  "copy",
		Usage: "Copy an IMAGE-NAME from one location to another",
		Description: fmt.Sprintf(`

Container "IMAGE-NAME" uses a "transport":"details" format.

@@ -113,86 +48,112 @@ var copyCmd = cli.Command{

See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
	ArgsUsage: "SOURCE-IMAGE DESTINATION-IMAGE",
	Action:    copyHandler,
	// FIXME: Do we need to namespace the GPG aspect?
	Flags: []cli.Flag{
		cli.StringSliceFlag{
			Name:  "additional-tag",
			Usage: "additional tags (supports docker-archive)",
		},
		cli.StringFlag{
			Name:  "authfile",
			Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
		},
		cli.BoolFlag{
			Name:  "remove-signatures",
			Usage: "Do not copy signatures from SOURCE-IMAGE",
		},
		cli.StringFlag{
			Name:  "sign-by",
			Usage: "Sign the image using a GPG key with the specified `FINGERPRINT`",
		},
		cli.StringFlag{
			Name:  "src-creds, screds",
			Value: "",
			Usage: "Use `USERNAME[:PASSWORD]` for accessing the source registry",
		},
		cli.StringFlag{
			Name:  "dest-creds, dcreds",
			Value: "",
			Usage: "Use `USERNAME[:PASSWORD]` for accessing the destination registry",
		},
		cli.StringFlag{
			Name:  "src-cert-dir",
			Value: "",
			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the source registry or daemon",
		},
		cli.BoolTFlag{
			Name:  "src-tls-verify",
			Usage: "require HTTPS and verify certificates when talking to the container source registry or daemon (defaults to true)",
		},
		cli.StringFlag{
			Name:  "dest-cert-dir",
			Value: "",
			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the destination registry or daemon",
		},
		cli.BoolTFlag{
			Name:  "dest-tls-verify",
			Usage: "require HTTPS and verify certificates when talking to the container destination registry or daemon (defaults to true)",
		},
		cli.StringFlag{
			Name:  "dest-ostree-tmp-dir",
			Value: "",
			Usage: "`DIRECTORY` to use for OSTree temporary files",
		},
		cli.StringFlag{
			Name:  "src-shared-blob-dir",
			Value: "",
			Usage: "`DIRECTORY` to use to fetch retrieved blobs (OCI layout sources only)",
		},
		cli.StringFlag{
			Name:  "dest-shared-blob-dir",
			Value: "",
			Usage: "`DIRECTORY` to use to store retrieved blobs (OCI layout destinations only)",
		},
		cli.StringFlag{
			Name:  "format, f",
			Usage: "`MANIFEST TYPE` (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)",
		},
		cli.BoolFlag{
			Name:  "dest-compress",
			Usage: "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)",
		},
		cli.StringFlag{
			Name:  "src-daemon-host",
			Value: "",
			Usage: "use docker daemon host at `HOST` (docker-daemon sources only)",
		},
		cli.StringFlag{
			Name:  "dest-daemon-host",
			Value: "",
			Usage: "use docker daemon host at `HOST` (docker-daemon destinations only)",
		},
	},
		ArgsUsage: "SOURCE-IMAGE DESTINATION-IMAGE",
		Action:    commandAction(opts.run),
		// FIXME: Do we need to namespace the GPG aspect?
		Flags: append(append(append([]cli.Flag{
			cli.StringSliceFlag{
				Name:  "additional-tag",
				Usage: "additional tags (supports docker-archive)",
				Value: &opts.additionalTags, // Surprisingly StringSliceFlag does not support Destination:, but modifies Value: in place.
			},
			cli.BoolFlag{
				Name:        "quiet, q",
				Usage:       "Suppress output information when copying images",
				Destination: &opts.quiet,
			},
			cli.BoolFlag{
				Name:        "remove-signatures",
				Usage:       "Do not copy signatures from SOURCE-IMAGE",
				Destination: &opts.removeSignatures,
			},
			cli.StringFlag{
				Name:        "sign-by",
				Usage:       "Sign the image using a GPG key with the specified `FINGERPRINT`",
				Destination: &opts.signByFingerprint,
			},
			cli.GenericFlag{
				Name:  "format, f",
				Usage: "`MANIFEST TYPE` (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)",
				Value: newOptionalStringValue(&opts.format),
			},
		}, sharedFlags...), srcFlags...), destFlags...),
	}
}

func (opts *copyOptions) run(args []string, stdout io.Writer) error {
	if len(args) != 2 {
		return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
	}
	imageNames := args

	if err := reexecIfNecessaryForImages(imageNames...); err != nil {
		return err
	}

	policyContext, err := opts.global.getPolicyContext()
	if err != nil {
		return fmt.Errorf("Error loading trust policy: %v", err)
	}
	defer policyContext.Destroy()

	srcRef, err := alltransports.ParseImageName(imageNames[0])
	if err != nil {
		return fmt.Errorf("Invalid source name %s: %v", imageNames[0], err)
	}
	destRef, err := alltransports.ParseImageName(imageNames[1])
	if err != nil {
		return fmt.Errorf("Invalid destination name %s: %v", imageNames[1], err)
	}

	sourceCtx, err := opts.srcImage.newSystemContext()
	if err != nil {
		return err
	}
	destinationCtx, err := opts.destImage.newSystemContext()
	if err != nil {
		return err
	}

	var manifestType string
	if opts.format.present {
		switch opts.format.value {
		case "oci":
			manifestType = imgspecv1.MediaTypeImageManifest
		case "v2s1":
			manifestType = manifest.DockerV2Schema1SignedMediaType
		case "v2s2":
			manifestType = manifest.DockerV2Schema2MediaType
		default:
			return fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", opts.format.value)
		}
	}

	for _, image := range opts.additionalTags {
		ref, err := reference.ParseNormalizedNamed(image)
		if err != nil {
			return fmt.Errorf("error parsing additional-tag '%s': %v", image, err)
		}
		namedTagged, isNamedTagged := ref.(reference.NamedTagged)
		if !isNamedTagged {
			return fmt.Errorf("additional-tag '%s' must be a tagged reference", image)
		}
		destinationCtx.DockerArchiveAdditionalTags = append(destinationCtx.DockerArchiveAdditionalTags, namedTagged)
	}

	ctx, cancel := opts.global.commandTimeoutContext()
	defer cancel()

	if opts.quiet {
		stdout = nil
	}
	_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
		RemoveSignatures:      opts.removeSignatures,
		SignBy:                opts.signByFingerprint,
		ReportWriter:          stdout,
		SourceCtx:             sourceCtx,
		DestinationCtx:        destinationCtx,
		ForceManifestMIMEType: manifestType,
	})
	return err
}
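Every refactored subcommand above routes its handler through `commandAction(opts.run)`, but the adapter itself is not part of the visible diff. A minimal sketch of what such an adapter could look like, inferred only from how it is used here (the `errorShouldDisplayUsage` shape and the exact signature are assumptions):

```go
package main

import (
	"io"

	"github.com/urfave/cli"
)

// errorShouldDisplayUsage wraps an error to signal that command help
// should be printed along with the error. Assumed shape: only its use
// in copy.go's run() is visible in this diff.
type errorShouldDisplayUsage struct {
	error
}

// commandAction adapts a plain `func(args, stdout) error` handler into
// a urfave/cli action, so handlers never touch *cli.Context directly.
// This is a sketch; the real helper in the PR may differ.
func commandAction(handler func(args []string, stdout io.Writer) error) func(c *cli.Context) error {
	return func(c *cli.Context) error {
		err := handler(([]string)(c.Args()), c.App.Writer)
		if _, ok := err.(errorShouldDisplayUsage); ok {
			// Show usage in addition to returning the error.
			_ = cli.ShowCommandHelp(c, c.Command.Name)
		}
		return err
	}
}
```

The design payoff is testability: handlers take `args` and an `io.Writer` instead of a `*cli.Context`, so unit tests can call them directly with a buffer.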
cmd/skopeo/delete.go

@@ -3,6 +3,7 @@ package main
import (
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/containers/image/transports"
@@ -10,30 +11,22 @@ import (
	"github.com/urfave/cli"
)

func deleteHandler(c *cli.Context) error {
	if len(c.Args()) != 1 {
		return errors.New("Usage: delete imageReference")
	}

	ref, err := alltransports.ParseImageName(c.Args()[0])
	if err != nil {
		return fmt.Errorf("Invalid source name %s: %v", c.Args()[0], err)
	}

	sys, err := contextFromGlobalOptions(c, "")
	if err != nil {
		return err
	}

	ctx, cancel := commandTimeoutContextFromGlobalOptions(c)
	defer cancel()
	return ref.DeleteImage(ctx, sys)
type deleteOptions struct {
	global *globalOptions
	image  *imageOptions
}

var deleteCmd = cli.Command{
	Name:  "delete",
	Usage: "Delete image IMAGE-NAME",
	Description: fmt.Sprintf(`
func deleteCmd(global *globalOptions) cli.Command {
	sharedFlags, sharedOpts := sharedImageFlags()
	imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
	opts := deleteOptions{
		global: global,
		image:  imageOpts,
	}
	return cli.Command{
		Name:  "delete",
		Usage: "Delete image IMAGE-NAME",
		Description: fmt.Sprintf(`
Delete an "IMAGE_NAME" from a transport

Supported transports:
@@ -41,26 +34,33 @@ var deleteCmd = cli.Command{

See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
	ArgsUsage: "IMAGE-NAME",
	Action:    deleteHandler,
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "authfile",
			Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
		},
		cli.StringFlag{
			Name:  "creds",
			Value: "",
			Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
		},
		cli.StringFlag{
			Name:  "cert-dir",
			Value: "",
			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
		},
		cli.BoolTFlag{
			Name:  "tls-verify",
			Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
		},
	},
		ArgsUsage: "IMAGE-NAME",
		Action:    commandAction(opts.run),
		Flags:     append(sharedFlags, imageFlags...),
	}
}

func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
	if len(args) != 1 {
		return errors.New("Usage: delete imageReference")
	}
	imageName := args[0]

	if err := reexecIfNecessaryForImages(imageName); err != nil {
		return err
	}

	ref, err := alltransports.ParseImageName(imageName)
	if err != nil {
		return fmt.Errorf("Invalid source name %s: %v", imageName, err)
	}

	sys, err := opts.image.newSystemContext()
	if err != nil {
		return err
	}

	ctx, cancel := opts.global.commandTimeoutContext()
	defer cancel()
	return ref.DeleteImage(ctx, sys)
}
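The per-command flag lists above are replaced by shared factories (`sharedImageFlags`, `imageFlags`), which are used but not shown in this diff. An illustrative reduction of that factory pattern, with hypothetical names, showing why it returns both the flag definitions and the options struct they write into:

```go
package main

import "github.com/urfave/cli"

// exampleImageOptions stands in for the real imageOptions struct,
// which is defined elsewhere in this PR; field names are invented.
type exampleImageOptions struct {
	credsOption string
	certPath    string
}

// exampleImageFlags sketches the factory pattern used above: return
// the flags together with the struct they populate, so each command
// instance owns its own state instead of reading a shared cli.Context.
func exampleImageFlags(flagPrefix string) ([]cli.Flag, *exampleImageOptions) {
	opts := exampleImageOptions{}
	return []cli.Flag{
		cli.StringFlag{
			Name:        flagPrefix + "creds",
			Usage:       "Use `USERNAME[:PASSWORD]` for accessing the registry",
			Destination: &opts.credsOption,
		},
		cli.StringFlag{
			Name:        flagPrefix + "cert-dir",
			Usage:       "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
			Destination: &opts.certPath,
		},
	}, &opts
}
```

This is why `copyCmd` can call the same factory twice with the prefixes "src-" and "dest-" and get two independent option sets.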
cmd/skopeo/flag.go (new file, 75 lines)

@@ -0,0 +1,75 @@
package main

import (
	"strconv"

	"github.com/urfave/cli"
)

// optionalBool is a boolean with a separate presence flag.
type optionalBool struct {
	present bool
	value   bool
}

// optionalBoolValue is a cli.Generic == flag.Value implementation equivalent to
// the one underlying flag.Bool, except that it records whether the flag has been set.
// This is distinct from optionalBool to (pretend to) force callers to use
// newOptionalBoolValue.
type optionalBoolValue optionalBool

func newOptionalBoolValue(p *optionalBool) cli.Generic {
	p.present = false
	return (*optionalBoolValue)(p)
}

func (ob *optionalBoolValue) Set(s string) error {
	v, err := strconv.ParseBool(s)
	if err != nil {
		return err
	}
	ob.value = v
	ob.present = true
	return nil
}

func (ob *optionalBoolValue) String() string {
	if !ob.present {
		return "" // This is, sadly, not round-trip safe: --flag is interpreted as --flag=true
	}
	return strconv.FormatBool(ob.value)
}

func (ob *optionalBoolValue) IsBoolFlag() bool {
	return true
}

// optionalString is a string with a separate presence flag.
type optionalString struct {
	present bool
	value   string
}

// optionalStringValue is a cli.Generic == flag.Value implementation equivalent to
// the one underlying flag.String, except that it records whether the flag has been set.
// This is distinct from optionalString to (pretend to) force callers to use
// newOptionalStringValue.
type optionalStringValue optionalString

func newOptionalStringValue(p *optionalString) cli.Generic {
	p.present = false
	return (*optionalStringValue)(p)
}

func (ob *optionalStringValue) Set(s string) error {
	ob.value = s
	ob.present = true
	return nil
}

func (ob *optionalStringValue) String() string {
	if !ob.present {
		return "" // This is, sadly, not round-trip safe: --flag= is interpreted as {present:true, value:""}
	}
	return ob.value
}
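The point of these types is that a `cli.GenericFlag` backed by an optionalBool can distinguish "flag never given" from an explicit `--flag=false`, which `cli.BoolTFlag` cannot. A minimal usage sketch built only on the file above (the flag name and default handling are chosen for illustration):

```go
package main

import (
	"fmt"

	"github.com/urfave/cli"
)

func exampleTLSVerify() {
	var tlsVerify optionalBool
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.GenericFlag{
			Name:  "tls-verify",
			Usage: "require HTTPS and verify certificates (defaults to true)",
			Value: newOptionalBoolValue(&tlsVerify),
		},
	}
	app.Action = func(*cli.Context) error {
		// Only override the default when the user actually said something.
		verify := true // assumed default
		if tlsVerify.present {
			verify = tlsVerify.value
		}
		fmt.Println("TLS verification:", verify)
		return nil
	}
	// "--tls-verify=false" yields present=true, value=false;
	// omitting the flag leaves present=false and keeps the default.
	_ = app.Run([]string{"app", "--tls-verify=false"})
}
```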
cmd/skopeo/flag_test.go (new file, 239 lines)

@@ -0,0 +1,239 @@
package main

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/urfave/cli"
)

func TestOptionalBoolSet(t *testing.T) {
	for _, c := range []struct {
		input    string
		accepted bool
		value    bool
	}{
		// Valid inputs documented for strconv.ParseBool == flag.BoolVar
		{"1", true, true},
		{"t", true, true},
		{"T", true, true},
		{"TRUE", true, true},
		{"true", true, true},
		{"True", true, true},
		{"0", true, false},
		{"f", true, false},
		{"F", true, false},
		{"FALSE", true, false},
		{"false", true, false},
		{"False", true, false},
		// A few invalid inputs
		{"", false, false},
		{"yes", false, false},
		{"no", false, false},
		{"2", false, false},
	} {
		var ob optionalBool
		v := newOptionalBoolValue(&ob)
		require.False(t, ob.present)
		err := v.Set(c.input)
		if c.accepted {
			assert.NoError(t, err, c.input)
			assert.Equal(t, c.value, ob.value)
		} else {
			assert.Error(t, err, c.input)
			assert.False(t, ob.present) // Just to be extra paranoid.
		}
	}

	// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
	// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
	// is not called in any possible situation).
	var globalOB, commandOB optionalBool
	actionRun := false
	app := cli.NewApp()
	app.EnableBashCompletion = true
	app.Flags = []cli.Flag{
		cli.GenericFlag{
			Name:  "global-OB",
			Value: newOptionalBoolValue(&globalOB),
		},
	}
	app.Commands = []cli.Command{{
		Name: "cmd",
		Flags: []cli.Flag{
			cli.GenericFlag{
				Name:  "command-OB",
				Value: newOptionalBoolValue(&commandOB),
			},
		},
		Action: func(*cli.Context) error {
			assert.False(t, globalOB.present)
			assert.False(t, commandOB.present)
			actionRun = true
			return nil
		},
	}}
	err := app.Run([]string{"app", "cmd"})
	require.NoError(t, err)
	assert.True(t, actionRun)
}

func TestOptionalBoolString(t *testing.T) {
	for _, c := range []struct {
		input    optionalBool
		expected string
	}{
		{optionalBool{present: true, value: true}, "true"},
		{optionalBool{present: true, value: false}, "false"},
		{optionalBool{present: false, value: true}, ""},
		{optionalBool{present: false, value: false}, ""},
	} {
		var ob optionalBool
		v := newOptionalBoolValue(&ob)
		ob = c.input
		res := v.String()
		assert.Equal(t, c.expected, res)
	}
}

func TestOptionalBoolIsBoolFlag(t *testing.T) {
	// IsBoolFlag means that the argument value must either be part of the same argument, with =;
	// if there is no =, the value is set to true.
	// This differs from other flags, where the argument is required and may be either separated with = or supplied in the next argument.
	for _, c := range []struct {
		input        []string
		expectedOB   optionalBool
		expectedArgs []string
	}{
		{[]string{"1", "2"}, optionalBool{present: false}, []string{"1", "2"}},                                     // Flag not present
		{[]string{"--OB=true", "1", "2"}, optionalBool{present: true, value: true}, []string{"1", "2"}},            // --OB=true
		{[]string{"--OB=false", "1", "2"}, optionalBool{present: true, value: false}, []string{"1", "2"}},          // --OB=false
		{[]string{"--OB", "true", "1", "2"}, optionalBool{present: true, value: true}, []string{"true", "1", "2"}}, // --OB true
		{[]string{"--OB", "false", "1", "2"}, optionalBool{present: true, value: true}, []string{"false", "1", "2"}}, // --OB false
	} {
		var ob optionalBool
		actionRun := false
		app := cli.NewApp()
		app.Commands = []cli.Command{{
			Name: "cmd",
			Flags: []cli.Flag{
				cli.GenericFlag{
					Name:  "OB",
					Value: newOptionalBoolValue(&ob),
				},
			},
			Action: func(ctx *cli.Context) error {
				assert.Equal(t, c.expectedOB, ob)
				assert.Equal(t, c.expectedArgs, ([]string)(ctx.Args()))
				actionRun = true
				return nil
			},
		}}
		err := app.Run(append([]string{"app", "cmd"}, c.input...))
		require.NoError(t, err)
		assert.True(t, actionRun)
	}
}

func TestOptionalStringSet(t *testing.T) {
	// Really just a smoke test, but differentiating between not present and empty.
	for _, c := range []string{"", "hello"} {
		var os optionalString
		v := newOptionalStringValue(&os)
		require.False(t, os.present)
		err := v.Set(c)
		assert.NoError(t, err, c)
		assert.Equal(t, c, os.value)
	}

	// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
	// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
	// is not called in any possible situation).
	var globalOS, commandOS optionalString
	actionRun := false
	app := cli.NewApp()
	app.EnableBashCompletion = true
	app.Flags = []cli.Flag{
		cli.GenericFlag{
			Name:  "global-OS",
			Value: newOptionalStringValue(&globalOS),
		},
	}
	app.Commands = []cli.Command{{
		Name: "cmd",
		Flags: []cli.Flag{
			cli.GenericFlag{
				Name:  "command-OS",
				Value: newOptionalStringValue(&commandOS),
			},
		},
		Action: func(*cli.Context) error {
			assert.False(t, globalOS.present)
			assert.False(t, commandOS.present)
			actionRun = true
			return nil
		},
	}}
	err := app.Run([]string{"app", "cmd"})
	require.NoError(t, err)
	assert.True(t, actionRun)
}

func TestOptionalStringString(t *testing.T) {
	for _, c := range []struct {
		input    optionalString
		expected string
	}{
		{optionalString{present: true, value: "hello"}, "hello"},
		{optionalString{present: true, value: ""}, ""},
		{optionalString{present: false, value: "hello"}, ""},
		{optionalString{present: false, value: ""}, ""},
	} {
		var os optionalString
		v := newOptionalStringValue(&os)
		os = c.input
		res := v.String()
		assert.Equal(t, c.expected, res)
	}
}

func TestOptionalStringIsBoolFlag(t *testing.T) {
	// NOTE: optionalStringValue does not implement IsBoolFlag!
	// IsBoolFlag means that the argument value must either be part of the same argument, with =;
	// if there is no =, the value is set to true.
	// This differs from other flags, where the argument is required and may be either separated with = or supplied in the next argument.
	for _, c := range []struct {
		input        []string
		expectedOS   optionalString
		expectedArgs []string
	}{
		{[]string{"1", "2"}, optionalString{present: false}, []string{"1", "2"}},                                   // Flag not present
		{[]string{"--OS=hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}},      // --OS=hello
		{[]string{"--OS=", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}},                // --OS=
		{[]string{"--OS", "hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}},   // --OS hello
		{[]string{"--OS", "", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}},             // --OS ""
	} {
		var os optionalString
		actionRun := false
		app := cli.NewApp()
		app.Commands = []cli.Command{{
			Name: "cmd",
			Flags: []cli.Flag{
				cli.GenericFlag{
					Name:  "OS",
					Value: newOptionalStringValue(&os),
				},
			},
			Action: func(ctx *cli.Context) error {
				assert.Equal(t, c.expectedOS, os)
				assert.Equal(t, c.expectedArgs, ([]string)(ctx.Args()))
				actionRun = true
				return nil
			},
		}}
		err := app.Run(append([]string{"app", "cmd"}, c.input...))
		require.NoError(t, err)
		assert.True(t, actionRun)
	}
}
cmd/skopeo/inspect.go

@@ -3,6 +3,7 @@ package main
import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
	"time"

@@ -29,10 +30,24 @@ type inspectOutput struct {
	Layers []string
}

var inspectCmd = cli.Command{
	Name:  "inspect",
	Usage: "Inspect image IMAGE-NAME",
	Description: fmt.Sprintf(`
type inspectOptions struct {
	global *globalOptions
	image  *imageOptions
	raw    bool // Output the raw manifest instead of parsing information about the image
	config bool // Output the raw config blob instead of parsing information about the image
}

func inspectCmd(global *globalOptions) cli.Command {
	sharedFlags, sharedOpts := sharedImageFlags()
	imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
	opts := inspectOptions{
		global: global,
		image:  imageOpts,
	}
	return cli.Command{
		Name:  "inspect",
		Usage: "Inspect image IMAGE-NAME",
		Description: fmt.Sprintf(`
Return low-level information about "IMAGE-NAME" in a registry/transport

Supported transports:
@@ -40,101 +55,121 @@ var inspectCmd = cli.Command{

See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
	ArgsUsage: "IMAGE-NAME",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "authfile",
			Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
		},
		cli.StringFlag{
			Name:  "cert-dir",
			Value: "",
			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
		},
		cli.BoolTFlag{
			Name:  "tls-verify",
			Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
		},
		cli.BoolFlag{
			Name:  "raw",
			Usage: "output raw manifest",
		},
		cli.StringFlag{
			Name:  "creds",
			Value: "",
			Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
		},
	},
	Action: func(c *cli.Context) (retErr error) {
		ctx, cancel := commandTimeoutContextFromGlobalOptions(c)
		defer cancel()

		img, err := parseImage(ctx, c)
		if err != nil {
			return err
		}

		defer func() {
			if err := img.Close(); err != nil {
				retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
			}
		}()

		rawManifest, _, err := img.Manifest(ctx)
		if err != nil {
			return err
		}
		if c.Bool("raw") {
			_, err := c.App.Writer.Write(rawManifest)
			if err != nil {
				return fmt.Errorf("Error writing manifest to standard output: %v", err)
			}
			return nil
		}
		imgInspect, err := img.Inspect(ctx)
		if err != nil {
			return err
		}
		outputData := inspectOutput{
			Name: "", // Set below if DockerReference() is known
			Tag:  imgInspect.Tag,
			// Digest is set below.
			RepoTags:      []string{}, // Possibly overridden for docker.Transport.
			Created:       imgInspect.Created,
			DockerVersion: imgInspect.DockerVersion,
			Labels:        imgInspect.Labels,
			Architecture:  imgInspect.Architecture,
			Os:            imgInspect.Os,
			Layers:        imgInspect.Layers,
		}
		outputData.Digest, err = manifest.Digest(rawManifest)
		if err != nil {
			return fmt.Errorf("Error computing manifest digest: %v", err)
		}
		if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
			outputData.Name = dockerRef.Name()
		}
		if img.Reference().Transport() == docker.Transport {
			sys, err := contextFromGlobalOptions(c, "")
			if err != nil {
				return err
			}
			outputData.RepoTags, err = docker.GetRepositoryTags(ctx, sys, img.Reference())
			if err != nil {
				// some registries may decide to block the "list all tags" endpoint
				// gracefully allow the inspect to continue in this case. Currently
				// the IBM Bluemix container registry has this restriction.
				if !strings.Contains(err.Error(), "401") {
					return fmt.Errorf("Error determining repository tags: %v", err)
				}
				logrus.Warnf("Registry disallows tag list retrieval; skipping")
			}
		}
		out, err := json.MarshalIndent(outputData, "", " ")
		if err != nil {
			return err
		}
		fmt.Fprintln(c.App.Writer, string(out))
		return nil
	},
		ArgsUsage: "IMAGE-NAME",
		Flags: append(append([]cli.Flag{
			cli.BoolFlag{
				Name:        "raw",
				Usage:       "output raw manifest or configuration",
				Destination: &opts.raw,
			},
			cli.BoolFlag{
				Name:        "config",
				Usage:       "output configuration",
				Destination: &opts.config,
			},
		}, sharedFlags...), imageFlags...),
		Action: commandAction(opts.run),
	}
}

func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error) {
	ctx, cancel := opts.global.commandTimeoutContext()
	defer cancel()

	if len(args) != 1 {
		return errors.New("Exactly one argument expected")
	}
	imageName := args[0]

	if err := reexecIfNecessaryForImages(imageName); err != nil {
		return err
	}

	img, err := parseImage(ctx, opts.image, imageName)
	if err != nil {
		return err
	}

	defer func() {
		if err := img.Close(); err != nil {
			retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
		}
	}()

	rawManifest, _, err := img.Manifest(ctx)
	if err != nil {
		return err
	}
	if opts.config && opts.raw {
		configBlob, err := img.ConfigBlob(ctx)
		if err != nil {
			return fmt.Errorf("Error reading configuration blob: %v", err)
		}
		_, err = stdout.Write(configBlob)
		if err != nil {
			return fmt.Errorf("Error writing configuration blob to standard output: %v", err)
		}
		return nil
	} else if opts.raw {
		_, err := stdout.Write(rawManifest)
		if err != nil {
			return fmt.Errorf("Error writing manifest to standard output: %v", err)
		}
		return nil
	} else if opts.config {
		config, err := img.OCIConfig(ctx)
		if err != nil {
			return fmt.Errorf("Error reading OCI-formatted configuration data: %v", err)
		}
		err = json.NewEncoder(stdout).Encode(config)
		if err != nil {
			return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %v", err)
		}
		return nil
	}
	imgInspect, err := img.Inspect(ctx)
	if err != nil {
		return err
	}
	outputData := inspectOutput{
		Name: "", // Set below if DockerReference() is known
		Tag:  imgInspect.Tag,
		// Digest is set below.
		RepoTags:      []string{}, // Possibly overridden for docker.Transport.
		Created:       imgInspect.Created,
		DockerVersion: imgInspect.DockerVersion,
		Labels:        imgInspect.Labels,
		Architecture:  imgInspect.Architecture,
		Os:            imgInspect.Os,
		Layers:        imgInspect.Layers,
	}
	outputData.Digest, err = manifest.Digest(rawManifest)
	if err != nil {
		return fmt.Errorf("Error computing manifest digest: %v", err)
	}
	if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
		outputData.Name = dockerRef.Name()
	}
	if img.Reference().Transport() == docker.Transport {
		sys, err := opts.image.newSystemContext()
		if err != nil {
			return err
		}
		outputData.RepoTags, err = docker.GetRepositoryTags(ctx, sys, img.Reference())
		if err != nil {
			// some registries may decide to block the "list all tags" endpoint
			// gracefully allow the inspect to continue in this case. Currently
			// the IBM Bluemix container registry has this restriction.
			if !strings.Contains(err.Error(), "401") {
				return fmt.Errorf("Error determining repository tags: %v", err)
			}
			logrus.Warnf("Registry disallows tag list retrieval; skipping")
		}
	}
	out, err := json.MarshalIndent(outputData, "", " ")
	if err != nil {
		return err
	}
	fmt.Fprintf(stdout, "%s\n", string(out))
	return nil
}
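Both inspect.go and layers.go rely on a named return (`retErr`) so that a deferred Close can surface its own failure without masking the primary error. An isolated sketch of that pattern, using a hypothetical helper rather than anything from the PR:

```go
package main

import (
	"io"
	"io/ioutil"

	"github.com/pkg/errors"
)

// readAllAndClose illustrates the deferred-close pattern used in
// inspect.go and layers.go: the named return retErr lets the deferred
// function attach a close failure to the error already being returned.
// Note that errors.Wrapf(nil, ...) returns nil, so (as in the original
// code) a close error after an otherwise successful call is dropped.
func readAllAndClose(r io.ReadCloser) (data []byte, retErr error) {
	defer func() {
		if err := r.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (close error: %v)", err)
		}
	}()
	return ioutil.ReadAll(r)
}
```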
cmd/skopeo/layers.go

@@ -2,125 +2,149 @@ package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/containers/image/directory"
	"github.com/containers/image/image"
	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/urfave/cli"
)

var layersCmd = cli.Command{
	Name:      "layers",
	Usage:     "Get layers of IMAGE-NAME",
	ArgsUsage: "IMAGE-NAME [LAYER...]",
	Hidden:    true,
	Action: func(c *cli.Context) (retErr error) {
		fmt.Fprintln(os.Stderr, `DEPRECATED: skopeo layers is deprecated in favor of skopeo copy`)
		if c.NArg() == 0 {
			return errors.New("Usage: layers imageReference [layer...]")
type layersOptions struct {
	global *globalOptions
	image  *imageOptions
}

func layersCmd(global *globalOptions) cli.Command {
	sharedFlags, sharedOpts := sharedImageFlags()
	imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
	opts := layersOptions{
		global: global,
		image:  imageOpts,
	}
	return cli.Command{
		Name:      "layers",
		Usage:     "Get layers of IMAGE-NAME",
		ArgsUsage: "IMAGE-NAME [LAYER...]",
		Hidden:    true,
		Action:    commandAction(opts.run),
		Flags:     append(sharedFlags, imageFlags...),
	}
}

func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
	fmt.Fprintln(os.Stderr, `DEPRECATED: skopeo layers is deprecated in favor of skopeo copy`)
	if len(args) == 0 {
		return errors.New("Usage: layers imageReference [layer...]")
	}
	imageName := args[0]

	if err := reexecIfNecessaryForImages(imageName); err != nil {
		return err
	}

	ctx, cancel := opts.global.commandTimeoutContext()
	defer cancel()

	sys, err := opts.image.newSystemContext()
	if err != nil {
		return err
	}
	cache := blobinfocache.DefaultCache(sys)
	rawSource, err := parseImageSource(ctx, opts.image, imageName)
	if err != nil {
		return err
	}
	src, err := image.FromSource(ctx, sys, rawSource)
	if err != nil {
		if closeErr := rawSource.Close(); closeErr != nil {
			return errors.Wrapf(err, " (close error: %v)", closeErr)
		}

		ctx, cancel := commandTimeoutContextFromGlobalOptions(c)
		defer cancel()
		return err
	}
	defer func() {
		if err := src.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (close error: %v)", err)
		}
	}()

		sys, err := contextFromGlobalOptions(c, "")
	type blobDigest struct {
		digest   digest.Digest
		isConfig bool
	}
	var blobDigests []blobDigest
	for _, dString := range args[1:] {
		if !strings.HasPrefix(dString, "sha256:") {
			dString = "sha256:" + dString
		}
		d, err := digest.Parse(dString)
		if err != nil {
			return err
		}
		rawSource, err := parseImageSource(ctx, c, c.Args()[0])
		blobDigests = append(blobDigests, blobDigest{digest: d, isConfig: false})
	}

	if len(blobDigests) == 0 {
		layers := src.LayerInfos()
		seenLayers := map[digest.Digest]struct{}{}
		for _, info := range layers {
			if _, ok := seenLayers[info.Digest]; !ok {
				blobDigests = append(blobDigests, blobDigest{digest: info.Digest, isConfig: false})
				seenLayers[info.Digest] = struct{}{}
			}
		}
		configInfo := src.ConfigInfo()
		if configInfo.Digest != "" {
			blobDigests = append(blobDigests, blobDigest{digest: configInfo.Digest, isConfig: true})
		}
	}

	tmpDir, err := ioutil.TempDir(".", "layers-")
	if err != nil {
		return err
	}
	tmpDirRef, err := directory.NewReference(tmpDir)
	if err != nil {
		return err
	}
	dest, err := tmpDirRef.NewImageDestination(ctx, nil)
	if err != nil {
		return err
	}

	defer func() {
		if err := dest.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (close error: %v)", err)
		}
	}()

	for _, bd := range blobDigests {
		r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
		if err != nil {
			return err
		}
		src, err := image.FromSource(ctx, sys, rawSource)
		if err != nil {
			if closeErr := rawSource.Close(); closeErr != nil {
		if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
			if closeErr := r.Close(); closeErr != nil {
				return errors.Wrapf(err, " (close error: %v)", closeErr)
			}

			return err
		}
		defer func() {
			if err := src.Close(); err != nil {
				retErr = errors.Wrapf(retErr, " (close error: %v)", err)
			}
		}()
	}

	type blobDigest struct {
		digest   digest.Digest
		isConfig bool
	}
	var blobDigests []blobDigest
	for _, dString := range c.Args().Tail() {
		if !strings.HasPrefix(dString, "sha256:") {
			dString = "sha256:" + dString
		}
		d, err := digest.Parse(dString)
		if err != nil {
			return err
		}
		blobDigests = append(blobDigests, blobDigest{digest: d, isConfig: false})
	}
	manifest, _, err := src.Manifest(ctx)
	if err != nil {
		return err
	}
	if err := dest.PutManifest(ctx, manifest); err != nil {
		return err
	}

	if len(blobDigests) == 0 {
		layers := src.LayerInfos()
		seenLayers := map[digest.Digest]struct{}{}
		for _, info := range layers {
			if _, ok := seenLayers[info.Digest]; !ok {
				blobDigests = append(blobDigests, blobDigest{digest: info.Digest, isConfig: false})
				seenLayers[info.Digest] = struct{}{}
			}
		}
		configInfo := src.ConfigInfo()
		if configInfo.Digest != "" {
			blobDigests = append(blobDigests, blobDigest{digest: configInfo.Digest, isConfig: true})
		}
	}

	tmpDir, err := ioutil.TempDir(".", "layers-")
	if err != nil {
		return err
	}
	tmpDirRef, err := directory.NewReference(tmpDir)
	if err != nil {
		return err
	}
	dest, err := tmpDirRef.NewImageDestination(ctx, nil)
	if err != nil {
		return err
	}

	defer func() {
		if err := dest.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (close error: %v)", err)
		}
	}()

	for _, bd := range blobDigests {
		r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1})
		if err != nil {
			return err
		}
		if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, bd.isConfig); err != nil {
			if closeErr := r.Close(); closeErr != nil {
				return errors.Wrapf(err, " (close error: %v)", closeErr)
			}
			return err
		}
	}

	manifest, _, err := src.Manifest(ctx)
	if err != nil {
		return err
	}
	if err := dest.PutManifest(ctx, manifest); err != nil {
		return err
	}

	return dest.Commit(ctx)
	},
	return dest.Commit(ctx)
}
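layers.go accepts bare hex digests on the command line and normalizes them before parsing, then deduplicates layer digests with a `seenLayers` set. A self-contained sketch combining both steps, using only the go-digest API that the file itself imports:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

// normalizeDigests mirrors the argument handling in layers.go: prefix
// bare hex strings with "sha256:", validate them with digest.Parse,
// and drop duplicates the way the seenLayers map does for layer infos.
func normalizeDigests(args []string) ([]digest.Digest, error) {
	seen := map[digest.Digest]struct{}{}
	var out []digest.Digest
	for _, s := range args {
		if !strings.HasPrefix(s, "sha256:") {
			s = "sha256:" + s
		}
		d, err := digest.Parse(s)
		if err != nil {
			return nil, fmt.Errorf("invalid digest %q: %v", s, err)
		}
		if _, ok := seen[d]; ok {
			continue // already requested once
		}
		seen[d] = struct{}{}
		out = append(out, d)
	}
	return out, nil
}
```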
@@ -1,8 +1,10 @@
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/containers/image/signature"
	"github.com/containers/skopeo/version"
@@ -15,8 +17,22 @@ import (
// and will be populated by the Makefile
var gitCommit = ""

// createApp returns a cli.App to be run or tested.
func createApp() *cli.App {
type globalOptions struct {
	debug              bool          // Enable debug output
	tlsVerify          optionalBool  // Require HTTPS and verify certificates (for docker: and docker-daemon:)
	policyPath         string        // Path to a signature verification policy file
	insecurePolicy     bool          // Use an "allow everything" signature verification policy
	registriesDirPath  string        // Path to a "registries.d" registry configuration directory
	overrideArch       string        // Architecture to use for choosing images, instead of the runtime one
	overrideOS         string        // OS to use for choosing images, instead of the runtime one
	commandTimeout     time.Duration // Timeout for the command execution
	registriesConfPath string        // Path to the "registries.conf" file
}

// createApp returns a cli.App, and the underlying globalOptions object, to be run or tested.
func createApp() (*cli.App, *globalOptions) {
	opts := globalOptions{}

	app := cli.NewApp()
	app.EnableBashCompletion = true
	app.Name = "skopeo"
@@ -28,89 +44,112 @@ func createApp() *cli.App {
	app.Usage = "Various operations with container images and container image registries"
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "debug",
			Usage: "enable debug output",
			Name:        "debug",
			Usage:       "enable debug output",
			Destination: &opts.debug,
		},
		cli.BoolTFlag{
		cli.GenericFlag{
			Name:   "tls-verify",
			Usage:  "require HTTPS and verify certificates when talking to container registries (defaults to true)",
			Hidden: true,
			Value:  newOptionalBoolValue(&opts.tlsVerify),
		},
		cli.StringFlag{
			Name:  "policy",
			Value: "",
			Usage: "Path to a trust policy file",
			Name:        "policy",
			Usage:       "Path to a trust policy file",
			Destination: &opts.policyPath,
		},
		cli.BoolFlag{
			Name:  "insecure-policy",
			Usage: "run the tool without any policy check",
			Name:        "insecure-policy",
			Usage:       "run the tool without any policy check",
			Destination: &opts.insecurePolicy,
		},
		cli.StringFlag{
			Name:  "registries.d",
			Value: "",
			Usage: "use registry configuration files in `DIR` (e.g. for container signature storage)",
			Name:        "registries.d",
			Usage:       "use registry configuration files in `DIR` (e.g. for container signature storage)",
			Destination: &opts.registriesDirPath,
		},
		cli.StringFlag{
			Name:  "override-arch",
			Value: "",
			Usage: "use `ARCH` instead of the architecture of the machine for choosing images",
			Name:        "override-arch",
			Usage:       "use `ARCH` instead of the architecture of the machine for choosing images",
			Destination: &opts.overrideArch,
		},
		cli.StringFlag{
			Name:  "override-os",
			Value: "",
			Usage: "use `OS` instead of the running OS for choosing images",
			Name:        "override-os",
			Usage:       "use `OS` instead of the running OS for choosing images",
			Destination: &opts.overrideOS,
		},
		cli.DurationFlag{
			Name:  "command-timeout",
			Usage: "timeout for the command execution",
			Name:        "command-timeout",
			Usage:       "timeout for the command execution",
			Destination: &opts.commandTimeout,
		},
		cli.StringFlag{
			Name:        "registries-conf",
			Usage:       "path to the registries.conf file",
			Destination: &opts.registriesConfPath,
			Hidden:      true,
		},
	}
	app.Before = func(c *cli.Context) error {
		if c.GlobalBool("debug") {
			logrus.SetLevel(logrus.DebugLevel)
		}
		if c.GlobalIsSet("tls-verify") {
			logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
		}
		return nil
	}
	app.Before = opts.before
	app.Commands = []cli.Command{
		copyCmd,
		inspectCmd,
		layersCmd,
		deleteCmd,
		manifestDigestCmd,
		standaloneSignCmd,
		standaloneVerifyCmd,
		untrustedSignatureDumpCmd,
		copyCmd(&opts),
		inspectCmd(&opts),
		layersCmd(&opts),
		deleteCmd(&opts),
		manifestDigestCmd(),
		standaloneSignCmd(),
		standaloneVerifyCmd(),
		untrustedSignatureDumpCmd(),
	}
	return app
	return app, &opts
}

// before is run by the cli package for any command, before running the command-specific handler.
func (opts *globalOptions) before(ctx *cli.Context) error {
	if opts.debug {
		logrus.SetLevel(logrus.DebugLevel)
	}
	if opts.tlsVerify.present {
		logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
	}
	return nil
}
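The `optionalBool` type behind `newOptionalBoolValue` and the `.present`/`.value` accesses above is not part of this hunk. A minimal sketch of the shape the code assumes (the field and constructor names come from the diff; the implementation details are an assumption):

```go
package main

import "strconv"

// optionalBool is a tri-state flag value: it records whether the flag
// was given at all, not just whether it was true or false.
type optionalBool struct {
	present bool
	value   bool
}

// optionalBoolValue adapts *optionalBool to the cli.Generic interface
// (Set/String), so it can back a cli.GenericFlag.
type optionalBoolValue struct{ b *optionalBool }

func newOptionalBoolValue(b *optionalBool) *optionalBoolValue {
	return &optionalBoolValue{b}
}

func (v *optionalBoolValue) Set(s string) error {
	parsed, err := strconv.ParseBool(s)
	if err != nil {
		return err
	}
	v.b.present = true
	v.b.value = parsed
	return nil
}

func (v *optionalBoolValue) String() string {
	if !v.b.present {
		return "" // Flag was never set.
	}
	return strconv.FormatBool(v.b.value)
}
```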

func main() {
	if reexec.Init() {
		return
	}
	app := createApp()
	app, _ := createApp()
	if err := app.Run(os.Args); err != nil {
		logrus.Fatal(err)
	}
}

// getPolicyContext handles the global "policy" flag.
func getPolicyContext(c *cli.Context) (*signature.PolicyContext, error) {
	policyPath := c.GlobalString("policy")
	var policy *signature.Policy // This could be cached across calls, if we had an application context.
// getPolicyContext returns a *signature.PolicyContext based on opts.
func (opts *globalOptions) getPolicyContext() (*signature.PolicyContext, error) {
	var policy *signature.Policy // This could be cached across calls in opts.
	var err error
	if c.GlobalBool("insecure-policy") {
	if opts.insecurePolicy {
		policy = &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
	} else if policyPath == "" {
	} else if opts.policyPath == "" {
		policy, err = signature.DefaultPolicy(nil)
	} else {
		policy, err = signature.NewPolicyFromFile(policyPath)
		policy, err = signature.NewPolicyFromFile(opts.policyPath)
	}
	if err != nil {
		return nil, err
	}
	return signature.NewPolicyContext(policy)
}

// commandTimeoutContext returns a context.Context and a cancellation callback based on opts.
// The caller should usually "defer cancel()" immediately after calling this.
func (opts *globalOptions) commandTimeoutContext() (context.Context, context.CancelFunc) {
	ctx := context.Background()
	var cancel context.CancelFunc = func() {}
	if opts.commandTimeout > 0 {
		ctx, cancel = context.WithTimeout(ctx, opts.commandTimeout)
	}
	return ctx, cancel
}
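As the comment says, callers are expected to pair the returned cancel function with `defer`; an illustrative call site (not part of this diff):

```go
// Illustrative only: obtain a possibly-timeout-bounded context and
// release its resources when the command handler returns.
ctx, cancel := opts.commandTimeoutContext()
defer cancel()
// ... pass ctx to the image operations ...
```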

@@ -5,7 +5,7 @@ import "bytes"
// runSkopeo creates an app object and runs it with args, with an implied first "skopeo".
// Returns output intended for stdout and the returned error, if any.
func runSkopeo(args ...string) (string, error) {
	app := createApp()
	app, _ := createApp()
	stdout := bytes.Buffer{}
	app.Writer = &stdout
	args = append([]string{"skopeo"}, args...)

@@ -3,17 +3,31 @@ package main
import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/containers/image/manifest"
	"github.com/urfave/cli"
)

func manifestDigest(context *cli.Context) error {
	if len(context.Args()) != 1 {
type manifestDigestOptions struct {
}

func manifestDigestCmd() cli.Command {
	opts := manifestDigestOptions{}
	return cli.Command{
		Name:      "manifest-digest",
		Usage:     "Compute a manifest digest of a file",
		ArgsUsage: "MANIFEST",
		Action:    commandAction(opts.run),
	}
}

func (opts *manifestDigestOptions) run(args []string, stdout io.Writer) error {
	if len(args) != 1 {
		return errors.New("Usage: skopeo manifest-digest manifest")
	}
	manifestPath := context.Args()[0]
	manifestPath := args[0]

	man, err := ioutil.ReadFile(manifestPath)
	if err != nil {
@@ -23,13 +37,6 @@ func manifestDigest(context *cli.Context) error {
	if err != nil {
		return fmt.Errorf("Error computing digest: %v", err)
	}
	fmt.Fprintf(context.App.Writer, "%s\n", digest)
	fmt.Fprintf(stdout, "%s\n", digest)
	return nil
}

var manifestDigestCmd = cli.Command{
	Name:      "manifest-digest",
	Usage:     "Compute a manifest digest of a file",
	ArgsUsage: "MANIFEST",
	Action:    manifestDigest,
}

@@ -4,20 +4,41 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/containers/image/signature"
	"github.com/urfave/cli"
)

func standaloneSign(c *cli.Context) error {
	outputFile := c.String("output")
	if len(c.Args()) != 3 || outputFile == "" {
type standaloneSignOptions struct {
	output string // Output file path
}

func standaloneSignCmd() cli.Command {
	opts := standaloneSignOptions{}
	return cli.Command{
		Name:      "standalone-sign",
		Usage:     "Create a signature using local files",
		ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
		Action:    commandAction(opts.run),
		Flags: []cli.Flag{
			cli.StringFlag{
				Name:        "output, o",
				Usage:       "output the signature to `SIGNATURE`",
				Destination: &opts.output,
			},
		},
	}
}

func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
	if len(args) != 3 || opts.output == "" {
		return errors.New("Usage: skopeo standalone-sign manifest docker-reference key-fingerprint -o signature")
	}
	manifestPath := c.Args()[0]
	dockerReference := c.Args()[1]
	fingerprint := c.Args()[2]
	manifestPath := args[0]
	dockerReference := args[1]
	fingerprint := args[2]

	manifest, err := ioutil.ReadFile(manifestPath)
	if err != nil {
@@ -34,33 +55,33 @@ func standaloneSign(c *cli.Context) error {
		return fmt.Errorf("Error creating signature: %v", err)
	}

	if err := ioutil.WriteFile(outputFile, signature, 0644); err != nil {
		return fmt.Errorf("Error writing signature to %s: %v", outputFile, err)
	if err := ioutil.WriteFile(opts.output, signature, 0644); err != nil {
		return fmt.Errorf("Error writing signature to %s: %v", opts.output, err)
	}
	return nil
}

var standaloneSignCmd = cli.Command{
	Name:      "standalone-sign",
	Usage:     "Create a signature using local files",
	ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
	Action:    standaloneSign,
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "output, o",
			Usage: "output the signature to `SIGNATURE`",
		},
	},
type standaloneVerifyOptions struct {
}

func standaloneVerify(c *cli.Context) error {
	if len(c.Args()) != 4 {
func standaloneVerifyCmd() cli.Command {
	opts := standaloneVerifyOptions{}
	return cli.Command{
		Name:      "standalone-verify",
		Usage:     "Verify a signature using local files",
		ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
		Action:    commandAction(opts.run),
	}
}

func (opts *standaloneVerifyOptions) run(args []string, stdout io.Writer) error {
	if len(args) != 4 {
		return errors.New("Usage: skopeo standalone-verify manifest docker-reference key-fingerprint signature")
	}
	manifestPath := c.Args()[0]
	expectedDockerReference := c.Args()[1]
	expectedFingerprint := c.Args()[2]
	signaturePath := c.Args()[3]
	manifestPath := args[0]
	expectedDockerReference := args[1]
	expectedFingerprint := args[2]
	signaturePath := args[3]

	unverifiedManifest, err := ioutil.ReadFile(manifestPath)
	if err != nil {
@@ -81,22 +102,35 @@ func standaloneVerify(c *cli.Context) error {
		return fmt.Errorf("Error verifying signature: %v", err)
	}

	fmt.Fprintf(c.App.Writer, "Signature verified, digest %s\n", sig.DockerManifestDigest)
	fmt.Fprintf(stdout, "Signature verified, digest %s\n", sig.DockerManifestDigest)
	return nil
}

var standaloneVerifyCmd = cli.Command{
	Name:      "standalone-verify",
	Usage:     "Verify a signature using local files",
	ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
	Action:    standaloneVerify,
// WARNING: Do not use the contents of this for ANY security decisions,
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
// There is NO REASON to expect the values to be correct, or not intentionally misleading
// (including things like “✅ Verified by $authority”)
//
// The subcommand is undocumented, and it may be renamed or entirely disappear in the future.
type untrustedSignatureDumpOptions struct {
}

func untrustedSignatureDump(c *cli.Context) error {
	if len(c.Args()) != 1 {
func untrustedSignatureDumpCmd() cli.Command {
	opts := untrustedSignatureDumpOptions{}
	return cli.Command{
		Name:      "untrusted-signature-dump-without-verification",
		Usage:     "Dump contents of a signature WITHOUT VERIFYING IT",
		ArgsUsage: "SIGNATURE",
		Hidden:    true,
		Action:    commandAction(opts.run),
	}
}

func (opts *untrustedSignatureDumpOptions) run(args []string, stdout io.Writer) error {
	if len(args) != 1 {
		return errors.New("Usage: skopeo untrusted-signature-dump-without-verification signature")
	}
	untrustedSignaturePath := c.Args()[0]
	untrustedSignaturePath := args[0]

	untrustedSignature, err := ioutil.ReadFile(untrustedSignaturePath)
	if err != nil {
@@ -111,20 +145,6 @@ func untrustedSignatureDump(c *cli.Context) error {
	if err != nil {
		return err
	}
	fmt.Fprintln(c.App.Writer, string(untrustedOut))
	fmt.Fprintln(stdout, string(untrustedOut))
	return nil
}

// WARNING: Do not use the contents of this for ANY security decisions,
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
// There is NO REASON to expect the values to be correct, or not intentionally misleading
// (including things like “✅ Verified by $authority”)
//
// The subcommand is undocumented, and it may be renamed or entirely disappear in the future.
var untrustedSignatureDumpCmd = cli.Command{
	Name:      "untrusted-signature-dump-without-verification",
	Usage:     "Dump contents of a signature WITHOUT VERIFYING IT",
	ArgsUsage: "SIGNATURE",
	Hidden:    true,
	Action:    untrustedSignatureDump,
}

11
cmd/skopeo/unshare.go
Normal file
@@ -0,0 +1,11 @@
// +build !linux

package main

func maybeReexec() error {
	return nil
}

func reexecIfNecessaryForImages(inputImageNames ...string) error {
	return nil
}

46
cmd/skopeo/unshare_linux.go
Normal file
@@ -0,0 +1,46 @@
package main

import (
	"github.com/containers/buildah/pkg/unshare"
	"github.com/containers/image/storage"
	"github.com/containers/image/transports/alltransports"
	"github.com/pkg/errors"
	"github.com/syndtr/gocapability/capability"
)

var neededCapabilities = []capability.Cap{
	capability.CAP_CHOWN,
	capability.CAP_DAC_OVERRIDE,
	capability.CAP_FOWNER,
	capability.CAP_FSETID,
	capability.CAP_MKNOD,
	capability.CAP_SETFCAP,
}

func maybeReexec() error {
	// With Skopeo we need only the subset of the root capabilities necessary
	// for pulling an image to the storage. Do not attempt to create a namespace
	// if we already have the capabilities we need.
	capabilities, err := capability.NewPid(0)
	if err != nil {
		return errors.Wrapf(err, "error reading the current capabilities sets")
	}
	for _, cap := range neededCapabilities {
		if !capabilities.Get(capability.EFFECTIVE, cap) {
			// We are missing a capability we need, so create a user namespace.
			unshare.MaybeReexecUsingUserNamespace(true)
			return nil
		}
	}
	return nil
}

func reexecIfNecessaryForImages(imageNames ...string) error {
	// Check whether containers-storage is used before doing unshare.
	for _, imageName := range imageNames {
		if alltransports.TransportFromImageName(imageName).Name() == storage.Transport.Name() {
			return maybeReexec()
		}
	}
	return nil
}
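An illustrative call site for the helper above (the image reference is an assumption; only `containers-storage:` references trigger the re-exec):

```go
// Illustrative only: re-exec into a user namespace before touching
// local container storage, and propagate any failure.
if err := reexecIfNecessaryForImages("containers-storage:localhost/test:latest"); err != nil {
	return err
}
```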
@@ -3,6 +3,7 @@ package main
import (
	"context"
	"errors"
	"io"
	"strings"

	"github.com/containers/image/transports/alltransports"
@@ -10,43 +11,182 @@ import (
	"github.com/urfave/cli"
)

func contextFromGlobalOptions(c *cli.Context, flagPrefix string) (*types.SystemContext, error) {
// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that cli.ShowSubcommandHelp should be called.
type errorShouldDisplayUsage struct {
	error
}

// commandAction intermediates between the cli.ActionFunc interface and the real handler,
// primarily to ensure that cli.Context is not available to the handler, which in turn
// makes sure that the cli.String() etc. flag access functions are not used,
// and everything is done using the *Options structures and the Destination: members of cli.Flag.
// handler may return errorShouldDisplayUsage to cause cli.ShowSubcommandHelp to be called.
func commandAction(handler func(args []string, stdout io.Writer) error) cli.ActionFunc {
	return func(c *cli.Context) error {
		err := handler(([]string)(c.Args()), c.App.Writer)
		if _, ok := err.(errorShouldDisplayUsage); ok {
			cli.ShowSubcommandHelp(c)
		}
		return err
	}
}
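For orientation, a hypothetical subcommand wired through `commandAction` (the `frob` name and its behavior are invented for illustration, and the snippet assumes the `errors`, `fmt`, and `io` imports already present in this file; the pattern itself matches the real conversions elsewhere in this change):

```go
// Hypothetical subcommand demonstrating the commandAction pattern.
type frobOptions struct{}

func frobCmd() cli.Command {
	opts := frobOptions{}
	return cli.Command{
		Name:      "frob",
		ArgsUsage: "INPUT",
		Action:    commandAction(opts.run),
	}
}

func (opts *frobOptions) run(args []string, stdout io.Writer) error {
	if len(args) != 1 {
		// Returning errorShouldDisplayUsage makes commandAction call
		// cli.ShowSubcommandHelp before propagating the error.
		return errorShouldDisplayUsage{errors.New("exactly one argument expected")}
	}
	fmt.Fprintf(stdout, "frobbed %s\n", args[0])
	return nil
}
```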

// sharedImageOptions collects CLI flags which are image-related, but do not change across images.
// This really should be a part of globalOptions, but that would break existing users of (skopeo copy --authfile=).
type sharedImageOptions struct {
	authFilePath string // Path to a */containers/auth.json
}

// sharedImageFlags prepares a collection of CLI flags writing into sharedImageOptions, and the managed sharedImageOptions structure.
func sharedImageFlags() ([]cli.Flag, *sharedImageOptions) {
	opts := sharedImageOptions{}
	return []cli.Flag{
		cli.StringFlag{
			Name:        "authfile",
			Usage:       "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
			Destination: &opts.authFilePath,
		},
	}, &opts
}

// imageOptions collects CLI flags which are the same across subcommands, but may be different for each image
// (e.g. may differ between the source and destination of a copy)
type imageOptions struct {
	global           *globalOptions      // May be shared across several imageOptions instances.
	shared           *sharedImageOptions // May be shared across several imageOptions instances.
	credsOption      optionalString      // username[:password] for accessing a registry
	dockerCertPath   string              // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
	tlsVerify        optionalBool        // Require HTTPS and verify certificates (for docker: and docker-daemon:)
	sharedBlobDir    string              // A directory to use for OCI blobs, shared across repositories
	dockerDaemonHost string              // docker-daemon: host to connect to
	noCreds          bool                // Access the registry anonymously
}

// imageFlags prepares a collection of CLI flags writing into imageOptions, and the managed imageOptions structure.
func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) ([]cli.Flag, *imageOptions) {
	opts := imageOptions{
		global: global,
		shared: shared,
	}

	// This is horribly ugly, but we need to support the old option forms of (skopeo copy) for compatibility.
	// Don't add any more cases like this.
	credsOptionExtra := ""
	if credsOptionAlias != "" {
		credsOptionExtra += "," + credsOptionAlias
	}

	return []cli.Flag{
		cli.GenericFlag{
			Name:  flagPrefix + "creds" + credsOptionExtra,
			Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
			Value: newOptionalStringValue(&opts.credsOption),
		},
		cli.StringFlag{
			Name:        flagPrefix + "cert-dir",
			Usage:       "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry or daemon",
			Destination: &opts.dockerCertPath,
		},
		cli.GenericFlag{
			Name:  flagPrefix + "tls-verify",
			Usage: "require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)",
			Value: newOptionalBoolValue(&opts.tlsVerify),
		},
		cli.StringFlag{
			Name:        flagPrefix + "shared-blob-dir",
			Usage:       "`DIRECTORY` to use to share blobs across OCI repositories",
			Destination: &opts.sharedBlobDir,
		},
		cli.StringFlag{
			Name:        flagPrefix + "daemon-host",
			Usage:       "use docker daemon host at `HOST` (docker-daemon: only)",
			Destination: &opts.dockerDaemonHost,
		},
		cli.BoolFlag{
			Name:        flagPrefix + "no-creds",
			Usage:       "Access the registry anonymously",
			Destination: &opts.noCreds,
		},
	}, &opts
}

// newSystemContext returns a *types.SystemContext corresponding to opts.
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
	ctx := &types.SystemContext{
		RegistriesDirPath:  c.GlobalString("registries.d"),
		ArchitectureChoice: c.GlobalString("override-arch"),
		OSChoice:           c.GlobalString("override-os"),
		DockerCertPath:     c.String(flagPrefix + "cert-dir"),
		// DEPRECATED: keep this here for backward compatibility, but override
		// them if per subcommand flags are provided (see below).
		DockerInsecureSkipTLSVerify:       !c.GlobalBoolT("tls-verify"),
		OSTreeTmpDirPath:                  c.String(flagPrefix + "ostree-tmp-dir"),
		OCISharedBlobDirPath:              c.String(flagPrefix + "shared-blob-dir"),
		DirForceCompress:                  c.Bool(flagPrefix + "compress"),
		AuthFilePath:                      c.String("authfile"),
		DockerDaemonHost:                  c.String(flagPrefix + "daemon-host"),
		DockerDaemonCertPath:              c.String(flagPrefix + "cert-dir"),
		DockerDaemonInsecureSkipTLSVerify: !c.BoolT(flagPrefix + "tls-verify"),
		RegistriesDirPath:                 opts.global.registriesDirPath,
		ArchitectureChoice:                opts.global.overrideArch,
		OSChoice:                          opts.global.overrideOS,
		DockerCertPath:                    opts.dockerCertPath,
		OCISharedBlobDirPath:              opts.sharedBlobDir,
		AuthFilePath:                      opts.shared.authFilePath,
		DockerDaemonHost:                  opts.dockerDaemonHost,
		DockerDaemonCertPath:              opts.dockerCertPath,
		SystemRegistriesConfPath:          opts.global.registriesConfPath,
	}
	if c.IsSet(flagPrefix + "tls-verify") {
		ctx.DockerInsecureSkipTLSVerify = !c.BoolT(flagPrefix + "tls-verify")
	if opts.tlsVerify.present {
		ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value
	}
	if c.IsSet(flagPrefix + "creds") {
	// DEPRECATED: We support this for backward compatibility, but override it if a per-image flag is provided.
	if opts.global.tlsVerify.present {
		ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.global.tlsVerify.value)
	}
	if opts.tlsVerify.present {
		ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
	}
	if opts.credsOption.present && opts.noCreds {
		return nil, errors.New("creds and no-creds cannot be specified at the same time")
	}
	if opts.credsOption.present {
		var err error
		ctx.DockerAuthConfig, err = getDockerAuth(c.String(flagPrefix + "creds"))
		ctx.DockerAuthConfig, err = getDockerAuth(opts.credsOption.value)
		if err != nil {
			return nil, err
		}
	}
	if opts.noCreds {
		ctx.DockerAuthConfig = &types.DockerAuthConfig{}
	}
	return ctx, nil
}

func commandTimeoutContextFromGlobalOptions(c *cli.Context) (context.Context, context.CancelFunc) {
	ctx := context.Background()
	var cancel context.CancelFunc = func() {}
	if c.GlobalDuration("command-timeout") > 0 {
		ctx, cancel = context.WithTimeout(ctx, c.GlobalDuration("command-timeout"))
// imageDestOptions is a superset of imageOptions specialized for image destinations.
type imageDestOptions struct {
	*imageOptions
	osTreeTmpDir        string // A directory to use for OSTree temporary files
	dirForceCompression bool   // Compress layers when saving to the dir: transport
}

// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) ([]cli.Flag, *imageDestOptions) {
	genericFlags, genericOptions := imageFlags(global, shared, flagPrefix, credsOptionAlias)
	opts := imageDestOptions{imageOptions: genericOptions}

	return append(genericFlags, []cli.Flag{
		cli.StringFlag{
			Name:        flagPrefix + "ostree-tmp-dir",
			Usage:       "`DIRECTORY` to use for OSTree temporary files",
			Destination: &opts.osTreeTmpDir,
		},
		cli.BoolFlag{
			Name:        flagPrefix + "compress",
			Usage:       "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)",
			Destination: &opts.dirForceCompression,
		},
	}...), &opts
}

// newSystemContext returns a *types.SystemContext corresponding to opts.
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
	ctx, err := opts.imageOptions.newSystemContext()
	if err != nil {
		return nil, err
	}
	return ctx, cancel

	ctx.OSTreeTmpDirPath = opts.osTreeTmpDir
	ctx.DirForceCompress = opts.dirForceCompression
	return ctx, err
}

func parseCreds(creds string) (string, string, error) {
@@ -76,13 +216,12 @@ func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {

// parseImage converts image URL-like string to an initialized handler for that image.
// The caller must call .Close() on the returned ImageCloser.
func parseImage(ctx context.Context, c *cli.Context) (types.ImageCloser, error) {
	imgName := c.Args().First()
	ref, err := alltransports.ParseImageName(imgName)
func parseImage(ctx context.Context, opts *imageOptions, name string) (types.ImageCloser, error) {
	ref, err := alltransports.ParseImageName(name)
	if err != nil {
		return nil, err
	}
	sys, err := contextFromGlobalOptions(c, "")
	sys, err := opts.newSystemContext()
	if err != nil {
		return nil, err
	}
@@ -91,12 +230,12 @@ func parseImage(ctx context.Context, c *cli.Context) (types.ImageCloser, error)

// parseImageSource converts image URL-like string to an ImageSource.
// The caller must call .Close() on the returned ImageSource.
func parseImageSource(ctx context.Context, c *cli.Context, name string) (types.ImageSource, error) {
func parseImageSource(ctx context.Context, opts *imageOptions, name string) (types.ImageSource, error) {
	ref, err := alltransports.ParseImageName(name)
	if err != nil {
		return nil, err
	}
	sys, err := contextFromGlobalOptions(c, "")
	sys, err := opts.newSystemContext()
	if err != nil {
		return nil, err
	}

184
cmd/skopeo/utils_test.go
Normal file
@@ -0,0 +1,184 @@
package main

import (
	"flag"
	"testing"

	"github.com/containers/image/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// fakeGlobalOptions creates globalOptions and sets it according to flags.
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
func fakeGlobalOptions(t *testing.T, flags []string) *globalOptions {
	app, opts := createApp()

	flagSet := flag.NewFlagSet(app.Name, flag.ContinueOnError)
	for _, f := range app.Flags {
		f.Apply(flagSet)
	}
	err := flagSet.Parse(flags)
	require.NoError(t, err)

	return opts
}

// fakeImageOptions creates imageOptions and sets it according to globalFlags/cmdFlags.
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
func fakeImageOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageOptions {
	globalOpts := fakeGlobalOptions(t, globalFlags)

	sharedFlags, sharedOpts := sharedImageFlags()
	imageFlags, imageOpts := imageFlags(globalOpts, sharedOpts, flagPrefix, "")
	flagSet := flag.NewFlagSet("fakeImageOptions", flag.ContinueOnError)
	for _, f := range append(sharedFlags, imageFlags...) {
		f.Apply(flagSet)
	}
	err := flagSet.Parse(cmdFlags)
	require.NoError(t, err)
	return imageOpts
}

func TestImageOptionsNewSystemContext(t *testing.T) {
	// Default state
	opts := fakeImageOptions(t, "dest-", []string{}, []string{})
	res, err := opts.newSystemContext()
	require.NoError(t, err)
	assert.Equal(t, &types.SystemContext{}, res)

	// Set everything to non-default values.
	opts = fakeImageOptions(t, "dest-", []string{
		"--registries.d", "/srv/registries.d",
		"--override-arch", "overridden-arch",
		"--override-os", "overridden-os",
	}, []string{
		"--authfile", "/srv/authfile",
		"--dest-cert-dir", "/srv/cert-dir",
		"--dest-shared-blob-dir", "/srv/shared-blob-dir",
		"--dest-daemon-host", "daemon-host.example.com",
		"--dest-tls-verify=false",
		"--dest-creds", "creds-user:creds-password",
	})
	res, err = opts.newSystemContext()
	require.NoError(t, err)
	assert.Equal(t, &types.SystemContext{
		RegistriesDirPath:                 "/srv/registries.d",
		AuthFilePath:                      "/srv/authfile",
		ArchitectureChoice:                "overridden-arch",
		OSChoice:                          "overridden-os",
		OCISharedBlobDirPath:              "/srv/shared-blob-dir",
		DockerCertPath:                    "/srv/cert-dir",
		DockerInsecureSkipTLSVerify:       types.OptionalBoolTrue,
		DockerAuthConfig:                  &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
		DockerDaemonCertPath:              "/srv/cert-dir",
		DockerDaemonHost:                  "daemon-host.example.com",
		DockerDaemonInsecureSkipTLSVerify: true,
	}, res)

	// Global/per-command tlsVerify behavior
	for _, c := range []struct {
		global, cmd          string
		expectedDocker       types.OptionalBool
		expectedDockerDaemon bool
	}{
		{"", "", types.OptionalBoolUndefined, false},
		{"", "false", types.OptionalBoolTrue, true},
		{"", "true", types.OptionalBoolFalse, false},
		{"false", "", types.OptionalBoolTrue, false},
		{"false", "false", types.OptionalBoolTrue, true},
		{"false", "true", types.OptionalBoolFalse, false},
		{"true", "", types.OptionalBoolFalse, false},
		{"true", "false", types.OptionalBoolTrue, true},
		{"true", "true", types.OptionalBoolFalse, false},
	} {
		globalFlags := []string{}
		if c.global != "" {
			globalFlags = append(globalFlags, "--tls-verify="+c.global)
		}
		cmdFlags := []string{}
		if c.cmd != "" {
			cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
		}
		opts := fakeImageOptions(t, "dest-", globalFlags, cmdFlags)
		res, err = opts.newSystemContext()
		require.NoError(t, err)
		assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
		assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
	}

	// Invalid option values
	opts = fakeImageOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
	_, err = opts.newSystemContext()
	assert.Error(t, err)
}

// fakeImageDestOptions creates imageDestOptions and sets it according to globalFlags/cmdFlags.
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
func fakeImageDestOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageDestOptions {
	globalOpts := fakeGlobalOptions(t, globalFlags)

	sharedFlags, sharedOpts := sharedImageFlags()
	imageFlags, imageOpts := imageDestFlags(globalOpts, sharedOpts, flagPrefix, "")
	flagSet := flag.NewFlagSet("fakeImageDestOptions", flag.ContinueOnError)
	for _, f := range append(sharedFlags, imageFlags...) {
		f.Apply(flagSet)
	}
	err := flagSet.Parse(cmdFlags)
	require.NoError(t, err)
	return imageOpts
}

func TestImageDestOptionsNewSystemContext(t *testing.T) {
	// Default state
	opts := fakeImageDestOptions(t, "dest-", []string{}, []string{})
	res, err := opts.newSystemContext()
	require.NoError(t, err)
	assert.Equal(t, &types.SystemContext{}, res)

	// Explicitly set everything to default, except for when the default is “not present”
	opts = fakeImageDestOptions(t, "dest-", []string{}, []string{
		"--dest-compress=false",
	})
	res, err = opts.newSystemContext()
	require.NoError(t, err)
	assert.Equal(t, &types.SystemContext{}, res)

	// Set everything to non-default values.
	opts = fakeImageDestOptions(t, "dest-", []string{
		"--registries.d", "/srv/registries.d",
		"--override-arch", "overridden-arch",
		"--override-os", "overridden-os",
	}, []string{
		"--authfile", "/srv/authfile",
		"--dest-cert-dir", "/srv/cert-dir",
		"--dest-ostree-tmp-dir", "/srv/ostree-tmp-dir",
		"--dest-shared-blob-dir", "/srv/shared-blob-dir",
		"--dest-compress=true",
		"--dest-daemon-host", "daemon-host.example.com",
		"--dest-tls-verify=false",
		"--dest-creds", "creds-user:creds-password",
	})
	res, err = opts.newSystemContext()
	require.NoError(t, err)
	assert.Equal(t, &types.SystemContext{
		RegistriesDirPath:                 "/srv/registries.d",
		AuthFilePath:                      "/srv/authfile",
		ArchitectureChoice:                "overridden-arch",
		OSChoice:                          "overridden-os",
		OCISharedBlobDirPath:              "/srv/shared-blob-dir",
		DockerCertPath:                    "/srv/cert-dir",
		DockerInsecureSkipTLSVerify:       types.OptionalBoolTrue,
		DockerAuthConfig:                  &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
		OSTreeTmpDirPath:                  "/srv/ostree-tmp-dir",
		DockerDaemonCertPath:              "/srv/cert-dir",
		DockerDaemonHost:                  "daemon-host.example.com",
		DockerDaemonInsecureSkipTLSVerify: true,
		DirForceCompress:                  true,
	}, res)

	// Invalid option values in imageOptions
	opts = fakeImageDestOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
	_, err = opts.newSystemContext()
	assert.Error(t, err)
}
@@ -5,20 +5,37 @@
_complete_() {
	local options_with_args=$1
	local boolean_options="$2 -h --help"
	local transports=$3

	case "$prev" in
		$options_with_args)
			return
			;;
	esac
	local option_with_args
	for option_with_args in $options_with_args $transports
	do
		if [ "$option_with_args" == "$prev" -o "$option_with_args" == "$cur" ]
		then
			return
		fi
	done

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
			;;
		-*)
			COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
			;;
		*)
			if [ -n "$transports" ]
			then
				compopt -o nospace
				COMPREPLY=( $( compgen -W "$transports" -- "$cur" ) )
			fi
			;;
	esac
}

_skopeo_supported_transports() {
	local subcommand=$1

	${PROG} $subcommand --help | grep "Supported transports" -A 1 | tail -n 1 | sed -e 's/,/:/g' -e 's/$/:/'
}

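To make that pipeline concrete, a sketch of what it produces (the exact `--help` wording and transport list are assumptions):

```sh
# Illustrative only: given `skopeo copy --help` output containing
#   Supported transports:
#   containers-storage, dir, docker, docker-archive, oci, ostree
# the grep/tail/sed pipeline yields colon-suffixed completion candidates:
#   containers-storage: dir: docker: docker-archive: oci: ostree:
```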
_skopeo_copy() {
	local options_with_args="
	--authfile
@@ -38,9 +55,15 @@ _skopeo_copy() {
	local boolean_options="
	--dest-compress
	--remove-signatures
	--src-no-creds
	--dest-no-creds
	"

	_complete_ "$options_with_args" "$boolean_options"
	local transports="
	$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
	"

	_complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_inspect() {
@@ -50,15 +73,22 @@ _skopeo_inspect() {
	--cert-dir
	"
	local boolean_options="
	--config
	--raw
	--tls-verify
	--no-creds
	"
	_complete_ "$options_with_args" "$boolean_options"

	local transports="
	$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
	"

	_complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_standalone_sign() {
	local options_with_args="
	-o --output
	-o --output
	"
	local boolean_options="
	"
@@ -89,50 +119,56 @@ _skopeo_delete() {
	"
	local boolean_options="
	--tls-verify
	--no-creds
	"
	_complete_ "$options_with_args" "$boolean_options"

	local transports="
	$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
	"

	_complete_ "$options_with_args" "$boolean_options" "$transports"
}

_skopeo_layers() {
	local options_with_args="
	--creds
	--cert-dir
	--creds
	--cert-dir
	"
	local boolean_options="
	--tls-verify
	--tls-verify
	"
	_complete_ "$options_with_args" "$boolean_options"
}

_skopeo_skopeo() {
	local options_with_args="
	--policy
	--registries.d
	--policy
	--registries.d
	--override-arch
	--override-os
	--command-timeout
	"
	local boolean_options="
	--insecure-policy
	--debug
	--version -v
	--help -h
	--insecure-policy
	--debug
	--version -v
	--help -h
	"
	commands=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )

	case "$prev" in
		$main_options_with_args_glob )
			return
			;;
		$main_options_with_args_glob )
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
			;;
		*)
			COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
			;;
		-*)
			COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
			;;
		*)
			COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
			;;
	esac
}

@@ -150,15 +186,17 @@ _cli_bash_autocomplete() {
	local counter=1
	counter=1
	while [ $counter -lt $cword ]; do
		case "!${words[$counter]}" in
			*)
				command=$(echo "${words[$counter]}" | sed 's/-/_/g')
				cpos=$counter
				(( cpos++ ))
				break
				;;
		esac
		(( counter++ ))
		case "${words[$counter]}" in
			-*)
				;;
			*)
				command=$(echo "${words[$counter]}" | sed 's/-/_/g')
				cpos=$counter
				(( cpos++ ))
				break
				;;
		esac
		(( counter++ ))
	done

	local completions_func=_skopeo_${command}

83
docs/skopeo-copy.1.md
Normal file
@@ -0,0 +1,83 @@
% skopeo-copy(1)

## NAME
skopeo\-copy - Copy an image (manifest, filesystem layers, signatures) from one location to another.

## SYNOPSIS
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_

## DESCRIPTION
Copy an image (manifest, filesystem layers, signatures) from one location to another.

Uses the system's trust policy to validate images; images not trusted by the policy are rejected.

_source-image_ uses the "image name" format described above

_destination-image_ uses the "image name" format described above

## OPTIONS

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source; see EXAMPLES below)

**--quiet, -q** suppress output information when copying images

**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.

**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_

**--src-creds** _username[:password]_ for accessing the source registry

**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)

**--dest-creds** _username[:password]_ for accessing the destination registry

**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon

**--src-no-creds** _bool-value_ Access the registry anonymously.

**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true)

**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon

**--dest-no-creds** _bool-value_ Access the registry anonymously.

**--dest-ostree-tmp-dir** _path_ Directory to use for OSTree temporary files.

**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true)

**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

**--dest-daemon-host** _host_ Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

Existing signatures, if any, are preserved as well.

## EXAMPLES

To copy the layers of the docker.io busybox image to a local directory:
```sh
$ mkdir -p /var/lib/images/busybox
$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
$ ls /var/lib/images/busybox/*
/var/lib/images/busybox/2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749.tar
/var/lib/images/busybox/manifest.json
/var/lib/images/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
```

To copy and sign an image:

```sh
$ skopeo copy --sign-by dev@example.com atomic:example/busybox:streaming atomic:example/busybox:gold
```
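An illustrative use of **--format** together with the `dir:` transport (the image name and destination path here are assumptions, not taken from this change):

```sh
$ skopeo copy --format oci docker://busybox:latest dir:/var/lib/images/busybox-oci
```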

## SEE ALSO
skopeo(1), podman-login(1), docker-login(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

52
docs/skopeo-delete.1.md
Normal file
@@ -0,0 +1,52 @@
% skopeo-delete(1)

## NAME
skopeo\-delete - Mark _image-name_ for deletion.

## SYNOPSIS
**skopeo delete** _image-name_

Mark _image-name_ for deletion. To release the allocated disk space, you must log in to the container registry server and execute the container registry garbage collector. E.g.,

```
/usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml

Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml

If you are running the container registry inside of a container you would execute something like:

$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml

```

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

**--no-creds** _bool-value_ Access the registry anonymously.

Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
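For instance, a registry run as a container could be started with deletions enabled like this (an illustrative sketch; the image tag and port mapping are assumptions):

```sh
$ docker run -d -p 5000:5000 -e REGISTRY_STORAGE_DELETE_ENABLED=true registry:2
```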
## EXAMPLES

Mark image example/pause for deletion from the registry.example.com registry:
```sh
$ skopeo delete docker://registry.example.com/example/pause:latest
```
See above for additional details on using the command **delete**.


## SEE ALSO
skopeo(1), podman-login(1), docker-login(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

71
docs/skopeo-inspect.1.md
Normal file
@@ -0,0 +1,71 @@
% skopeo-inspect(1)

## NAME
skopeo\-inspect - Return low-level information about _image-name_ in a registry

## SYNOPSIS
**skopeo inspect** [**--raw**] [**--config**] _image-name_

Return low-level information about _image-name_ in a registry

**--raw** output raw manifest, default is to format in JSON

_image-name_ name of image to retrieve information about

**--config** output configuration in OCI format, default is to format in JSON

_image-name_ name of image to retrieve configuration for

**--config** **--raw** output configuration in raw format

_image-name_ name of image to retrieve configuration for

**--authfile** _path_

Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

**--no-creds** _bool-value_ Access the registry anonymously.

## EXAMPLES

To review information for the image fedora from the docker.io registry:
```sh
$ skopeo inspect docker://docker.io/fedora
{
    "Name": "docker.io/library/fedora",
    "Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
    "RepoTags": [
        "20",
        "21",
        "22",
        "23",
        "24",
        "heisenbug",
        "latest",
        "rawhide"
    ],
    "Created": "2016-06-20T19:33:43.220526898Z",
    "DockerVersion": "1.10.3",
    "Labels": {},
    "Architecture": "amd64",
    "Os": "linux",
    "Layers": [
        "sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
    ]
}
```
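To fetch the image configuration instead of the manifest, add **--config** as documented above; an illustrative invocation (output elided, since it depends on the image):

```sh
$ skopeo inspect --config docker://docker.io/fedora
```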
## SEE ALSO
skopeo(1), podman-login(1), docker-login(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

26
docs/skopeo-manifest-digest.1.md
Normal file
@@ -0,0 +1,26 @@
% skopeo-manifest-digest(1)

## NAME
skopeo\-manifest\-digest - Compute a manifest digest of manifest-file and write it to standard output.

## SYNOPSIS
**skopeo manifest-digest** _manifest-file_

## DESCRIPTION

Compute a manifest digest of _manifest-file_ and write it to standard output.

## EXAMPLES

```sh
$ skopeo manifest-digest manifest.json
sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
```

## SEE ALSO
skopeo(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

34
docs/skopeo-standalone-sign.1.md
Normal file
@@ -0,0 +1,34 @@
% skopeo-standalone-sign(1)

## NAME
skopeo\-standalone-sign - Sign an image using local files

## SYNOPSIS
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_

## DESCRIPTION
This is primarily a debugging tool, or useful for special cases,
and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference to identify the image with

_key-fingerprint_ Key identity to use for signing

**--output**|**-o** output file

## EXAMPLES

```sh
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
$
```

## SEE ALSO
skopeo(1), skopeo-copy(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

36
docs/skopeo-standalone-verify.1.md
Normal file
@@ -0,0 +1,36 @@
% skopeo-standalone-verify(1)

## NAME
skopeo\-standalone\-verify - Verify an image signature

## SYNOPSIS
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_

## DESCRIPTION

Verify a signature using local files; the digest is printed on success.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference expected to identify the image in the signature

_key-fingerprint_ Expected identity of the signing key

_signature_ Path to signature file

**Note:** If you do use this, make sure that the image cannot be changed at the source location between the times of its verification and use.

## EXAMPLES

```sh
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```

## SEE ALSO
skopeo(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

243
docs/skopeo.1.md
@@ -1,11 +1,13 @@
% SKOPEO(1) Skopeo Man Pages
% Jhon Honce
% August 2016
# NAME
## NAME
skopeo -- Command line utility used to interact with local and remote container images and container image registries
# SYNOPSIS

## SYNOPSIS
**skopeo** [_global options_] _command_ [_command options_]
# DESCRIPTION

## DESCRIPTION
`skopeo` is a command line utility providing various operations with container images and container image registries.

`skopeo` can copy container images between various containers image stores, converting them as necessary. For example you can use `skopeo` to copy container images from one container registry to another.
@@ -31,7 +33,7 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

**docker://**_docker-reference_
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(kpod login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(podman login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.

**docker-archive:**_path_[**:**_docker-reference_]
An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.
@@ -45,7 +47,7 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
**ostree:**_image_[**@**_/absolute/repo/path_]
An image in local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_.

# OPTIONS
## OPTIONS

**--debug** enable debug output

@@ -65,230 +67,29 @@ Most commands refer to container images, using a _transport_`:`_details_ format.

**--version**|**-v** print the version number

# COMMANDS
## COMMANDS

## skopeo copy
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_
| Command | Description |
| ----------------------------------------- | ------------------------------------------------------------------------------ |
| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
| [skopeo-delete(1)](skopeo-delete.1.md) | Mark image-name for deletion. |
| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest of manifest-file and write it to standard output.|
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Sign an image. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image. |

Copy an image (manifest, filesystem layers, signatures) from one location to another.

Uses the system's trust policy to validate images, rejects images not trusted by the policy.

_source-image_ use the "image name" format described above

_destination-image_ use the "image name" format described above

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)

**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.

**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_

**--src-creds** _username[:password]_ for accessing the source registry

**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)

**--dest-creds** _username[:password]_ for accessing the destination registry

**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon

**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true)

**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon

**--dest-ostree-tmp-dir** _path_ Directory to use for OSTree temporary files.

**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true)

**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

**--dest-daemon-host** _host_ Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

Existing signatures, if any, are preserved as well.

## skopeo delete
**skopeo delete** _image-name_

Mark _image-name_ for deletion. To release the allocated disk space, you must login to the container registry server and execute the container registry garbage collector. E.g.,

```
/usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
|
||||
|
||||
Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml
|
||||
|
||||
If you are running the container registry inside of a container you would execute something like:
|
||||
|
||||
$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
|
||||
|
||||
```
|
||||
|
||||
**--authfile** _path_
|
||||
|
||||
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
|
||||
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
|
||||
|
||||
**--creds** _username[:password]_ for accessing the registry
|
||||
|
||||
**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry
|
||||
|
||||
**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
|
||||
|
||||
Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
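
If the registry runs as a container from the stock registry image, that switch can simply be passed as an environment variable; a sketch (the `registry:2` image name and port mapping are assumptions about your deployment):

```sh
$ docker run -d -p 5000:5000 -e REGISTRY_STORAGE_DELETE_ENABLED=true registry:2
```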

## skopeo inspect
**skopeo inspect** [**--raw**] _image-name_

Return low-level information about _image-name_ in a registry

**--raw** output raw manifest, default is to format in JSON

_image-name_ name of image to retrieve information about

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

## skopeo manifest-digest
**skopeo manifest-digest** _manifest-file_

Compute a manifest digest of _manifest-file_ and write it to standard output.

## skopeo standalone-sign
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_

This is primarily a debugging tool, or useful for special cases,
and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference to identify the image with

_key-fingerprint_ Key identity to use for signing

**--output**|**-o** output file

## skopeo standalone-verify
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_

Verify a signature using local files; the digest will be printed on success.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference expected to identify the image in the signature

_key-fingerprint_ Expected identity of the signing key

_signature_ Path to signature file

**Note:** If you do use this, make sure that the image can not be changed at the source location between the times of its verification and use.

## skopeo help
show help for `skopeo`

# FILES
## FILES
**/etc/containers/policy.json**
Default trust policy file, if **--policy** is not specified.
The policy format is documented in https://github.com/containers/image/blob/master/docs/policy.json.md .
The policy format is documented in https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md .
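
For orientation, a minimal (and deliberately permissive) policy in that format looks like this; signature-enforcing setups would pin GPG keys per registry instead:

```sh
$ cat /etc/containers/policy.json
{
    "default": [
        {
            "type": "insecureAcceptAnything"
        }
    ]
}
```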

**/etc/containers/registries.d**
Default directory containing registry configuration, if **--registries.d** is not specified.
The contents of this directory are documented in https://github.com/containers/image/blob/master/docs/registries.d.md .
The contents of this directory are documented in https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md .

# EXAMPLES
## SEE ALSO
podman-login(1), docker-login(1)

## skopeo copy
To copy the layers of the docker.io busybox image to a local directory:
```sh
$ mkdir -p /var/lib/images/busybox
$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
$ ls /var/lib/images/busybox/*
/var/lib/images/busybox/2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749.tar
/var/lib/images/busybox/manifest.json
/var/lib/images/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
```

To copy and sign an image:

```sh
$ skopeo copy --sign-by dev@example.com atomic:example/busybox:streaming atomic:example/busybox:gold
```
## skopeo delete
Mark image example/pause for deletion from the registry.example.com registry:
```sh
$ skopeo delete --force docker://registry.example.com/example/pause:latest
```
See above for additional details on using the command **delete**.

## skopeo inspect
To review information for the image fedora from the docker.io registry:
```sh
$ skopeo inspect docker://docker.io/fedora
{
    "Name": "docker.io/library/fedora",
    "Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
    "RepoTags": [
        "20",
        "21",
        "22",
        "23",
        "24",
        "heisenbug",
        "latest",
        "rawhide"
    ],
    "Created": "2016-06-20T19:33:43.220526898Z",
    "DockerVersion": "1.10.3",
    "Labels": {},
    "Architecture": "amd64",
    "Os": "linux",
    "Layers": [
        "sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
    ]
}
```
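
Because the default output is JSON, it composes with standard tooling; for example, to pull out just the digest (assuming `jq` is installed):

```sh
$ skopeo inspect docker://docker.io/fedora | jq -r '.Digest'
sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d
```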
## skopeo layers
Another method to retrieve the layers for the busybox image from the docker.io registry:
```sh
$ skopeo layers docker://busybox
$ ls layers-500650331/
8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
manifest.json
a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4.tar
```
## skopeo manifest-digest
```sh
$ skopeo manifest-digest manifest.json
sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
```
## skopeo standalone-sign
```sh
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
$
```

See `skopeo copy` above for the preferred method of signing images.
## skopeo standalone-verify
```sh
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```

# SEE ALSO
kpod-login(1), docker-login(1)

# AUTHORS
## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>

7 hack/btrfs_installed_tag.sh Executable file
@@ -0,0 +1,7 @@
#!/bin/bash
cc -E - > /dev/null 2> /dev/null << EOF
#include <btrfs/ioctl.h>
EOF
if test $? -ne 0 ; then
	echo exclude_graphdriver_btrfs
fi
6 hack/ostree_tag.sh Executable file
@@ -0,0 +1,6 @@
#!/bin/bash
if pkg-config ostree-1 2> /dev/null ; then
	echo ostree
else
	echo containers_image_ostree_stub
fi
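
Both probe scripts print Go build tags on stdout, so missing C headers or pkg-config entries compile the corresponding support out. Roughly how a build might consume them (the exact invocation is a sketch, not the Makefile's literal rule):

```sh
$ go build -tags "$(hack/btrfs_installed_tag.sh) $(hack/ostree_tag.sh)" ./cmd/skopeo
```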
13 hack/tree_status.sh Executable file
@@ -0,0 +1,13 @@
#!/bin/bash
set -e

STATUS=$(git status --porcelain)
if [[ -z $STATUS ]]
then
	echo "tree is clean"
else
	echo "tree is dirty, please commit all changes and sync the vendor.conf"
	echo ""
	echo "$STATUS"
	exit 1
fi
@@ -662,3 +662,37 @@ func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
	mimeType := manifest.GuessMIMEType(manifestBlob)
	c.Assert(mimeType, check.Equals, expectedMIMEType)
}

const regConfFixture = "./fixtures/registries.conf"

func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) {
	dir, err := ioutil.TempDir("", "copy-mirror")
	c.Assert(err, check.IsNil)

	assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
		"docker://mirror.invalid/busybox", "dir:"+dir)
}

func (s *SkopeoSuite) TestFailureCopySrcWithMirrorsUnavailable(c *check.C) {
	dir, err := ioutil.TempDir("", "copy-mirror")
	c.Assert(err, check.IsNil)

	assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
		"docker://invalid.invalid/busybox", "dir:"+dir)
}

func (s *SkopeoSuite) TestSuccessCopySrcWithMirrorAndPrefix(c *check.C) {
	dir, err := ioutil.TempDir("", "copy-mirror")
	c.Assert(err, check.IsNil)

	assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
		"docker://gcr.invalid/foo/bar/busybox", "dir:"+dir)
}

func (s *SkopeoSuite) TestFailureCopySrcWithMirrorAndPrefixUnavailable(c *check.C) {
	dir, err := ioutil.TempDir("", "copy-mirror")
	c.Assert(err, check.IsNil)

	assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
		"docker://gcr.invalid/wrong/prefix/busybox", "dir:"+dir)
}

28 integration/fixtures/registries.conf Normal file
@@ -0,0 +1,28 @@
[[registry]]
location = "mirror.invalid"
mirror = [
  { location = "mirror-0.invalid" },
  { location = "mirror-1.invalid" },
  { location = "gcr.io/google-containers" },
]

# This entry is currently unused and exists only to ensure
# that the mirror.invalid/busybox is not rewritten twice.
[[registry]]
location = "gcr.io"
prefix = "gcr.io/google-containers"

[[registry]]
location = "invalid.invalid"
mirror = [
  { location = "invalid-mirror-0.invalid" },
  { location = "invalid-mirror-1.invalid" },
]

[[registry]]
location = "gcr.invalid"
prefix = "gcr.invalid/foo/bar"
mirror = [
  { location = "wrong-mirror-0.invalid" },
  { location = "gcr.io/google-containers" },
]
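
The fixture ties directly to the tests above: with it in force, a reference under `mirror.invalid` is retried against each listed mirror in order, and only the final `gcr.io/google-containers` entry resolves. A sketch of exercising it by hand (the destination directory is arbitrary):

```sh
$ skopeo --registries-conf=integration/fixtures/registries.conf \
    copy docker://mirror.invalid/busybox dir:/tmp/busybox-mirror
```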
69 vendor.conf
@@ -1,18 +1,24 @@
github.com/urfave/cli v1.17.0
github.com/urfave/cli v1.20.0
github.com/kr/pretty v0.1.0
github.com/kr/text v0.1.0
github.com/containers/image master
github.com/opencontainers/go-digest master
gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
github.com/containers/storage master
github.com/containers/image 2c0349c99af7d90694b3faa0e9bde404d407b145
github.com/containers/buildah 810efa340ab43753034e2ed08ec290e4abab7e72
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/containers/storage v1.12.3
github.com/sirupsen/logrus v1.0.0
github.com/go-check/check v1
github.com/stretchr/testify v1.1.3
github.com/davecgh/go-spew master
github.com/pmezard/go-difflib master
github.com/pkg/errors master
golang.org/x/crypto master
github.com/davecgh/go-spew v1.1.1
github.com/pmezard/go-difflib 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
github.com/pkg/errors v0.8.1
golang.org/x/crypto a4c6cb3142f211c99e4bf4cd769535b29a9b616f
github.com/ulikunitz/xz v0.5.4
github.com/etcd-io/bbolt v1.3.2
# docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341
github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
@@ -21,46 +27,41 @@ github.com/vbatts/tar-split v0.10.2
github.com/gorilla/context 14f550f51a
github.com/gorilla/mux e444e69cbd
github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97
golang.org/x/net master
golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
# end docker deps
golang.org/x/text master
github.com/docker/distribution master
golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
# docker/distributions dependencies
github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564
github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
# end of docker/distribution dependencies
github.com/docker/libtrust master
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/opencontainers/runc master
github.com/opencontainers/image-spec unique-ids https://github.com/mtrmac/image-spec
github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/image-spec 7b1e489870acb042978a3935d2fb76f8a79aff81
# -- start OCI image validation requirements.
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/image-tools 6d941547fa1df31900990b3fb47ec2468c9c6469
github.com/xeipuuv/gojsonschema master
github.com/xeipuuv/gojsonreference master
github.com/xeipuuv/gojsonpointer master
go4.org master https://github.com/camlistore/go4
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema v1.1.0
go4.org ce4c26f7be8eb27dc77f996b08d286dd80bc4a01 https://github.com/camlistore/go4
github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce
# -- end OCI image validation requirements
github.com/mtrmac/gpgme master
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
# openshift/origin' k8s dependencies as of OpenShift v1.1.5
k8s.io/client-go master
k8s.io/client-go kubernetes-1.10.13-beta.0
github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4
github.com/imdario/mergo 6633656539c1639d9d78127b7d47c622b5d7b6dc
# containers/storage's dependencies that aren't already being pulled in
github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
github.com/pborman/uuid v1.0
github.com/opencontainers/selinux master
golang.org/x/sys master
github.com/opencontainers/selinux v1.1
golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
github.com/tchap/go-patricia v2.2.6
github.com/BurntSushi/toml master
github.com/BurntSushi/toml v0.3.1
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
github.com/syndtr/gocapability master
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
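
For readers scanning the pin changes above: each vendor.conf entry is of the form `import-path pin [clone-URL]`, where the pin may be a tag, a branch, or a commit hash, and the optional third field redirects the vendoring tool to a different upstream. All three shapes appear in the entries above, e.g.:

```
github.com/klauspost/pgzip v1.2.1
github.com/containers/image 2c0349c99af7d90694b3faa0e9bde404d407b145
gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
```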
21 vendor/github.com/VividCortex/ewma/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License

Copyright (c) 2013 VividCortex

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
140 vendor/github.com/VividCortex/ewma/README.md generated vendored Normal file
@@ -0,0 +1,140 @@
# EWMA [](https://godoc.org/github.com/VividCortex/ewma)

This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our
Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/).

### Exponentially Weighted Moving Average

An exponentially weighted moving average is a way to continuously compute a type of
average for a series of numbers, as the numbers arrive. After a value in the series is
added to the average, its weight in the average decreases exponentially over time. This
biases the average towards more recent data. EWMAs are useful for several reasons, chiefly
their inexpensive computational and memory cost, as well as the fact that they represent
the recent central tendency of the series of values.

The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average
is biased towards recent history. The alpha must be between 0 and 1, and is typically
a fairly small number, such as 0.04. We will discuss the choice of alpha later.

The algorithm works thus, in pseudocode:

1. Multiply the next number in the series by alpha.
2. Multiply the current value of the average by 1 minus alpha.
3. Add the result of steps 1 and 2, and store it as the new current value of the average.
4. Repeat for each number in the series.
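
In symbols, writing S_t for the average after the t-th sample x_t and alpha for the decay factor, the four steps collapse to a single recurrence (initialization of the first value is left to the implementation, as discussed below):

```latex
S_t = \alpha \, x_t + (1 - \alpha) \, S_{t-1}
```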

There are special-case behaviors for how to initialize the current value, and these vary
between implementations. One approach is to start with the first value in the series;
another is to average the first 10 or so values in the series using an arithmetic average,
and then begin the incremental updating of the average. Each method has pros and cons.

It may help to look at it pictorially. Suppose the series has five numbers, and we choose
alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300.

*(figure not preserved in this mirror: the raw five-sample series)*

Now let's take the moving average of those numbers. First we set the average to the value
of the first number.

*(figure not preserved in this mirror: the average seeded with the first sample)*

Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add
them to generate a new value.

*(figure not preserved in this mirror: the second update step)*

This continues until we are done.

*(figure not preserved in this mirror: the fully updated average)*

Notice how each of the values in the series decays by half each time a new value
is added, and the top of the bars in the lower portion of the image represents the
size of the moving average. It is a smoothed, or low-pass, average of the original
series.

For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia.

### Choosing Alpha

Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average)
that averages over the previous N samples. What is the average age of each sample? It is N/2.

Now suppose that you wish to construct an EWMA whose samples have the same average age. The formula
to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book
"Production and Operations Analysis" by Steven Nahmias.

So, for example, if you have a time-series with samples once per second, and you want to get the
moving average over the previous minute, you should use an alpha of .032786885. This, by the way,
is the constant alpha used for this repository's SimpleEWMA.
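
Worked through for that example, the window is N = 60 one-second samples, so:

```latex
\alpha = \frac{2}{N + 1} = \frac{2}{61} \approx 0.032786885
```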

### Implementations

This repository contains two implementations of the EWMA algorithm, with different properties.

The implementations all conform to the MovingAverage interface, and the constructor returns
that type.

Current implementations assume an implicit time interval of 1.0 between every sample added.
That is, the passage of time is treated as though it's the same as the arrival of samples.
If you need time-based decay when samples are not arriving precisely at set intervals, then
this package will not support your needs at present.

#### SimpleEWMA

A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA
for multiple reasons. It has no warm-up period and it uses a constant
decay. These properties let it use less memory. It will also behave
differently when it's equal to zero, which is assumed to mean
uninitialized, so if a value is likely to actually become zero over time,
then any non-zero value will cause a sharp jump instead of a small change.

#### VariableEWMA

Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory.
It also has a "warmup" time when you start adding values to it. It will report a value of 0.0
until you have added the required number of samples to it. It uses some memory to store the
number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA.

## Usage

### API Documentation

View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma).

```go
package main
import "github.com/VividCortex/ewma"

func main() {
	samples := [100]float64{
		4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
	}

	e := ewma.NewMovingAverage()  //=> Returns a SimpleEWMA if called without params
	a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)

	for _, f := range samples {
		e.Add(f)
		a.Add(f)
	}

	e.Value() //=> 13.577404704631077
	a.Value() //=> 1.5806140565521463e-12
}
```

## Contributing

We only accept pull requests for minor fixes or improvements. This includes:

* Small bug fixes
* Typos
* Documentation or comments

Please open issues to discuss new features. Pull requests for new features will be rejected,
so we recommend forking the repository and making changes in your fork for your use case.

## License

This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved.
It is licensed under the MIT license. Please see the LICENSE file for applicable license terms.
126 vendor/github.com/VividCortex/ewma/ewma.go generated vendored Normal file
@@ -0,0 +1,126 @@
// Package ewma implements exponentially weighted moving averages.
package ewma

// Copyright (c) 2013 VividCortex, Inc. All rights reserved.
// Please see the LICENSE file for applicable license terms.

const (
	// By default, we average over a one-minute period, which means the average
	// age of the metrics in the period is 30 seconds.
	AVG_METRIC_AGE float64 = 30.0

	// The formula for computing the decay factor from the average age comes
	// from "Production and Operations Analysis" by Steven Nahmias.
	DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1)

	// For best results, the moving average should not be initialized to the
	// samples it sees immediately. The book "Production and Operations
	// Analysis" by Steven Nahmias suggests initializing the moving average to
	// the mean of the first 10 samples. Until the VariableEwma has seen this
	// many samples, it is not "ready" to be queried for the value of the
	// moving average. This adds some memory cost.
	WARMUP_SAMPLES uint8 = 10
)

// MovingAverage is the interface that computes a moving average over a time-
// series stream of numbers. The average may be over a window or exponentially
// decaying.
type MovingAverage interface {
	Add(float64)
	Value() float64
	Set(float64)
}

// NewMovingAverage constructs a MovingAverage that computes an average with the
// desired characteristics in the moving window or exponential decay. If no
// age is given, it constructs a default exponentially weighted implementation
// that consumes minimal memory. The age is related to the decay factor alpha
// by the formula given for the DECAY constant. It signifies the average age
// of the samples as time goes to infinity.
func NewMovingAverage(age ...float64) MovingAverage {
	if len(age) == 0 || age[0] == AVG_METRIC_AGE {
		return new(SimpleEWMA)
	}
	return &VariableEWMA{
		decay: 2 / (age[0] + 1),
	}
}

// A SimpleEWMA represents the exponentially weighted moving average of a
// series of numbers. It WILL have different behavior than the VariableEWMA
// for multiple reasons. It has no warm-up period and it uses a constant
// decay. These properties let it use less memory. It will also behave
// differently when it's equal to zero, which is assumed to mean
// uninitialized, so if a value is likely to actually become zero over time,
// then any non-zero value will cause a sharp jump instead of a small change.
// However, note that this takes a long time, and the value may just
// decay to a stable value that's close to zero, but which won't be mistaken
// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example.
type SimpleEWMA struct {
	// The current value of the average. After adding with Add(), this is
	// updated to reflect the average of all values seen thus far.
	value float64
}

// Add adds a value to the series and updates the moving average.
func (e *SimpleEWMA) Add(value float64) {
	if e.value == 0 { // this is a proxy for "uninitialized"
		e.value = value
	} else {
		e.value = (value * DECAY) + (e.value * (1 - DECAY))
	}
}

// Value returns the current value of the moving average.
func (e *SimpleEWMA) Value() float64 {
	return e.value
}

// Set sets the EWMA's value.
func (e *SimpleEWMA) Set(value float64) {
	e.value = value
}

// VariableEWMA represents the exponentially weighted moving average of a series of
// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory.
type VariableEWMA struct {
	// The multiplier factor by which the previous samples decay.
	decay float64
	// The current value of the average.
	value float64
	// The number of samples added to this instance.
	count uint8
}

// Add adds a value to the series and updates the moving average.
func (e *VariableEWMA) Add(value float64) {
	switch {
	case e.count < WARMUP_SAMPLES:
		e.count++
		e.value += value
	case e.count == WARMUP_SAMPLES:
		e.count++
		e.value = e.value / float64(WARMUP_SAMPLES)
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	default:
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	}
}

// Value returns the current value of the average, or 0.0 if the series hasn't
// warmed up yet.
func (e *VariableEWMA) Value() float64 {
	if e.count <= WARMUP_SAMPLES {
		return 0.0
	}

	return e.value
}

// Set sets the EWMA's value.
func (e *VariableEWMA) Set(value float64) {
	e.value = value
	if e.count <= WARMUP_SAMPLES {
		e.count = WARMUP_SAMPLES + 1
	}
}
20 vendor/github.com/beorn7/perks/LICENSE generated vendored
@@ -1,20 +0,0 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
292 vendor/github.com/beorn7/perks/quantile/stream.go generated vendored
@@ -1,292 +0,0 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for quantile, epsilon := range targets {
			if quantile*s.n <= r {
				f = (2 * epsilon * r) / quantile
			} else {
				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

func (s *stream) count() int {
	return int(s.n)
}

func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
129 vendor/github.com/containers/buildah/README.md generated vendored Normal file
@@ -0,0 +1,129 @@


# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images

[](https://goreportcard.com/report/github.com/containers/buildah)
[](https://travis-ci.org/containers/buildah)

The Buildah package provides a command line tool that can be used to
* create a working container, either from scratch or using an image as a starting point
* create an image, either from a working container or via the instructions in a Dockerfile
* images can be built in either the OCI image format or the traditional upstream docker image format
* mount a working container's root filesystem for manipulation
* unmount a working container's root filesystem
* use the updated contents of a container's root filesystem as a filesystem layer to create a new image
* delete a working container or an image
* rename a local container

## Buildah Information for Developers

For blogs, release announcements and more, please check out the [buildah.io](https://buildah.io) website!

**[Buildah Demos](demos)**

**[Changelog](CHANGELOG.md)**

**[Contributing](CONTRIBUTING.md)**

**[Development Plan](developmentplan.md)**

**[Installation notes](install.md)**

**[Troubleshooting Guide](troubleshooting.md)**

**[Tutorials](docs/tutorials)**

## Buildah and Podman relationship

Buildah and Podman are two complementary open-source projects that are
available on most Linux platforms and both projects reside at
[GitHub.com](https://github.com) with Buildah
[here](https://github.com/containers/buildah) and Podman
[here](https://github.com/containers/libpod). Both Buildah and Podman are
command line tools that work on Open Container Initiative (OCI) images and
containers. The two projects differentiate in their specialization.

Buildah specializes in building OCI images. Buildah's commands replicate all
of the commands that are found in a Dockerfile. This allows building images
with and without Dockerfiles while not requiring any root privileges.
Buildah’s ultimate goal is to provide a lower-level coreutils interface to
build images. The flexibility of building images without Dockerfiles allows
for the integration of other scripting languages into the build process.
Buildah follows a simple fork-exec model and does not run as a daemon
but it is based on a comprehensive API in golang, which can be vendored
into other tools.

Podman specializes in all of the commands and functions that help you to maintain and modify
OCI images, such as pulling and tagging. It also allows you to create, run, and maintain those containers
created from those images.

A major difference between Podman and Buildah is their concept of a container. Podman
allows users to create "traditional containers" where the intent of these containers is
to be long lived, while Buildah containers are really just created to allow content
to be added back to the container image. An easy way to think of it is that the
`buildah run` command emulates the RUN command in a Dockerfile while the `podman run`
command emulates the `docker run` command in functionality. Because of this and their underlying
storage differences, you cannot see Podman containers from within Buildah or vice versa.

In short, Buildah is an efficient way to create OCI images while Podman allows
you to manage and maintain those images and containers in a production environment using
familiar container cli commands. For more details, see the
[Container Tools Guide](https://github.com/containers/buildah/tree/master/docs/containertools).

## Example

From [`./examples/lighttpd.sh`](examples/lighttpd.sh):

```bash
$ cat > lighttpd.sh <<"EOF"
#!/bin/bash -x

ctr1=$(buildah from "${1:-fedora}")

## Get all updates and install our minimal httpd server
buildah run "$ctr1" -- dnf update -y
buildah run "$ctr1" -- dnf install -y lighttpd

## Include some buildtime annotations
buildah config --annotation "com.example.build.host=$(uname -n)" "$ctr1"

## Run our server and expose the port
buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" "$ctr1"
buildah config --port 80 "$ctr1"

## Commit this container to an image name
buildah commit "$ctr1" "${2:-$USER/lighttpd}"
EOF

$ chmod +x lighttpd.sh
$ sudo ./lighttpd.sh
```

## Commands
| Command | Description |
| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
| [buildah-add(1)](/docs/buildah-add.md) | Add the contents of a file, URL, or a directory to the container. |
| [buildah-bud(1)](/docs/buildah-bud.md) | Build an image using instructions from Dockerfiles. |
| [buildah-commit(1)](/docs/buildah-commit.md) | Create an image from a working container. |
| [buildah-config(1)](/docs/buildah-config.md) | Update image configuration settings. |
| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images. |
| [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
| [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
| [buildah-info(1)](/docs/buildah-info.md) | Display Buildah system information. |
| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. |
| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. |
| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. |
| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. |
| [buildah-rmi(1)](/docs/buildah-rmi.md) | Removes one or more images. |
| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. |
| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. |
| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. |
| [buildah-unshare(1)](/docs/buildah-unshare.md) | Launch a command in a user namespace with modified ID mappings. |
| [buildah-version(1)](/docs/buildah-version.md) | Display the Buildah Version Information |

**Future goals include:**
* more CI tests
* additional CLI commands (?)
276 vendor/github.com/containers/buildah/pkg/unshare/unshare.c generated vendored Normal file
@@ -0,0 +1,276 @@
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <linux/memfd.h>
#include <fcntl.h>
#include <grp.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <errno.h>
#include <unistd.h>

#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif
#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
#endif
#ifndef F_SEAL_SEAL
#define F_SEAL_SEAL 0x0001LU
#endif
#ifndef F_SEAL_SHRINK
#define F_SEAL_SHRINK 0x0002LU
#endif
#ifndef F_SEAL_GROW
#define F_SEAL_GROW 0x0004LU
#endif
#ifndef F_SEAL_WRITE
#define F_SEAL_WRITE 0x0008LU
#endif

#define BUFSTEP 1024

static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone";

static int _containers_unshare_parse_envint(const char *envname) {
	char *p, *q;
	long l;

	p = getenv(envname);
	if (p == NULL) {
		return -1;
	}
	q = NULL;
	l = strtol(p, &q, 10);
	if ((q == NULL) || (*q != '\0')) {
		fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
		_exit(1);
	}
	unsetenv(envname);
	return l;
}

static void _check_proc_sys_file(const char *path)
{
	FILE *fp;
	char buf[32];
	size_t n_read;
	long r;

	fp = fopen(path, "r");
	if (fp == NULL) {
		if (errno != ENOENT)
			fprintf(stderr, "Error reading %s: %m\n", _max_user_namespaces);
	} else {
		memset(buf, 0, sizeof(buf));
		n_read = fread(buf, 1, sizeof(buf) - 1, fp);
		if (n_read > 0) {
			r = atoi(buf);
			if (r == 0) {
				fprintf(stderr, "User namespaces are not enabled in %s.\n", path);
			}
		} else {
			fprintf(stderr, "Error reading %s: no contents, should contain a number greater than 0.\n", path);
		}
		fclose(fp);
	}
}

static char **parse_proc_stringlist(const char *list) {
	int fd, n, i, n_strings;
	char *buf, *new_buf, **ret;
	size_t size, new_size, used;

	fd = open(list, O_RDONLY);
	if (fd == -1) {
		return NULL;
	}
	buf = NULL;
	size = 0;
	used = 0;
	for (;;) {
		new_size = used + BUFSTEP;
		new_buf = realloc(buf, new_size);
		if (new_buf == NULL) {
			free(buf);
			fprintf(stderr, "realloc(%ld): out of memory\n", (long)(size + BUFSTEP));
			return NULL;
		}
		buf = new_buf;
		size = new_size;
		memset(buf + used, '\0', size - used);
		n = read(fd, buf + used, size - used - 1);
		if (n < 0) {
			fprintf(stderr, "read(): %m\n");
			return NULL;
		}
		if (n == 0) {
			break;
		}
		used += n;
	}
	close(fd);
	n_strings = 0;
	for (n = 0; n < used; n++) {
		if ((n == 0) || (buf[n-1] == '\0')) {
			n_strings++;
		}
	}
	ret = calloc(n_strings + 1, sizeof(char *));
	if (ret == NULL) {
		fprintf(stderr, "calloc(): out of memory\n");
		return NULL;
	}
	i = 0;
	for (n = 0; n < used; n++) {
		if ((n == 0) || (buf[n-1] == '\0')) {
			ret[i++] = &buf[n];
		}
	}
	ret[i] = NULL;
	return ret;
}
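
/* containers_reexec() re-executes the current binary from a sealed copy of
 * itself: it re-reads its own command line from /proc/self/cmdline, copies
 * /proc/self/exe into an anonymous memfd, seals that memfd against shrinking,
 * growing, writing, and re-sealing, and then fexecve()s it with the original
 * argv and environment. */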
static int containers_reexec(void) {
	char **argv, *exename;
	int fd, mmfd, n_read, n_written;
	struct stat st;
	char buf[2048];

	argv = parse_proc_stringlist("/proc/self/cmdline");
	if (argv == NULL) {
		return -1;
	}
	fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC);
	if (fd == -1) {
		fprintf(stderr, "open(\"/proc/self/exe\"): %m\n");
		return -1;
	}
	if (fstat(fd, &st) == -1) {
		fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n");
		return -1;
	}
	exename = basename(argv[0]);
	mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC);
	if (mmfd == -1) {
		fprintf(stderr, "memfd_create(): %m\n");
		return -1;
	}
	for (;;) {
		n_read = read(fd, buf, sizeof(buf));
		if (n_read < 0) {
			fprintf(stderr, "read(\"/proc/self/exe\"): %m\n");
			return -1;
		}
		if (n_read == 0) {
			break;
		}
		n_written = write(mmfd, buf, n_read);
		if (n_written < 0) {
			fprintf(stderr, "write(anonfd): %m\n");
			return -1;
		}
		if (n_written != n_read) {
			fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read);
			return -1;
		}
	}
	close(fd);
	if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) {
		close(mmfd);
		fprintf(stderr, "Error sealing memfd copy: %m\n");
		return -1;
	}
	if (fexecve(mmfd, argv, environ) == -1) {
		close(mmfd);
		fprintf(stderr, "Error during reexec(...): %m\n");
		return -1;
	}
	return 0;
}
|
||||
|
||||
void _containers_unshare(void)
|
||||
{
|
||||
int flags, pidfd, continuefd, n, pgrp, sid, ctty;
|
||||
char buf[2048];
|
||||
|
||||
flags = _containers_unshare_parse_envint("_Containers-unshare");
|
||||
if (flags == -1) {
|
||||
return;
|
||||
}
|
||||
if ((flags & CLONE_NEWUSER) != 0) {
|
||||
if (unshare(CLONE_NEWUSER) == -1) {
|
||||
fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n");
|
||||
_check_proc_sys_file (_max_user_namespaces);
|
||||
_check_proc_sys_file (_unprivileged_user_namespaces);
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe");
|
||||
if (pidfd != -1) {
|
||||
snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
|
||||
size_t size = write(pidfd, buf, strlen(buf));
|
||||
if (size != strlen(buf)) {
|
||||
fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
|
||||
_exit(1);
|
||||
}
|
||||
close(pidfd);
|
||||
}
|
||||
continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe");
|
||||
if (continuefd != -1) {
|
||||
n = read(continuefd, buf, sizeof(buf));
|
||||
if (n > 0) {
|
||||
fprintf(stderr, "Error: %.*s\n", n, buf);
|
||||
_exit(1);
|
||||
}
|
||||
close(continuefd);
|
||||
}
|
||||
sid = _containers_unshare_parse_envint("_Containers-setsid");
|
||||
if (sid == 1) {
|
||||
if (setsid() == -1) {
|
||||
fprintf(stderr, "Error during setsid: %m\n");
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
pgrp = _containers_unshare_parse_envint("_Containers-setpgrp");
|
||||
if (pgrp == 1) {
|
||||
if (setpgrp() == -1) {
|
||||
fprintf(stderr, "Error during setpgrp: %m\n");
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
ctty = _containers_unshare_parse_envint("_Containers-ctty");
|
||||
if (ctty != -1) {
|
||||
if (ioctl(ctty, TIOCSCTTY, 0) == -1) {
|
||||
fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty);
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
if ((flags & CLONE_NEWUSER) != 0) {
|
||||
if (setresgid(0, 0, 0) != 0) {
|
||||
fprintf(stderr, "Error during setresgid(0): %m\n");
|
||||
_exit(1);
|
||||
}
|
||||
if (setresuid(0, 0, 0) != 0) {
|
||||
fprintf(stderr, "Error during setresuid(0): %m\n");
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
if ((flags & ~CLONE_NEWUSER) != 0) {
|
||||
if (unshare(flags & ~CLONE_NEWUSER) == -1) {
|
||||
fprintf(stderr, "Error during unshare(...): %m\n");
|
||||
_exit(1);
|
||||
}
|
||||
}
|
||||
if (containers_reexec() != 0) {
|
||||
_exit(1);
|
||||
}
|
||||
return;
|
||||
}
|
||||
545 vendor/github.com/containers/buildah/pkg/unshare/unshare.go generated vendored Normal file
@@ -0,0 +1,545 @@
// +build linux

package unshare

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
	"os/user"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"

	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/pkg/reexec"
	"github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/syndtr/gocapability/capability"
)

// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and
// handles setting ID maps and other related settings by triggering
// initialization code in the child.
type Cmd struct {
	*exec.Cmd
	UnshareFlags               int
	UseNewuidmap               bool
	UidMappings                []specs.LinuxIDMapping
	UseNewgidmap               bool
	GidMappings                []specs.LinuxIDMapping
	GidMappingsEnableSetgroups bool
	Setsid                     bool
	Setpgrp                    bool
	Ctty                       *os.File
	OOMScoreAdj                *int
	Hook                       func(pid int) error
}

// Command creates a new Cmd which can be customized.
func Command(args ...string) *Cmd {
	cmd := reexec.Command(args...)
	return &Cmd{
		Cmd: cmd,
	}
}

func (c *Cmd) Start() error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Set an environment variable to tell the child to synchronize its startup.
	if c.Env == nil {
		c.Env = os.Environ()
	}
	c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags))

	// Please the libpod "rootless" package to find the expected env variables.
	if os.Geteuid() != 0 {
		c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done")
		c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", os.Geteuid()))
	}

	// Create the pipe for reading the child's PID.
	pidRead, pidWrite, err := os.Pipe()
	if err != nil {
		return errors.Wrapf(err, "error creating pid pipe")
	}
	c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
	c.ExtraFiles = append(c.ExtraFiles, pidWrite)

	// Create the pipe for letting the child know to proceed.
	continueRead, continueWrite, err := os.Pipe()
	if err != nil {
		pidRead.Close()
		pidWrite.Close()
return errors.Wrapf(err, "error creating pid pipe")
|
||||
	}
	c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
	c.ExtraFiles = append(c.ExtraFiles, continueRead)

	// Pass along other instructions.
	if c.Setsid {
		c.Env = append(c.Env, "_Containers-setsid=1")
	}
	if c.Setpgrp {
		c.Env = append(c.Env, "_Containers-setpgrp=1")
	}
	if c.Ctty != nil {
		c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3))
		c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
	}

	// Make sure we clean up our pipes.
	defer func() {
		if pidRead != nil {
			pidRead.Close()
		}
		if pidWrite != nil {
			pidWrite.Close()
		}
		if continueRead != nil {
			continueRead.Close()
		}
		if continueWrite != nil {
			continueWrite.Close()
		}
	}()

	// Start the new process.
	err = c.Cmd.Start()
	if err != nil {
		return err
	}

	// Close the ends of the pipes that the parent doesn't need.
	continueRead.Close()
	continueRead = nil
	pidWrite.Close()
	pidWrite = nil

	// Read the child's PID from the pipe.
	pidString := ""
	b := new(bytes.Buffer)
	io.Copy(b, pidRead)
	pidString = b.String()
	pid, err := strconv.Atoi(pidString)
	if err != nil {
		fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
		return errors.Wrapf(err, "error parsing PID %q", pidString)
	}
	pidString = fmt.Sprintf("%d", pid)

	// If we created a new user namespace, set any specified mappings.
	if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 {
		// Always set "setgroups".
		setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
		if err != nil {
			fmt.Fprintf(continueWrite, "error opening setgroups: %v", err)
			return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
		}
		defer setgroups.Close()
		if c.GidMappingsEnableSetgroups {
			if _, err := fmt.Fprintf(setgroups, "allow"); err != nil {
				fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err)
return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString)
|
||||
			}
		} else {
			if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
				fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err)
				return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString)
			}
		}

		if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
			uidmap, gidmap, err := GetHostIDMappings("")
			if err != nil {
				fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
				return errors.Wrapf(err, "error reading ID mappings in parent")
			}
			if len(c.UidMappings) == 0 {
				c.UidMappings = uidmap
				for i := range c.UidMappings {
					c.UidMappings[i].HostID = c.UidMappings[i].ContainerID
				}
			}
			if len(c.GidMappings) == 0 {
				c.GidMappings = gidmap
				for i := range c.GidMappings {
					c.GidMappings[i].HostID = c.GidMappings[i].ContainerID
				}
			}
		}

		if len(c.GidMappings) > 0 {
			// Build the GID map, since writing to the proc file has to be done all at once.
			g := new(bytes.Buffer)
			for _, m := range c.GidMappings {
				fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
			}
			// Set the GID map.
			if c.UseNewgidmap {
				cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
				g.Reset()
				cmd.Stdout = g
				cmd.Stderr = g
				err := cmd.Run()
				if err != nil {
					fmt.Fprintf(continueWrite, "error running newgidmap: %v: %s", err, g.String())
					return errors.Wrapf(err, "error running newgidmap: %s", g.String())
				}
			} else {
				gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
				if err != nil {
					fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
					return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
				}
				defer gidmap.Close()
				if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
					fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
					return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString)
				}
			}
		}

		if len(c.UidMappings) > 0 {
			// Build the UID map, since writing to the proc file has to be done all at once.
			u := new(bytes.Buffer)
			for _, m := range c.UidMappings {
				fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
			}
			// Set the UID map.
			if c.UseNewuidmap {
				cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
				u.Reset()
				cmd.Stdout = u
				cmd.Stderr = u
				err := cmd.Run()
				if err != nil {
					fmt.Fprintf(continueWrite, "error running newuidmap: %v: %s", err, u.String())
					return errors.Wrapf(err, "error running newuidmap: %s", u.String())
				}
			} else {
				uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
				if err != nil {
					fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
					return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
				}
				defer uidmap.Close()
				if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
					fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err)
					return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString)
				}
			}
		}
	}

	if c.OOMScoreAdj != nil {
		oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
		if err != nil {
			fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
			return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
		}
		defer oomScoreAdj.Close()
		if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil {
fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err)
|
||||
return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", c.OOMScoreAdj, pidString)
|
||||
		}
	}
	// Run any additional setup that we want to do before the child starts running proper.
	if c.Hook != nil {
		if err = c.Hook(pid); err != nil {
			fmt.Fprintf(continueWrite, "hook error: %v", err)
			return err
		}
	}

	return nil
}

func (c *Cmd) Run() error {
	if err := c.Start(); err != nil {
		return err
	}
	return c.Wait()
}

func (c *Cmd) CombinedOutput() ([]byte, error) {
	return nil, errors.New("unshare: CombinedOutput() not implemented")
}

func (c *Cmd) Output() ([]byte, error) {
	return nil, errors.New("unshare: Output() not implemented")
}

var (
	isRootlessOnce sync.Once
	isRootless     bool
)

const (
	// UsernsEnvName is the environment variable, if set indicates in rootless mode
	UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)

// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
	isRootlessOnce.Do(func() {
		isRootless = os.Geteuid() != 0 || os.Getenv(UsernsEnvName) != ""
	})
	return isRootless
}

// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
	if uidEnv != "" {
		u, _ := strconv.Atoi(uidEnv)
		return u
	}
	return os.Getuid()
}

// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
	return append(os.Environ(), UsernsEnvName+"=done")
}

type Runnable interface {
	Run() error
}

func bailOnError(err error, format string, a ...interface{}) {
	if err != nil {
		if format != "" {
			logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
		} else {
			logrus.Errorf("%v", err)
		}
		os.Exit(1)
	}
}

// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
	// If we've already been through this once, no need to try again.
	if os.Geteuid() == 0 && IsRootless() {
		return
	}

	var uidNum, gidNum uint64
	// Figure out who we are.
	me, err := user.Current()
	if !os.IsNotExist(err) {
		bailOnError(err, "error determining current user")
		uidNum, err = strconv.ParseUint(me.Uid, 10, 32)
		bailOnError(err, "error parsing current UID %s", me.Uid)
		gidNum, err = strconv.ParseUint(me.Gid, 10, 32)
		bailOnError(err, "error parsing current GID %s", me.Gid)
	}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// ID mappings to use to reexec ourselves.
	var uidmap, gidmap []specs.LinuxIDMapping
	if uidNum != 0 || evenForRoot {
		// Read the set of ID mappings that we're allowed to use. Each
		// range in /etc/subuid and /etc/subgid file is a starting host
		// ID and a range size.
		uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
		bailOnError(err, "error reading allowed ID mappings")
		if len(uidmap) == 0 {
			logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
		}
		if len(gidmap) == 0 {
			logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username)
		}
		// Map our UID and GID, then the subuid and subgid ranges,
		// consecutively, starting at 0, to get the mappings to use for
		// a copy of ourselves.
		uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...)
		gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...)
		var rangeStart uint32
		for i := range uidmap {
			uidmap[i].ContainerID = rangeStart
			rangeStart += uidmap[i].Size
		}
		rangeStart = 0
		for i := range gidmap {
			gidmap[i].ContainerID = rangeStart
			rangeStart += gidmap[i].Size
		}
	} else {
		// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
		// to use unshare(), so don't bother creating a new user namespace at this point.
		capabilities, err := capability.NewPid(0)
		bailOnError(err, "error reading the current capabilities sets")
		if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
			return
		}
		// Read the set of ID mappings that we're currently using.
		uidmap, gidmap, err = GetHostIDMappings("")
		bailOnError(err, "error reading current ID mappings")
		// Just reuse them.
		for i := range uidmap {
			uidmap[i].HostID = uidmap[i].ContainerID
		}
		for i := range gidmap {
			gidmap[i].HostID = gidmap[i].ContainerID
		}
	}

	// Unlike most uses of reexec or unshare, we're using a name that
	// _won't_ be recognized as a registered reexec handler, since we
	// _want_ to fall through reexec.Init() to the normal main().
	cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...)

	// If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again.
	err = os.Setenv(UsernsEnvName, "1")
	bailOnError(err, "error setting %s=1 in environment", UsernsEnvName)

	// Set the default isolation type to use the "rootless" method.
	if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
		if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
			logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
			os.Exit(1)
		}
	}

	// Reuse our stdio.
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	// Set up a new user namespace with the ID mapping.
	cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS
	cmd.UseNewuidmap = uidNum != 0
	cmd.UidMappings = uidmap
	cmd.UseNewgidmap = uidNum != 0
	cmd.GidMappings = gidmap
	cmd.GidMappingsEnableSetgroups = true

	// Finish up.
	logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
	ExecRunnable(cmd)
}

// ExecRunnable runs the specified unshare command, captures its exit status,
// and exits with the same status.
func ExecRunnable(cmd Runnable) {
	if err := cmd.Run(); err != nil {
		if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
			if exitError.ProcessState.Exited() {
				if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
					if waitStatus.Exited() {
						logrus.Errorf("%v", exitError)
						os.Exit(waitStatus.ExitStatus())
					}
					if waitStatus.Signaled() {
						logrus.Errorf("%v", exitError)
						os.Exit(int(waitStatus.Signal()) + 128)
					}
				}
			}
		}
		logrus.Errorf("%v", err)
		logrus.Errorf("(unable to determine exit status)")
		os.Exit(1)
	}
	os.Exit(0)
}

// getHostIDMappings reads mappings from the named node under /proc.
func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
	var mappings []specs.LinuxIDMapping
	f, err := os.Open(path)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		fields := strings.Fields(line)
		if len(fields) != 3 {
			return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
		}
		cid, err := strconv.ParseUint(fields[0], 10, 32)
		if err != nil {
			return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
		}
		hid, err := strconv.ParseUint(fields[1], 10, 32)
		if err != nil {
			return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
		}
		size, err := strconv.ParseUint(fields[2], 10, 32)
		if err != nil {
			return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
		}
		mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
	}
	return mappings, nil
}

// GetHostIDMappings reads mappings for the specified process (or the current
// process if pid is "self" or an empty string) from the kernel.
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
	if pid == "" {
		pid = "self"
	}
	uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid))
	if err != nil {
		return nil, nil, err
	}
	gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid))
	if err != nil {
		return nil, nil, err
	}
	return uidmap, gidmap, nil
}

// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid.
func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
	mappings, err := idtools.NewIDMappings(user, group)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
	}
	var uidmap, gidmap []specs.LinuxIDMapping
	for _, m := range mappings.UIDs() {
		uidmap = append(uidmap, specs.LinuxIDMapping{
			ContainerID: uint32(m.ContainerID),
			HostID:      uint32(m.HostID),
			Size:        uint32(m.Size),
		})
	}
	for _, m := range mappings.GIDs() {
		gidmap = append(gidmap, specs.LinuxIDMapping{
			ContainerID: uint32(m.ContainerID),
			HostID:      uint32(m.HostID),
			Size:        uint32(m.Size),
		})
	}
	return uidmap, gidmap, nil
}

// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
	uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
	if err != nil {
		return nil, nil, err
	}
	gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
	if err != nil {
		return nil, nil, err
	}
	return uid, gid, nil
}
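For orientation, a minimal sketch of how a rootless-capable tool is expected to drive the API above (the main package here is hypothetical, not part of this commit): give registered reexec handlers a chance to run, then call MaybeReexecUsingUserNamespace early in main().

package main

import (
	"fmt"
	"os"

	"github.com/containers/buildah/pkg/unshare"
	"github.com/containers/storage/pkg/reexec"
)

func main() {
	// Registered reexec handlers run first; the "-in-a-user-namespace"
	// argv[0] chosen by MaybeReexecUsingUserNamespace intentionally matches
	// none of them, so the re-executed child falls through to this main().
	if reexec.Init() {
		return
	}
	// In the parent this re-execs the process inside a new user namespace
	// and exits with the child's status; in the child it returns immediately.
	unshare.MaybeReexecUsingUserNamespace(false)

	fmt.Printf("rootless: %v, effective UID now: %d\n", unshare.IsRootless(), os.Geteuid())
}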
10 vendor/github.com/containers/buildah/pkg/unshare/unshare_cgo.go generated vendored Normal file
@@ -0,0 +1,10 @@
// +build linux,cgo,!gccgo

package unshare

// #cgo CFLAGS: -Wall
// extern void _containers_unshare(void);
// void __attribute__((constructor)) init(void) {
// _containers_unshare();
// }
import "C"
25 vendor/github.com/containers/buildah/pkg/unshare/unshare_gccgo.go generated vendored Normal file
@@ -0,0 +1,25 @@
// +build linux,cgo,gccgo

package unshare

// #cgo CFLAGS: -Wall -Wextra
// extern void _containers_unshare(void);
// void __attribute__((constructor)) init(void) {
// _containers_unshare();
// }
import "C"

// This next bit is straight out of libcontainer.

// AlwaysFalse is here to stay false
// (and be exported so the compiler doesn't optimize out its reference)
var AlwaysFalse bool

func init() {
	if AlwaysFalse {
		// by referencing this C init() in a noop test, it will ensure the compiler
		// links in the C function.
		// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
		C.init()
	}
}
45 vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go generated vendored Normal file
@@ -0,0 +1,45 @@
// +build !linux

package unshare

import (
	"os"

	"github.com/containers/storage/pkg/idtools"
	"github.com/opencontainers/runtime-spec/specs-go"
)

const (
	// UsernsEnvName is the environment variable, if set indicates in rootless mode
	UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)

// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
	return false
}

// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
	return os.Getuid()
}

// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
	return append(os.Environ(), UsernsEnvName+"=")
}

// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
}

// GetHostIDMappings reads mappings for the specified process (or the current
// process if pid is "self" or an empty string) from the kernel.
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
	return nil, nil, nil
}

// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
	return nil, nil, nil
}
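To make the mapping arithmetic in MaybeReexecUsingUserNamespace concrete, a worked example with hypothetical values (invoking user UID 1000, an /etc/subuid entry of 100000:65536); it prints the triples exactly as Start() writes them, in one write, to /proc/<pid>/uid_map.

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Hypothetical inputs: the invoking user is UID 1000 and owns the
	// subordinate range 100000-165535 from /etc/subuid.
	uidmap := []specs.LinuxIDMapping{
		{ContainerID: 0, HostID: 1000, Size: 1},       // the user maps to root in the namespace
		{ContainerID: 1, HostID: 100000, Size: 65536}, // the subuid range follows consecutively
	}
	// One "<container> <host> <size>" triple per line:
	for _, m := range uidmap {
		fmt.Printf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
	}
	// Output:
	// 0 1000 1
	// 1 100000 65536
}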
68 vendor/github.com/containers/buildah/vendor.conf generated vendored Normal file
@@ -0,0 +1,68 @@
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/blang/semver v3.5.0
github.com/BurntSushi/toml v0.2.0
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/containernetworking/cni v0.7.0-rc2
github.com/containers/image f52cf78ebfa1916da406f8b6210d8f7764ec1185
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
github.com/boltdb/bolt v1.3.1
github.com/containers/storage v1.12.2
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83
github.com/docker/docker-credential-helpers v0.6.1
github.com/docker/go-connections v0.4.0
github.com/docker/go-units v0.3.2
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
github.com/docker/libnetwork 1a06131fb8a047d919f7deaf02a4c414d7884b83
github.com/fsouza/go-dockerclient v1.3.0
github.com/ghodss/yaml v1.0.0
github.com/gogo/protobuf v1.2.0
github.com/gorilla/context v1.1.1
github.com/gorilla/mux v1.6.2
github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/go-multierror v1.0.0
github.com/imdario/mergo v0.3.6
github.com/mattn/go-shellwords v1.0.3
github.com/Microsoft/go-winio v0.4.11
github.com/Microsoft/hcsshim v0.8.3
github.com/mistifyio/go-zfs v2.1.1
github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
# TODO: Gotty has not been updated since 2012. Can we find a replacement?
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools v0.8.0
github.com/opencontainers/selinux v1.1
github.com/openshift/imagebuilder v1.1.0
github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b
github.com/pkg/errors v0.8.1
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
github.com/seccomp/libseccomp-golang v0.9.0
github.com/seccomp/containers-golang v0.1
github.com/sirupsen/logrus v1.0.0
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
github.com/tchap/go-patricia v2.2.6
github.com/ulikunitz/xz v0.5.5
github.com/vbatts/tar-split v0.10.2
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema v1.1.0
golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 https://github.com/golang/crypto
golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 https://github.com/golang/net
golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f https://github.com/golang/sync
golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba https://github.com/golang/sys
golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d https://github.com/golang/text
gopkg.in/yaml.v2 v2.2.2
k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
github.com/onsi/gomega v1.4.3
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3
github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
2 vendor/github.com/containers/image/README.md generated vendored
@@ -65,7 +65,7 @@ the primary downside is that creating new signatures with the Golang-only implem
 - `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled
 and impossible to import when this build tag is in use.
 
-## [Contributing](CONTRIBUTING.md)**
+## [Contributing](CONTRIBUTING.md)
 
 Information about contributing to this project.
 
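The copy.go hunks below rework digestingReader (adding a validationSucceeded flag) and the blob pipeline around it. The underlying pattern is hash-while-reading, then compare against the expected digest at EOF; a standalone sketch of that idea, using the same go-digest library (illustrative only, not code from this commit):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	payload := "hello layer"
	expected := digest.FromString(payload) // what a manifest would promise

	// Hash the stream as it is consumed, like digestingReader does.
	digester := expected.Algorithm().Digester()
	tee := io.TeeReader(strings.NewReader(payload), digester.Hash())
	if _, err := io.Copy(ioutil.Discard, tee); err != nil {
		panic(err)
	}

	// At EOF, compare actual vs. expected.
	if digester.Digest() == expected {
		fmt.Println("validation succeeded")
	} else {
		fmt.Println("validation failed")
	}
}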
334 vendor/github.com/containers/image/copy/copy.go generated vendored
@@ -2,36 +2,50 @@ package copy
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/image"
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/pkg/blobinfocache"
|
||||
"github.com/containers/image/pkg/compression"
|
||||
"github.com/containers/image/signature"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/klauspost/pgzip"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
pb "gopkg.in/cheggaaa/pb.v1"
|
||||
"github.com/vbauerster/mpb"
|
||||
"github.com/vbauerster/mpb/decor"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
||||
type digestingReader struct {
|
||||
source io.Reader
|
||||
digester digest.Digester
|
||||
expectedDigest digest.Digest
|
||||
validationFailed bool
|
||||
source io.Reader
|
||||
digester digest.Digester
|
||||
expectedDigest digest.Digest
|
||||
validationFailed bool
|
||||
validationSucceeded bool
|
||||
}
|
||||
|
||||
// maxParallelDownloads is used to limit the maxmimum number of parallel
|
||||
// downloads. Let's follow Firefox by limiting it to 6.
|
||||
var maxParallelDownloads = 6
|
||||
|
||||
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
|
||||
// and set validationFailed to true if the source stream does not match expectedDigest.
|
||||
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
|
||||
// (neither is set if EOF is never reached).
|
||||
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
|
||||
if err := expectedDigest.Validate(); err != nil {
|
||||
return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
|
||||
@@ -64,6 +78,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
|
||||
d.validationFailed = true
|
||||
return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
|
||||
}
|
||||
d.validationSucceeded = true
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
@@ -71,21 +86,24 @@ func (d *digestingReader) Read(p []byte) (int, error) {
|
||||
// copier allows us to keep track of diffID values for blobs, and other
|
||||
// data shared across one or more images in a possible manifest list.
|
||||
type copier struct {
|
||||
cachedDiffIDs map[digest.Digest]digest.Digest
|
||||
dest types.ImageDestination
|
||||
rawSource types.ImageSource
|
||||
reportWriter io.Writer
|
||||
progressOutput io.Writer
|
||||
progressInterval time.Duration
|
||||
progress chan types.ProgressProperties
|
||||
blobInfoCache types.BlobInfoCache
|
||||
copyInParallel bool
|
||||
}
|
||||
|
||||
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
|
||||
type imageCopier struct {
|
||||
c *copier
|
||||
manifestUpdates *types.ManifestUpdateOptions
|
||||
src types.Image
|
||||
diffIDsAreNeeded bool
|
||||
canModifyManifest bool
|
||||
c *copier
|
||||
manifestUpdates *types.ManifestUpdateOptions
|
||||
src types.Image
|
||||
diffIDsAreNeeded bool
|
||||
canModifyManifest bool
|
||||
canSubstituteBlobs bool
|
||||
}
|
||||
|
||||
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
|
||||
@@ -140,13 +158,26 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
}
|
||||
}()
|
||||
|
||||
// If reportWriter is not a TTY (e.g., when piping to a file), do not
|
||||
// print the progress bars to avoid long and hard to parse output.
|
||||
// createProgressBar() will print a single line instead.
|
||||
progressOutput := reportWriter
|
||||
if !isTTY(reportWriter) {
|
||||
progressOutput = ioutil.Discard
|
||||
}
|
||||
copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
|
||||
c := &copier{
|
||||
cachedDiffIDs: make(map[digest.Digest]digest.Digest),
|
||||
dest: dest,
|
||||
rawSource: rawSource,
|
||||
reportWriter: reportWriter,
|
||||
progressOutput: progressOutput,
|
||||
progressInterval: options.ProgressInterval,
|
||||
progress: options.Progress,
|
||||
copyInParallel: copyInParallel,
|
||||
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
|
||||
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
|
||||
// we might want to add a separate CommonCtx — or would that be too confusing?
|
||||
blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
|
||||
}
|
||||
|
||||
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
|
||||
@@ -184,7 +215,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
|
||||
// Image copies a single (on-manifest-list) image unparsedImage, using policyContext to validate
|
||||
// source image admissibility.
|
||||
func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifest []byte, retErr error) {
|
||||
func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) {
|
||||
// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
|
||||
// Make sure we fail cleanly in such cases.
|
||||
multiImage, err := isMultiImage(ctx, unparsedImage)
|
||||
@@ -207,6 +238,26 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
|
||||
}
|
||||
|
||||
// If the destination is a digested reference, make a note of that, determine what digest value we're
|
||||
// expecting, and check that the source manifest matches it.
|
||||
destIsDigestedReference := false
|
||||
if named := c.dest.Reference().DockerReference(); named != nil {
|
||||
if digested, ok := named.(reference.Digested); ok {
|
||||
destIsDigestedReference = true
|
||||
sourceManifest, _, err := src.Manifest(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error reading manifest from source image")
|
||||
}
|
||||
matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error computing digest of source image's manifest")
|
||||
}
|
||||
if !matches {
|
||||
return nil, errors.New("Digest of source image's manifest would not match destination reference")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -234,8 +285,15 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
|
||||
src: src,
|
||||
// diffIDsAreNeeded is computed later
|
||||
canModifyManifest: len(sigs) == 0,
|
||||
canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
|
||||
}
|
||||
// Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
|
||||
// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
|
||||
// The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
|
||||
// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
|
||||
// that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
|
||||
// and we would reuse and sign it.
|
||||
ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == ""
|
||||
|
||||
if err := ic.updateEmbeddedDockerReference(); err != nil {
|
||||
return nil, err
|
||||
@@ -259,7 +317,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
|
||||
// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
|
||||
// So, try the preferred manifest MIME type. If the process succeeds, fine…
|
||||
manifest, err = ic.copyUpdatedConfigAndManifest(ctx)
|
||||
manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx)
|
||||
if err != nil {
|
||||
logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
|
||||
// … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
|
||||
@@ -290,7 +348,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
}
|
||||
|
||||
// We have successfully uploaded a manifest.
|
||||
manifest = attemptedManifest
|
||||
manifestBytes = attemptedManifest
|
||||
errs = nil // Mark this as a success so that we don't abort below.
|
||||
break
|
||||
}
|
||||
@@ -300,7 +358,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
}
|
||||
|
||||
if options.SignBy != "" {
|
||||
newSig, err := c.createSignature(manifest, options.SignBy)
|
||||
newSig, err := c.createSignature(manifestBytes, options.SignBy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -312,7 +370,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
return nil, errors.Wrap(err, "Error writing signatures")
|
||||
}
|
||||
|
||||
return manifest, nil
|
||||
return manifestBytes, nil
|
||||
}
|
||||
|
||||
// Printf writes a formatted string to c.reportWriter.
|
||||
@@ -365,11 +423,18 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// isTTY returns true if the io.Writer is a file and a tty.
|
||||
func isTTY(w io.Writer) bool {
|
||||
if f, ok := w.(*os.File); ok {
|
||||
return terminal.IsTerminal(int(f.Fd()))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
|
||||
func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
||||
srcInfos := ic.src.LayerInfos()
|
||||
destInfos := []types.BlobInfo{}
|
||||
diffIDs := []digest.Digest{}
|
||||
numLayers := len(srcInfos)
|
||||
updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -382,30 +447,70 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
||||
srcInfos = updatedSrcInfos
|
||||
srcInfosUpdated = true
|
||||
}
|
||||
for _, srcLayer := range srcInfos {
|
||||
var (
|
||||
destInfo types.BlobInfo
|
||||
diffID digest.Digest
|
||||
err error
|
||||
)
|
||||
|
||||
type copyLayerData struct {
|
||||
destInfo types.BlobInfo
|
||||
diffID digest.Digest
|
||||
err error
|
||||
}
|
||||
|
||||
// copyGroup is used to determine if all layers are copied
|
||||
copyGroup := sync.WaitGroup{}
|
||||
copyGroup.Add(numLayers)
|
||||
|
||||
// copySemaphore is used to limit the number of parallel downloads to
|
||||
// avoid malicious images causing troubles and to be nice to servers.
|
||||
var copySemaphore *semaphore.Weighted
|
||||
if ic.c.copyInParallel {
|
||||
copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads))
|
||||
} else {
|
||||
copySemaphore = semaphore.NewWeighted(int64(1))
|
||||
}
|
||||
|
||||
data := make([]copyLayerData, numLayers)
|
||||
copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) {
|
||||
defer copySemaphore.Release(1)
|
||||
defer copyGroup.Done()
|
||||
cld := copyLayerData{}
|
||||
if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
|
||||
// DiffIDs are, currently, needed only when converting from schema1.
|
||||
// In which case src.LayerInfos will not have URLs because schema1
|
||||
// does not support them.
|
||||
if ic.diffIDsAreNeeded {
|
||||
return errors.New("getting DiffID for foreign layers is unimplemented")
|
||||
cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
|
||||
} else {
|
||||
cld.destInfo = srcLayer
|
||||
logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
|
||||
}
|
||||
destInfo = srcLayer
|
||||
ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name())
|
||||
} else {
|
||||
destInfo, diffID, err = ic.copyLayer(ctx, srcLayer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
|
||||
}
|
||||
destInfos = append(destInfos, destInfo)
|
||||
diffIDs = append(diffIDs, diffID)
|
||||
data[index] = cld
|
||||
}
|
||||
|
||||
func() { // A scope for defer
|
||||
progressPool, progressCleanup := ic.c.newProgressPool(ctx)
|
||||
defer progressCleanup()
|
||||
|
||||
for i, srcLayer := range srcInfos {
|
||||
copySemaphore.Acquire(ctx, 1)
|
||||
go copyLayerHelper(i, srcLayer, progressPool)
|
||||
}
|
||||
|
||||
// Wait for all layers to be copied
|
||||
copyGroup.Wait()
|
||||
}()
|
||||
|
||||
destInfos := make([]types.BlobInfo, numLayers)
|
||||
diffIDs := make([]digest.Digest, numLayers)
|
||||
for i, cld := range data {
|
||||
if cld.err != nil {
|
||||
return cld.err
|
||||
}
|
||||
destInfos[i] = cld.destInfo
|
||||
diffIDs[i] = cld.diffID
|
||||
}
|
||||
|
||||
ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
|
||||
if ic.diffIDsAreNeeded {
|
||||
ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
|
||||
@@ -468,18 +573,67 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
// newProgressPool creates a *mpb.Progress and a cleanup function.
|
||||
// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
|
||||
func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
|
||||
return pool, func() {
|
||||
cancel()
|
||||
pool.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
|
||||
// is ioutil.Discard, the progress bar's output will be discarded
|
||||
func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
|
||||
// shortDigestLen is the length of the digest used for blobs.
|
||||
const shortDigestLen = 12
|
||||
|
||||
prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
|
||||
// Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
|
||||
maxPrefixLen := len("Copying blob ") + shortDigestLen
|
||||
if len(prefix) > maxPrefixLen {
|
||||
prefix = prefix[:maxPrefixLen]
|
||||
}
|
||||
|
||||
bar := pool.AddBar(info.Size,
|
||||
mpb.BarClearOnComplete(),
|
||||
mpb.PrependDecorators(
|
||||
decor.Name(prefix),
|
||||
),
|
||||
mpb.AppendDecorators(
|
||||
decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
|
||||
),
|
||||
)
|
||||
if c.progressOutput == ioutil.Discard {
|
||||
c.Printf("Copying %s %s\n", kind, info.Digest)
|
||||
}
|
||||
return bar
|
||||
}
|
||||
|
||||
// copyConfig copies config.json, if any, from src to dest.
|
||||
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
|
||||
srcInfo := src.ConfigInfo()
|
||||
if srcInfo.Digest != "" {
|
||||
c.Printf("Copying config %s\n", srcInfo.Digest)
|
||||
configBlob, err := src.ConfigBlob(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
|
||||
}
|
||||
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true)
|
||||
|
||||
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
|
||||
progressPool, progressCleanup := c.newProgressPool(ctx)
|
||||
defer progressCleanup()
|
||||
bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
|
||||
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
bar.SetTotal(int64(len(configBlob)), true)
|
||||
return destInfo, nil
|
||||
}()
|
||||
if err != nil {
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
if destInfo.Digest != srcInfo.Digest {
|
||||
return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
|
||||
@@ -497,43 +651,39 @@ type diffIDResult struct {
|
||||
|
||||
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
|
||||
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
|
||||
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
|
||||
// Check if we already have a blob with this digest
|
||||
haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(ctx, srcInfo)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
|
||||
}
|
||||
// If we already have a cached diffID for this blob, we don't need to compute it
|
||||
diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "")
|
||||
// If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
|
||||
if haveBlob && !diffIDIsNeeded {
|
||||
// Check the blob sizes match, if we were given a size this time
|
||||
if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
|
||||
return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
|
||||
}
|
||||
srcInfo.Size = extantBlobSize
|
||||
// Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
|
||||
blobinfo, err := ic.c.dest.ReapplyBlob(ctx, srcInfo)
|
||||
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
|
||||
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
|
||||
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
|
||||
|
||||
// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
|
||||
if !diffIDIsNeeded {
|
||||
reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
|
||||
}
|
||||
if reused {
|
||||
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
|
||||
bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists")
|
||||
bar.SetTotal(0, true)
|
||||
return blobInfo, cachedDiffID, nil
|
||||
}
|
||||
ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
|
||||
return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err
|
||||
}
|
||||
|
||||
// Fallback: copy the layer, computing the diffID if we need to do so
|
||||
ic.c.Printf("Copying blob %s\n", srcInfo.Digest)
|
||||
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo)
|
||||
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
|
||||
}
|
||||
defer srcStream.Close()
|
||||
|
||||
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
|
||||
diffIDIsNeeded)
|
||||
bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")
|
||||
|
||||
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", err
|
||||
}
|
||||
|
||||
diffID := cachedDiffID
|
||||
if diffIDIsNeeded {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -543,12 +693,15 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
|
||||
return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
|
||||
}
|
||||
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
|
||||
ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
|
||||
return blobInfo, diffIDResult.digest, nil
|
||||
// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
|
||||
// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
|
||||
ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
|
||||
diffID = diffIDResult.digest
|
||||
}
|
||||
} else {
|
||||
return blobInfo, ic.c.cachedDiffIDs[srcInfo.Digest], nil
|
||||
}
|
||||
|
||||
bar.SetTotal(srcInfo.Size, true)
|
||||
return blobInfo, diffID, nil
|
||||
}
|
||||
|
||||
// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
|
||||
@@ -556,7 +709,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
|
||||
// perhaps compressing the stream if canCompress,
|
||||
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
|
||||
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
|
||||
diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) {
|
||||
diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
|
||||
var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
|
||||
var diffIDChan chan diffIDResult
|
||||
|
||||
@@ -580,7 +733,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
|
||||
return pipeWriter
|
||||
}
|
||||
}
|
||||
blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false) // Sets err to nil on success
|
||||
blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success
|
||||
return blobInfo, diffIDChan, err
|
||||
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
|
||||
}
|
||||
@@ -617,14 +770,14 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
canModifyBlob bool, isConfig bool) (types.BlobInfo, error) {
canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
// The copying happens through a pipeline of connected io.Readers.
// === Input: srcStream

// === Process input through digestingReader to validate against the expected digest.
// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
// use a separate validation failure indicator.
// Note that we don't use a stronger "validationSucceeded" indicator, because
// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
// dest.PutBlob may detect that the layer already exists, in which case we don't
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
@@ -640,16 +793,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
}
isCompressed := decompressor != nil

// === Report progress using a pb.Reader.
bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
bar.Output = c.reportWriter
bar.SetMaxWidth(80)
bar.ShowTimeLeft = false
bar.ShowPercent = false
bar.Start()
destStream = bar.NewProxyReader(destStream)
defer bar.Finish()
destStream = bar.ProxyReader(destStream)

// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
@@ -660,8 +804,10 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr

// === Deal with layer compression/decompression if necessary
var inputInfo types.BlobInfo
var compressionOperation types.LayerCompression
if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
logrus.Debugf("Compressing blob on the fly")
compressionOperation = types.Compress
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()

@@ -674,6 +820,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Size = -1
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
logrus.Debugf("Blob will be decompressed")
compressionOperation = types.Decompress
s, err := decompressor(destStream)
if err != nil {
return types.BlobInfo{}, err
@@ -684,6 +831,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
inputInfo.Size = -1
} else {
logrus.Debugf("Using original blob without modification")
compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
}

@@ -699,7 +847,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}

// === Finally, send the layer stream to dest.
uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, isConfig)
uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
}
@@ -722,6 +870,22 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
}
if digestingReader.validationSucceeded {
// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
// srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader
// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
// (because inputInfo.Digest == "", this must have been computed afresh).
switch compressionOperation {
case types.PreserveOriginal:
break // Do nothing, we have only one digest and we might not have even verified it.
case types.Compress:
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
case types.Decompress:
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
default:
return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
}
}
return uploadedInfo, nil
}
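The old per-blob `pb` bar setup above is replaced by an `*mpb.Bar` created once by the caller and threaded through; the stream is then wrapped with `bar.ProxyReader`. A rough sketch of that pattern with `vbauerster/mpb`, assuming its v4-era API (`mpb.New`, `AddBar`, `ProxyReader`); the counters decorator and totals are illustrative:

```go
package main

import (
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	const total = 1 << 20
	src := strings.NewReader(strings.Repeat("x", total)) // stand-in for a layer stream

	pool := mpb.New(mpb.WithOutput(os.Stderr))
	bar := pool.AddBar(int64(total),
		mpb.AppendDecorators(decor.CountersKibiByte("%.1f / %.1f")),
	)

	proxy := bar.ProxyReader(src) // every Read advances the bar
	if _, err := io.Copy(ioutil.Discard, proxy); err != nil {
		panic(err)
	}
	pool.Wait() // flush the final render for all bars in the pool
}
```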
@@ -732,7 +896,7 @@ func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
}()

zipper := gzip.NewWriter(dest)
zipper := pgzip.NewWriter(dest)
defer zipper.Close()

_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()
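`pgzip` is API-compatible with the standard library's `gzip`, so the swap above is a one-line change that buys parallel, block-wise compression. A quick illustration, assuming `github.com/klauspost/pgzip` (the concurrency knob shown is optional):

```go
package main

import (
	"bytes"
	"io"
	"strings"

	pgzip "github.com/klauspost/pgzip"
)

func main() {
	var buf bytes.Buffer

	zw := pgzip.NewWriter(&buf) // drop-in replacement for gzip.NewWriter
	_ = zw.SetConcurrency(1<<20, 4) // optional: 1 MiB blocks compressed by 4 goroutines
	if _, err := io.Copy(zw, strings.NewReader("layer data")); err != nil {
		panic(err)
	}
	zw.Close() // flush and finish the gzip stream

	_ = buf.Len() // compressed size
}
```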
32 changes: vendor/github.com/containers/image/directory/directory_dest.go (generated, vendored)
@@ -124,13 +124,19 @@ func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *dirImageDestination) HasThreadSafePutBlob() bool {
return false
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
if err != nil {
return types.BlobInfo{}, err
@@ -169,27 +175,27 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dirImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
return false, -1, nil
return false, types.BlobInfo{}, nil
}
if err != nil {
return false, -1, err
return false, types.BlobInfo{}, err
}
return true, finfo.Size(), nil
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}

func (d *dirImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
}

// PutManifest writes manifest to the destination.
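For callers, the `HasBlob`/`ReapplyBlob` pair collapses into a single `TryReusingBlob` call. A hedged sketch of the caller-side flow implied by the new contract; the narrowed interface, the `upload` callback, and the package name are all invented for illustration:

```go
package copyexample

import (
	"context"

	"github.com/containers/image/types"
)

// blobReuser is the slice of the destination interface this sketch needs;
// the real method lives on types.ImageDestination.
type blobReuser interface {
	TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error)
}

// reuseOrUpload shows the caller-side contract: attempt a cheap reuse first
// and fall back to a real upload (left abstract here) only when that fails.
func reuseOrUpload(ctx context.Context, dest blobReuser, cache types.BlobInfoCache, info types.BlobInfo,
	upload func() (types.BlobInfo, error)) (types.BlobInfo, error) {

	reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, true /* canSubstitute */)
	if err != nil {
		return types.BlobInfo{}, err
	}
	if reused {
		return reusedInfo, nil // may differ from info when a substitute blob was used
	}
	return upload()
}
```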
9 changes: vendor/github.com/containers/image/directory/directory_src.go (generated, vendored)
@@ -48,8 +48,15 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, manifest.GuessMIMEType(m), err
}

// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *dirImageSource) HasThreadSafeGetBlob() bool {
return false
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
r, err := os.Open(s.ref.layerPath(info.Digest))
if err != nil {
return nil, -1, err
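The new `HasThreadSafeGetBlob`/`HasThreadSafePutBlob` methods let the copy code decide between parallel and sequential layer copies. A self-contained sketch of that gating, assuming an invented `copyOne` worker and an arbitrary goroutine budget:

```go
package main

import "sync"

// copyLayers runs copyOne over each layer index, in parallel only when the
// source and destination both advertise thread safety.
func copyLayers(n int, threadSafe bool, copyOne func(i int)) {
	if !threadSafe {
		for i := 0; i < n; i++ {
			copyOne(i)
		}
		return
	}
	sem := make(chan struct{}, 6) // e.g. 6 concurrent copies; the real limit is a policy choice
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		sem <- struct{}{}
		go func(i int) {
			defer wg.Done()
			defer func() { <-sem }()
			copyOne(i)
		}(i)
	}
	wg.Wait()
}

func main() {
	copyLayers(4, true, func(i int) { /* fetch and push layer i */ })
}
```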
23 changes: vendor/github.com/containers/image/docker/cache.go (generated, vendored, new file)
@@ -0,0 +1,23 @@
package docker

import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
)

// bicTransportScope returns a BICTransportScope appropriate for ref.
func bicTransportScope(ref dockerReference) types.BICTransportScope {
// Blobs can be reused across the whole registry.
return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
}

// newBICLocationReference returns a BICLocationReference appropriate for ref.
func newBICLocationReference(ref dockerReference) types.BICLocationReference {
// Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
return types.BICLocationReference{Opaque: ref.ref.Name()}
}

// parseBICLocationReference returns a repository for encoded lr.
func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
return reference.ParseNormalizedNamed(lr.Opaque)
}
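These helpers feed the `RecordKnownLocation`/`CandidateLocations` calls visible elsewhere in this diff. A toy sketch of how the pieces fit together; the `locCache` map stands in for a real `types.BlobInfoCache` implementation, and the registry and repository names are placeholders:

```go
package main

import (
	"fmt"

	"github.com/containers/image/types"
	digest "github.com/opencontainers/go-digest"
)

// locCache is a toy location cache keyed by (scope, digest); real
// BlobInfoCache implementations also persist entries and rank candidates.
type locCache map[string][]types.BICLocationReference

func key(scope types.BICTransportScope, d digest.Digest) string {
	return scope.Opaque + "|" + d.String()
}

func main() {
	cache := locCache{}
	scope := types.BICTransportScope{Opaque: "registry.example.com"}           // cf. bicTransportScope
	loc := types.BICLocationReference{Opaque: "registry.example.com/team/app"} // cf. newBICLocationReference
	d := digest.Digest("sha256:3333333333333333333333333333333333333333333333333333333333333333")

	cache[key(scope, d)] = append(cache[key(scope, d)], loc) // RecordKnownLocation
	for _, candidate := range cache[key(scope, d)] {         // CandidateLocations
		fmt.Println("try reuse from", candidate.Opaque) // e.g. blobExists, then mountBlob
	}
}
```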
207 changes: vendor/github.com/containers/image/docker/docker_client.go (generated, vendored)
@@ -13,15 +13,17 @@ import (
"path/filepath"
"strconv"
"strings"
"sync"
"time"

"github.com/containers/image/docker/reference"
"github.com/containers/image/pkg/docker/config"
"github.com/containers/image/pkg/sysregistriesv2"
"github.com/containers/image/pkg/tlsclientconfig"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/client"
"github.com/docker/go-connections/tlsconfig"
"github.com/opencontainers/go-digest"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -69,30 +71,40 @@ type extensionSignatureList struct {
}

type bearerToken struct {
Token string `json:"token"`
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
Token string `json:"token"`
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
expirationTime time.Time
}

// dockerClient is configuration for dealing with a single Docker registry.
type dockerClient struct {
// The following members are set by newDockerClient and do not change afterwards.
sys *types.SystemContext
registry string
sys *types.SystemContext
registry string

// tlsClientConfig is set up by newDockerClient and will be used and updated
// by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
tlsClientConfig *tls.Config
// The following members are not set by newDockerClient and must be set by callers if needed.
username string
password string
client *http.Client
signatureBase signatureStorageBase
scope authScope

// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
client *http.Client
scheme string
challenges []challenge
supportsSignatures bool
// The following members are private state for setupRequestAuth, both are valid if token != nil.
token *bearerToken
tokenExpiration time.Time

// Private state for setupRequestAuth (key: string, value: bearerToken)
tokenCache sync.Map
// Private state for detectProperties:
detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
detectPropertiesError error // detectPropertiesError caches the initial error.
}

type authScope struct {
@@ -128,6 +140,7 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
return token, nil
}
@@ -186,7 +199,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
registry := reference.Domain(ref.ref)
username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref))
username, password, err := config.GetAuthentication(sys, registry)
if err != nil {
return nil, errors.Wrapf(err, "error getting username and password")
}
@@ -194,19 +207,31 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
if err != nil {
return nil, err
}
remoteName := reference.Path(ref.ref)

return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName)
client, err := newDockerClient(sys, registry, ref.ref.Name())
if err != nil {
return nil, err
}
client.username = username
client.password = password
client.signatureBase = sigBase
client.scope.actions = actions
client.scope.remoteName = reference.Path(ref.ref)
return client, nil
}
// newDockerClientWithDetails returns a new dockerClient instance for the given parameters
// newDockerClient returns a new dockerClient instance for the given registry
// and reference. The reference is used to query the registry configuration
// and can either be a registry (e.g., "registry.com[:5000]") or a repository
// (e.g., "registry.com[:5000][/some/namespace]/repo").
// Please note that newDockerClient does not set all members of dockerClient
// (e.g., username and password); those must be set by callers if necessary.
func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
hostName := registry
if registry == dockerHostname {
registry = dockerRegistry
}
tr := tlsclientconfig.NewTransport()
tr.TLSClientConfig = serverDefault()
tlsClientConfig := serverDefault()

// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
@@ -217,37 +242,40 @@ func newDockerClientWithDetails(sys *types.SystemContext, registry, username, pa
if err != nil {
return nil, err
}
if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil {
if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
return nil, err
}

if sys != nil && sys.DockerInsecureSkipTLSVerify {
tr.TLSClientConfig.InsecureSkipVerify = true
// Check if TLS verification shall be skipped (default=false) which can
// be specified in the sysregistriesv2 configuration.
skipVerify := false
reg, err := sysregistriesv2.FindRegistry(sys, reference)
if err != nil {
return nil, errors.Wrapf(err, "error loading registries")
}
if reg != nil {
skipVerify = reg.Insecure
}
tlsClientConfig.InsecureSkipVerify = skipVerify

return &dockerClient{
sys: sys,
registry: registry,
username: username,
password: password,
client: &http.Client{Transport: tr},
signatureBase: sigBase,
scope: authScope{
actions: actions,
remoteName: remoteName,
},
sys: sys,
registry: registry,
tlsClientConfig: tlsClientConfig,
}, nil
}
// CheckAuth validates the credentials by attempting to log into the registry
// returns an error if an error occcured while making the http request or the status code received was 401
// returns an error if an error occurred while making the http request or the status code received was 401
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
client, err := newDockerClient(sys, registry, registry)
if err != nil {
return errors.Wrapf(err, "error creating new docker client")
}
client.username = username
client.password = password

resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
if err != nil {
return err
}
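`CheckAuth` keeps its exported signature; only the client construction behind it changes. A usage sketch (the registry host and credentials are placeholders):

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/docker"
)

func main() {
	ctx := context.Background()
	// nil SystemContext uses defaults; a 401 or transport error surfaces as err.
	if err := docker.CheckAuth(ctx, nil, "user", "secret", "registry.example.com"); err != nil {
		fmt.Println("login failed:", err)
		return
	}
	fmt.Println("credentials accepted")
}
```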
@@ -299,16 +327,21 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
return nil, errors.Wrapf(err, "error getting username and password")
}

// The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
// So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
// The /v2/_catalog endpoint has been disabled for docker.io therefore
// the call made to that endpoint will fail. So using the v1 hostname
// for docker.io for simplicity of implementation and the fact that it
// returns search results.
hostname := registry
if registry == dockerHostname {
registry = dockerV1Hostname
hostname = dockerV1Hostname
}

client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
client, err := newDockerClient(sys, hostname, registry)
if err != nil {
return nil, errors.Wrapf(err, "error creating new docker client")
}
client.username = username
client.password = password

// Only try the v1 search endpoint if the search query is not empty. If it is
// empty skip to the v2 endpoint.
@@ -322,8 +355,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
q.Set("n", strconv.Itoa(limit))
u.RawQuery = q.Encode()

logrus.Debugf("trying to talk to v1 search endpoint\n")
resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth)
logrus.Debugf("trying to talk to v1 search endpoint")
resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil)
if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else {
@@ -339,8 +372,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
}
}

logrus.Debugf("trying to talk to v2 search endpoint\n")
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth)
logrus.Debugf("trying to talk to v2 search endpoint")
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
} else {
@@ -369,20 +402,20 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima

// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) {
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) {
if err := c.detectProperties(ctx); err != nil {
return nil, err
}

url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth)
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
}

// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
// TODO(runcom): too many arguments here, use a struct
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) {
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
req, err := http.NewRequest(method, url, stream)
if err != nil {
return nil, err
@@ -401,7 +434,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
}
if auth == v2Auth {
if err := c.setupRequestAuth(req); err != nil {
if err := c.setupRequestAuth(req, extraScope); err != nil {
return nil, err
}
}
@@ -420,7 +453,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
//
// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up
func (c *dockerClient) setupRequestAuth(req *http.Request) error {
func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error {
if len(c.challenges) == 0 {
return nil
}
@@ -432,24 +465,27 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
req.SetBasicAuth(c.username, c.password)
return nil
case "bearer":
if c.token == nil || time.Now().After(c.tokenExpiration) {
realm, ok := challenge.Parameters["realm"]
if !ok {
return errors.Errorf("missing realm in bearer auth challenge")
}
service, _ := challenge.Parameters["service"] // Will be "" if not present
var scope string
if c.scope.remoteName != "" && c.scope.actions != "" {
scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
}
token, err := c.getBearerToken(req.Context(), realm, service, scope)
cacheKey := ""
scopes := []authScope{c.scope}
if extraScope != nil {
// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
scopes = append(scopes, *extraScope)
}
var token bearerToken
t, inCache := c.tokenCache.Load(cacheKey)
if inCache {
token = t.(bearerToken)
}
if !inCache || time.Now().After(token.expirationTime) {
t, err := c.getBearerToken(req.Context(), challenge, scopes)
if err != nil {
return err
}
c.token = token
c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
token = *t
c.tokenCache.Store(cacheKey, token)
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
return nil
default:
logrus.Debugf("no handler for %s authentication", challenge.Scheme)
@@ -459,7 +495,12 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
return nil
}
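The single `token`/`tokenExpiration` pair becomes a `sync.Map` keyed by the extra scope, so concurrent requests with different scopes no longer evict each other's tokens. A minimal sketch of that keying scheme, with the token type reduced to what the cache needs (names here are illustrative, not the real fields):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type cachedToken struct {
	value      string
	expiration time.Time
}

var tokenCache sync.Map // key: "<remoteName>:<actions>" ("" for the base scope)

// tokenFor returns a cached bearer token for cacheKey, fetching a fresh one
// only when missing or expired; fetch stands in for getBearerToken.
func tokenFor(cacheKey string, fetch func() cachedToken) cachedToken {
	if t, ok := tokenCache.Load(cacheKey); ok {
		tok := t.(cachedToken)
		if time.Now().Before(tok.expiration) {
			return tok
		}
	}
	tok := fetch()
	tokenCache.Store(cacheKey, tok)
	return tok
}

func main() {
	tok := tokenFor("library/other:pull", func() cachedToken {
		return cachedToken{value: "example-token", expiration: time.Now().Add(60 * time.Second)}
	})
	fmt.Println("Authorization: Bearer", tok.value)
}
```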
func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) {
realm, ok := challenge.Parameters["realm"]
if !ok {
return nil, errors.Errorf("missing realm in bearer auth challenge")
}

authReq, err := http.NewRequest("GET", realm, nil)
if err != nil {
return nil, err
@@ -469,11 +510,13 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
if c.username != "" {
getParams.Add("account", c.username)
}
if service != "" {
if service, ok := challenge.Parameters["service"]; ok && service != "" {
getParams.Add("service", service)
}
if scope != "" {
getParams.Add("scope", scope)
for _, scope := range scopes {
if scope.remoteName != "" && scope.actions != "" {
getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
}
}
authReq.URL.RawQuery = getParams.Encode()
if c.username != "" && c.password != "" {
@@ -505,16 +548,21 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
return newBearerTokenFromJSONBlob(tokenBlob)
}
// detectProperties detects various properties of the registry.
// See the dockerClient documentation for members which are affected by this.
func (c *dockerClient) detectProperties(ctx context.Context) error {
if c.scheme != "" {
return nil
// detectPropertiesHelper performs the work of detectProperties which executes
// it at most once.
func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
// We overwrite the TLS client's `InsecureSkipVerify` only if explicitly
// specified by the system context
if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
}
tr := tlsclientconfig.NewTransport()
tr.TLSClientConfig = c.tlsClientConfig
c.client = &http.Client{Transport: tr}

ping := func(scheme string) error {
url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
return err
@@ -530,7 +578,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return nil
}
err := ping("https")
if err != nil && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify {
if err != nil && c.tlsClientConfig.InsecureSkipVerify {
err = ping("http")
}
if err != nil {
@@ -541,7 +589,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
// best effort to understand if we're talking to a V1 registry
pingV1 := func(scheme string) bool {
url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
return false
@@ -554,7 +602,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return true
}
isV1 := pingV1("https")
if !isV1 && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify {
if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
isV1 = pingV1("http")
}
if isV1 {
@@ -564,11 +612,18 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
return err
}

// detectProperties detects various properties of the registry.
// See the dockerClient documentation for members which are affected by this.
func (c *dockerClient) detectProperties(ctx context.Context) error {
c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) })
return c.detectPropertiesError
}
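The `scheme != ""` sentinel gives way to `sync.Once`, which also caches the first error so every later call observes the same outcome. The pattern in isolation, with a stand-in for the expensive probe:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type probe struct {
	once sync.Once
	err  error
}

// detect runs the expensive detection exactly once; all callers share the
// result, including a failure.
func (p *probe) detect() error {
	p.once.Do(func() {
		p.err = errors.New("ping failed") // stand-in for detectPropertiesHelper
	})
	return p.err
}

func main() {
	var p probe
	fmt.Println(p.detect()) // runs the helper
	fmt.Println(p.detect()) // returns the cached error, no second probe
}
```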
// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
if err != nil {
return nil, err
}
4 changes: vendor/github.com/containers/image/docker/docker_image.go (generated, vendored)
@@ -25,7 +25,7 @@ type Image struct {
// a client to the registry hosting the given image.
// The caller must call .Close() on the returned Image.
func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
s, err := newImageSource(sys, ref)
s, err := newImageSource(ctx, sys, ref)
if err != nil {
return nil, err
}
@@ -66,7 +66,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
tags := make([]string, 0)

for {
res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
if err != nil {
return nil, err
}
190 changes: vendor/github.com/containers/image/docker/docker_image_dest.go (generated, vendored)
@@ -12,9 +12,11 @@ import (
"net/url"
"os"
"path/filepath"
"strings"

"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
@@ -110,27 +112,36 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
return len(p), nil
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
return true
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
if inputInfo.Digest.String() != "" {
haveBlob, size, err := d.HasBlob(ctx, inputInfo)
// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false)
if err != nil {
return types.BlobInfo{}, err
}
if haveBlob {
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
return reusedInfo, nil
}
}

// FIXME? Chunked upload, progress reporting, etc.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath)
res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth)
res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
}
@@ -147,7 +158,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
digester := digest.Canonical.Digester()
sizeCounter := &sizeCounter{}
tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth)
res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil)
if err != nil {
logrus.Debugf("Error uploading layer chunked, response %#v", res)
return types.BlobInfo{}, err
@@ -160,13 +171,13 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}

// FIXME: DELETE uploadLocation on failure
// FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)

locationQuery := uploadLocation.Query()
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
locationQuery.Set("digest", computedDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth)
res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
}
@@ -177,21 +188,17 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}

logrus.Debugf("Upload of layer %s complete", computedDigest)
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref))
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
}
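The `none.NoCache` passed above is a cache that records nothing and offers no candidates, which is exactly what the pre-upload existence check wants: probe only the primary repository, without retrying all mount candidates. A toy equivalent, hedged as an illustration of the semantics rather than the real `pkg/blobinfocache/none` code (the interface shape is inferred from the methods visible in this diff):

```go
package main

import (
	"github.com/containers/image/types"
	digest "github.com/opencontainers/go-digest"
)

// noCache discards every record and never proposes reuse candidates.
type noCache struct{}

func (noCache) UncompressedDigest(d digest.Digest) digest.Digest { return "" }
func (noCache) RecordDigestUncompressedPair(c, u digest.Digest)  {}
func (noCache) RecordKnownLocation(t types.ImageTransport, s types.BICTransportScope,
	d digest.Digest, l types.BICLocationReference) {
}
func (noCache) CandidateLocations(t types.ImageTransport, s types.BICTransportScope,
	d digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
	return nil
}

func main() {
	var _ types.BlobInfoCache = noCache{} // compile-time check against the interface
}
```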
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" {
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())

func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
if err != nil {
return false, -1, err
}
@@ -202,7 +209,7 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
@@ -211,8 +218,128 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
}
}

func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
u := url.URL{
Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
RawQuery: url.Values{
"mount": {srcDigest.String()},
"from": {reference.Path(srcRepo)},
}.Encode(),
}
mountPath := u.String()
logrus.Debugf("Trying to mount %s", mountPath)
res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
if err != nil {
return err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusCreated:
logrus.Debugf("... mount OK")
return nil
case http.StatusAccepted:
// Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
// Abort, and let the ultimate caller do an upload when it's ready, instead.
// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
uploadLocation, err := res.Location()
if err != nil {
return errors.Wrap(err, "Error determining upload URL after a mount attempt")
}
logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
if err != nil {
logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
} else {
defer res2.Body.Close()
if res2.StatusCode != http.StatusNoContent {
logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode))
}
}
// Anyway, if canceling the upload fails, ignore it and return the more important error:
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
default:
logrus.Debugf("Error mounting, response %#v", *res)
return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
}
}
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
}

// First, check whether the blob happens to already exist at the destination.
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
if err != nil {
return false, types.BlobInfo{}, err
}
if exists {
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
}

// Then try reusing blobs from other locations.
for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())

// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
continue
}
if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
logrus.Debug("... Already tried the primary destination")
continue
}

// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.

// Checking candidateRepo, and mounting from it, requires an
// expanded token scope.
extraScope := &authScope{
remoteName: reference.Path(candidateRepo),
actions: "pull",
}
// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
// So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
// On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
// Even worse, docker/distribution does not actually reasonably implement canceling uploads
// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
// so, be a nice client and don't create unnecessary upload sessions on the server.
exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
if err != nil {
logrus.Debugf("... Failed: %v", err)
continue
}
if !exists {
// FIXME? Should we drop the blob from cache here (and elsewhere?)?
continue // logrus.Debug() already happened in blobExists
}
if candidateRepo.Name() != d.ref.ref.Name() {
if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
logrus.Debugf("... Mount failed: %v", err)
continue
}
}
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
}

return false, types.BlobInfo{}, nil
}
// PutManifest writes manifest to the destination.
@@ -237,7 +364,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) erro
if mimeType != "" {
headers["Content-Type"] = []string{mimeType}
}
res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth)
res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
if err != nil {
return err
}
@@ -264,14 +391,29 @@ func isManifestInvalidError(err error) bool {
if !ok || len(errors) == 0 {
return false
}
ec, ok := errors[0].(errcode.ErrorCoder)
err = errors[0]
ec, ok := err.(errcode.ErrorCoder)
if !ok {
return false
}

switch ec.ErrorCode() {
// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
case v2.ErrorCodeManifestInvalid:
return true
// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
// when uploading to a tag (because it can’t find a matching tag inside the manifest)
return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
case v2.ErrorCodeTagInvalid:
return true
// ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
// uploading an OCI manifest that is (correctly, according to the spec) missing
// a top-level media type. See libpod issue #1719
// FIXME: remove this case when ECR behavior is fixed
case errcode.ErrorCodeUnsupported:
return strings.Contains(err.Error(), "Invalid JSON syntax")
default:
return false
}
}

func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
@@ -442,7 +584,7 @@ sigExists:
}

path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth)
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
if err != nil {
return err
}
124 changes: vendor/github.com/containers/image/docker/docker_image_src.go (generated, vendored)
@@ -13,9 +13,10 @@ import (
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/pkg/sysregistriesv2"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/docker/distribution/registry/client"
|
||||
"github.com/opencontainers/go-digest"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -28,17 +29,94 @@ type dockerImageSource struct {
|
||||
cachedManifestMIMEType string // Only valid if cachedManifest != nil
|
||||
}
|
||||
|
||||
// newImageSource creates a new ImageSource for the specified image reference.
|
||||
// The caller must call .Close() on the returned ImageSource.
|
||||
func newImageSource(sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
|
||||
c, err := newDockerClientFromRef(sys, ref, false, "pull")
|
||||
// newImageSource creates a new `ImageSource` for the specified image reference
|
||||
// `ref`.
|
||||
//
|
||||
// The following steps will be done during the instance creation:
|
||||
//
|
||||
// - Lookup the registry within the configured location in
|
||||
// `sys.SystemRegistriesConfPath`. If there is no configured registry available,
|
||||
// we fallback to the provided docker reference `ref`.
|
||||
//
|
||||
// - References which contain a configured prefix will be automatically rewritten
|
||||
// to the correct target reference. For example, if the configured
// `prefix = "example.com/foo"`, `location = "example.com"`, and the image is
// pulled from the ref `example.com/foo/image`, then the resulting pull
// effectively points to `example.com/image`.
//
// - If the reference rewrite succeeds, it will be used as the `dockerRef`
// in the client. If the rewrite fails, the function immediately returns an error.
//
// - Each mirror will be used (in the configured order) to test the
// availability of the image manifest on the remote location. For example,
// if the manifest is not reachable due to connectivity issues, then the next
// mirror will be tested instead. If no mirror is configured or contains the
// target manifest, then the initial `ref` will be tested as a fallback. The
// creation of the new `dockerImageSource` only succeeds if a remote
// location with the available manifest was found.
//
// A cleanup call to `.Close()` is needed when the caller is done using the
// returned `ImageSource`.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
    registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
    if err != nil {
        return nil, err
        return nil, errors.Wrapf(err, "error loading registries configuration")
    }
    return &dockerImageSource{
        ref: ref,
        c:   c,
    }, nil

    if registry == nil {
        // No configuration was found for the provided reference, so we create
        // a fallback registry by hand to make the client creation below work
        // as intended.
        registry = &sysregistriesv2.Registry{
            Endpoint: sysregistriesv2.Endpoint{
                Location: ref.ref.String(),
            },
            Prefix: ref.ref.String(),
        }
    }

    primaryDomain := reference.Domain(ref.ref)
    // Found the registry within the sysregistriesv2 configuration. Now we test
    // all endpoints for the manifest availability. If a working image source
    // was found, it will be used for all future pull actions.
    manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint")
    for _, endpoint := range append(registry.Mirrors, registry.Endpoint) {
        logrus.Debugf("Trying to pull %q from endpoint %q", ref.ref, endpoint.Location)

        newRef, err := endpoint.RewriteReference(ref.ref, registry.Prefix)
        if err != nil {
            return nil, err
        }
        dockerRef, err := newReference(newRef)
        if err != nil {
            return nil, err
        }

        endpointSys := sys
        // sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
        if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(dockerRef.ref) != primaryDomain {
            copy := *endpointSys
            copy.DockerAuthConfig = nil
            endpointSys = &copy
        }

        client, err := newDockerClientFromRef(endpointSys, dockerRef, false, "pull")
        if err != nil {
            return nil, err
        }
        client.tlsClientConfig.InsecureSkipVerify = endpoint.Insecure

        testImageSource := &dockerImageSource{
            ref: dockerRef,
            c:   client,
        }

        manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx)
        if manifestLoadErr == nil {
            return testImageSource, nil
        }
    }
    return nil, manifestLoadErr
}
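
To make the fallback order concrete, here is a minimal, self-contained sketch of the same try-mirrors-then-primary pattern. The `endpoint` type and `probe` callback are illustrative stand-ins, not the library's API:

```go
package main

import (
    "errors"
    "fmt"
)

// endpoint is a hypothetical stand-in for a configured registry location.
type endpoint struct{ location string }

// tryEndpoints probes mirrors first, then the primary endpoint, and returns
// the first one that can serve the manifest; otherwise the last error.
func tryEndpoints(mirrors []endpoint, primary endpoint, probe func(endpoint) error) (endpoint, error) {
    err := errors.New("internal error: no endpoint was tried")
    for _, e := range append(mirrors, primary) {
        if err = probe(e); err == nil {
            return e, nil // first endpoint with a reachable manifest wins
        }
    }
    return endpoint{}, err
}

func main() {
    mirrors := []endpoint{{"example-mirror-0.local"}, {"example-mirror-1.local"}}
    primary := endpoint{"example.com"}
    got, err := tryEndpoints(mirrors, primary, func(e endpoint) error {
        if e.location == "example-mirror-1.local" {
            return nil // pretend only this mirror has the manifest
        }
        return fmt.Errorf("manifest not reachable on %s", e.location)
    })
    fmt.Println(got.location, err) // example-mirror-1.local <nil>
}
```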

// Reference returns the reference used to set up this source, _as specified by the user_
@@ -89,7 +167,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
    path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
    headers := make(map[string][]string)
    headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes
    res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth)
    res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
    if err != nil {
        return nil, "", err
    }
@@ -137,7 +215,7 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
        err error
    )
    for _, url := range urls {
        resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
        resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
        if err == nil {
            if resp.StatusCode != http.StatusOK {
                err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
@@ -147,10 +225,10 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
            break
        }
    }
    if resp.Body != nil && err == nil {
        return resp.Body, getBlobSize(resp), nil
    if err != nil {
        return nil, 0, err
    }
    return nil, 0, err
    return resp.Body, getBlobSize(resp), nil
}

func getBlobSize(resp *http.Response) int64 {
@@ -161,15 +239,22 @@ func getBlobSize(resp *http.Response) int64 {
    return size
}

// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
    return true
}
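
HasThreadSafeGetBlob is a capability flag for callers: a copy pipeline may download several layers at once only when the source declares GetBlob safe to call concurrently. A hedged caller-side sketch — the `blobSource` interface and `digests` input are illustrative, not the library's copy code:

```go
package main

import (
    "context"
    "fmt"
    "sync"
)

// blobSource is an illustrative subset of an image source.
type blobSource interface {
    HasThreadSafeGetBlob() bool
    GetBlob(ctx context.Context, digest string) error
}

// fetchAll downloads blobs in parallel only if the source allows it.
func fetchAll(ctx context.Context, src blobSource, digests []string) error {
    if !src.HasThreadSafeGetBlob() {
        for _, d := range digests { // sequential fallback
            if err := src.GetBlob(ctx, d); err != nil {
                return err
            }
        }
        return nil
    }
    var wg sync.WaitGroup
    errs := make(chan error, len(digests))
    for _, d := range digests {
        wg.Add(1)
        go func(d string) {
            defer wg.Done()
            errs <- src.GetBlob(ctx, d)
        }(d)
    }
    wg.Wait()
    close(errs)
    for err := range errs {
        if err != nil {
            return err
        }
    }
    return nil
}

// dummy pretends every blob fetch succeeds.
type dummy struct{}

func (dummy) HasThreadSafeGetBlob() bool { return true }
func (dummy) GetBlob(_ context.Context, d string) error {
    fmt.Println("fetched", d)
    return nil
}

func main() {
    _ = fetchAll(context.Background(), dummy{}, []string{"sha256:aa", "sha256:bb"})
}
```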

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    if len(info.URLs) != 0 {
        return s.getExternalBlob(ctx, info.URLs)
    }

    path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
    logrus.Debugf("Downloading %s", path)
    res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
    res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
    if err != nil {
        return nil, 0, err
    }
@@ -177,6 +262,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (i
        // print url also
        return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
    }
    cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
    return res.Body, getBlobSize(res), nil
}
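
Every GetBlob now threads a BlobInfoCache through, so successful fetches can be recorded (the `cache.RecordKnownLocation` call above). Callers without a persistent cache pass the no-op implementation — the same `none.NoCache` this diff uses elsewhere. A minimal sketch of that no-op cache, assuming the vendored import paths in this tree:

```go
package main

import (
    "fmt"

    "github.com/containers/image/pkg/blobinfocache/none"
    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
)

func main() {
    // none.NoCache satisfies types.BlobInfoCache but remembers nothing;
    // it is what callers pass when they have no persistent cache.
    var cache types.BlobInfoCache = none.NoCache

    d := digest.FromString("example blob")
    cache.RecordDigestUncompressedPair(d, d) // a no-op
    fmt.Println(cache.UncompressedDigest(d)) // empty — nothing was recorded
}
```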

@@ -332,7 +418,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
        return err
    }
    getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
    get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth)
    get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil)
    if err != nil {
        return err
    }
@@ -354,7 +440,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere

    // When retrieving the digest from a registry >= 2.3 use the following header:
    //   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
    delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth)
    delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
    if err != nil {
        return err
    }

12 vendor/github.com/containers/image/docker/docker_transport.go generated vendored
@@ -61,8 +61,13 @@ func ParseReference(refString string) (types.ImageReference, error) {

// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) {
    return newReference(ref)
}

// newReference returns a dockerReference for a named reference.
func newReference(ref reference.Named) (dockerReference, error) {
    if reference.IsNameOnly(ref) {
        return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
        return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
    }
    // A github.com/distribution/reference value can have a tag and a digest at the same time!
    // The docker/distribution API does not really support that (we can’t ask for an image with a specific
@@ -72,8 +77,9 @@ func NewReference(ref reference.Named) (types.ImageReference, error) {
    _, isTagged := ref.(reference.NamedTagged)
    _, isDigested := ref.(reference.Canonical)
    if isTagged && isDigested {
        return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported")
        return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported")
    }

    return dockerReference{
        ref: ref,
    }, nil
@@ -135,7 +141,7 @@ func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContex
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
    return newImageSource(sys, ref)
    return newImageSource(ctx, sys, ref)
}

// NewImageDestination returns a types.ImageDestination for this reference.

42 vendor/github.com/containers/image/docker/tarfile/dest.go generated vendored
@@ -82,13 +82,19 @@ func (d *Destination) IgnoresEmbeddedDockerReference() bool {
    return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
    return false
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
    // Ouch, we need to stream the blob into a temporary file just to determine the size.
    // When the layer is decompressed, we also have to generate the digest on uncompressed data.
    if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
@@ -120,12 +126,12 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
    }

    // Maybe the blob has already been sent
    ok, size, err := d.HasBlob(ctx, inputInfo)
    ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
    if err != nil {
        return types.BlobInfo{}, err
    }
    if ok {
        return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
        return reusedInfo, nil
    }

    if isConfig {
@@ -151,29 +157,21 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
    return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with
// the matching digest which can be reapplied using ReapplyBlob. Unlike
// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
// the blob must also be returned. If the destination does not contain the
// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
// returns a non-nil error only on an unexpected failure.
func (d *Destination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
    if info.Digest == "" {
        return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
        return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
    }
    if blob, ok := d.blobs[info.Digest]; ok {
        return true, blob.Size, nil
        return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
    }
    return false, -1, nil
}
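
TryReusingBlob replaces the HasBlob/ReapplyBlob pair with a single call, and the intended calling convention is "try to reuse, upload only on a miss". A hedged sketch of that caller pattern — the `dest` interface and `blobInfo` struct below are pared-down stand-ins, not the library's types:

```go
package main

import (
    "bytes"
    "context"
    "fmt"
    "io"
)

type blobInfo struct {
    Digest string
    Size   int64
}

// dest is an illustrative stand-in for the destination side of the API.
type dest interface {
    TryReusingBlob(ctx context.Context, info blobInfo) (bool, blobInfo, error)
    PutBlob(ctx context.Context, stream io.Reader, info blobInfo) (blobInfo, error)
}

// copyBlob reuses an existing blob when possible and uploads otherwise.
func copyBlob(ctx context.Context, d dest, info blobInfo, stream io.Reader) (blobInfo, error) {
    reused, reusedInfo, err := d.TryReusingBlob(ctx, info)
    if err != nil {
        return blobInfo{}, err
    }
    if reused {
        return reusedInfo, nil // skip the upload entirely
    }
    return d.PutBlob(ctx, stream, info)
}

// memDest keeps blobs in a map, mimicking the digest lookup in the tarfile code.
type memDest struct{ blobs map[string]int64 }

func (m *memDest) TryReusingBlob(_ context.Context, info blobInfo) (bool, blobInfo, error) {
    if size, ok := m.blobs[info.Digest]; ok {
        return true, blobInfo{Digest: info.Digest, Size: size}, nil
    }
    return false, blobInfo{}, nil
}

func (m *memDest) PutBlob(_ context.Context, stream io.Reader, info blobInfo) (blobInfo, error) {
    n, err := io.Copy(io.Discard, stream)
    if err != nil {
        return blobInfo{}, err
    }
    m.blobs[info.Digest] = n
    return blobInfo{Digest: info.Digest, Size: n}, nil
}

func main() {
    d := &memDest{blobs: map[string]int64{}}
    info := blobInfo{Digest: "sha256:abc", Size: -1}
    first, _ := copyBlob(context.Background(), d, info, bytes.NewReader([]byte("layer")))
    second, _ := copyBlob(context.Background(), d, info, nil) // nil stream is never read on reuse
    fmt.Println(first.Size, second.Size)                      // 5 5
}
```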

// ReapplyBlob informs the image destination that a blob for which HasBlob
// previously returned true would have been passed to PutBlob if it had
// returned false. Like HasBlob and unlike PutBlob, the digest can not be
// empty. If the blob is a filesystem layer, this signifies that the changes
// it describes need to be applied again when composing a filesystem tree.
func (d *Destination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
    return info, nil
    return false, types.BlobInfo{}, nil
}

func (d *Destination) createRepositoriesFile(rootLayerID string) error {

83 vendor/github.com/containers/image/docker/tarfile/src.go generated vendored
@@ -9,6 +9,7 @@ import (
    "io/ioutil"
    "os"
    "path"
    "sync"

    "github.com/containers/image/internal/tmpdir"
    "github.com/containers/image/manifest"
@@ -21,8 +22,10 @@ import (
// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
    tarPath string
    removeTarPathOnClose bool // Remove temp file on close if true
    removeTarPathOnClose bool // Remove temp file on close if true
    cacheDataLock sync.Once // Atomic way to ensure that ensureCachedDataIsPresent is only invoked once
    // The following data is only available after ensureCachedDataIsPresent() succeeds
    cacheDataResult error // The return value of ensureCachedDataIsPresent, since it should be as safe to cache as the side effects
    tarManifest *ManifestItem // nil if not available yet.
    configBytes []byte
    configDigest digest.Digest
@@ -199,43 +202,46 @@ func (s *Source) readTarComponent(path string) ([]byte, error) {

// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
func (s *Source) ensureCachedDataIsPresent() error {
    if s.tarManifest != nil {
        return nil
    }
    s.cacheDataLock.Do(func() {
        // Read and parse manifest.json
        tarManifest, err := s.loadTarManifest()
        if err != nil {
            s.cacheDataResult = err
            return
        }

    // Read and parse manifest.json
    tarManifest, err := s.loadTarManifest()
    if err != nil {
        return err
    }
        // Check to make sure length is 1
        if len(tarManifest) != 1 {
            s.cacheDataResult = errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
            return
        }

    // Check to make sure length is 1
    if len(tarManifest) != 1 {
        return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
    }
        // Read and parse config.
        configBytes, err := s.readTarComponent(tarManifest[0].Config)
        if err != nil {
            s.cacheDataResult = err
            return
        }
        var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
        if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
            s.cacheDataResult = errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
            return
        }

    // Read and parse config.
    configBytes, err := s.readTarComponent(tarManifest[0].Config)
    if err != nil {
        return err
    }
    var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
    if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
        return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
    }
        knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
        if err != nil {
            s.cacheDataResult = err
            return
        }

    knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
    if err != nil {
        return err
    }

    // Success; commit.
    s.tarManifest = &tarManifest[0]
    s.configBytes = configBytes
    s.configDigest = digest.FromBytes(configBytes)
    s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
    s.knownLayers = knownLayers
    return nil
        // Success; commit.
        s.tarManifest = &tarManifest[0]
        s.configBytes = configBytes
        s.configDigest = digest.FromBytes(configBytes)
        s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
        s.knownLayers = knownLayers
    })
    return s.cacheDataResult
}
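
The rewrite above replaces the `s.tarManifest != nil` guard with a sync.Once that caches the error alongside the loaded data, so concurrent and repeated callers all observe the same outcome. The idiom in isolation — a minimal sketch, not the tarfile code itself:

```go
package main

import (
    "fmt"
    "sync"
)

// lazy loads an expensive value exactly once, caching the error too,
// so every caller sees the same result of the single load attempt.
type lazy struct {
    once sync.Once
    data string
    err  error
}

func (l *lazy) get(load func() (string, error)) (string, error) {
    l.once.Do(func() {
        l.data, l.err = load()
    })
    return l.data, l.err
}

func main() {
    calls := 0
    l := &lazy{}
    load := func() (string, error) {
        calls++
        return "manifest.json contents", nil
    }
    for i := 0; i < 3; i++ {
        v, err := l.get(load)
        fmt.Println(v, err)
    }
    fmt.Println("load ran", calls, "time(s)") // load ran 1 time(s)
}
```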

// loadTarManifest loads and decodes the manifest.json.
@@ -397,8 +403,15 @@ func (r uncompressedReadCloser) Close() error {
    return res
}

// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *Source) HasThreadSafeGetBlob() bool {
    return true
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    if err := s.ensureCachedDataIsPresent(); err != nil {
        return nil, 0, err
    }

28 vendor/github.com/containers/image/docs/containers-certs.d.5.md generated vendored Normal file
@@ -0,0 +1,28 @@
% containers-certs.d(5)

# NAME
containers-certs.d - Directory for storing custom container-registry TLS configurations

# DESCRIPTION
A custom TLS configuration for a container registry can be configured by creating a directory under `/etc/containers/certs.d`.
The name of the directory must correspond to the `host:port` of the registry (e.g., `my-registry.com:5000`).

## Directory Structure
A certs directory can contain one or more files with the following extensions:

* `*.crt` files with this extension will be interpreted as CA certificates
* `*.cert` files with this extension will be interpreted as client certificates
* `*.key` files with this extension will be interpreted as client keys

Note that the client certificate-key pair will be selected by the file name (e.g., `client.{cert,key}`).
An example setup for a registry running at `my-registry.com:5000` may look as follows:
```
/etc/containers/certs.d/             <- Certificate directory
└── my-registry.com:5000             <- Hostname:port
    ├── client.cert                  <- Client certificate
    ├── client.key                   <- Client key
    └── ca.crt                       <- Certificate authority that signed the registry certificate
```
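
The extension rules above are simple enough to sketch: treat `*.crt` files as CAs, and pair each `*.cert` with the `*.key` of the same base name. A minimal, hedged illustration in Go (not the library's actual loader; the directory path is the example from above):

```go
package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

func main() {
    dir := "/etc/containers/certs.d/my-registry.com:5000" // example path from above
    entries, err := os.ReadDir(dir)
    if err != nil {
        panic(err)
    }
    for _, e := range entries {
        name := e.Name()
        switch {
        case strings.HasSuffix(name, ".crt"):
            fmt.Println("CA certificate:", filepath.Join(dir, name))
        case strings.HasSuffix(name, ".cert"):
            // The matching key shares the base name: client.cert -> client.key.
            key := strings.TrimSuffix(name, ".cert") + ".key"
            fmt.Println("client pair:", name, "+", key)
        }
    }
}
```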

# HISTORY
Feb 2019, Originally compiled by Valentin Rothberg <rothberg@redhat.com>
77 vendor/github.com/containers/image/docs/containers-registries.conf.5.md generated vendored
@@ -7,14 +7,68 @@ containers-registries.conf - Syntax of System Registry Configuration File

# DESCRIPTION
The CONTAINERS-REGISTRIES configuration file is a system-wide configuration
file for container image registries. The file format is TOML. The valid
categories are: 'registries.search', 'registries.insecure', and
'registries.block'.
file for container image registries. The file format is TOML.

By default, the configuration file is located at `/etc/containers/registries.conf`.

# FORMAT
The TOML format is used to build a simple list format for registries under three
# FORMATS

## VERSION 2
VERSION 2 is the latest format of the `registries.conf` and is currently in
beta. This means that, in general, VERSION 1 should be used in production
environments for now.

Every registry can have its own mirrors configured. The mirrors will be tested
in order for the availability of the remote manifest. Currently this happens
only during an image pull. If the manifest is not reachable due to connectivity
issues or the unavailability of the remote manifest, then the next mirror will
be tested instead. If no mirror is configured or contains the manifest to be
pulled, then the initially provided reference will be used as a fallback. It is
possible to set the `insecure` option per mirror, too.

Furthermore, it is possible to specify a `prefix` for a registry. The `prefix`
is used to find the relevant target registry from which the image has to be
pulled. During the test for the availability of the image, the prefixed
location will be rewritten to the correct remote location. This applies to
mirrors as well as the fallback `location`. If no prefix is specified, it
defaults to the specified `location`. For example, if
`prefix = "example.com/foo"`, `location = "example.com"`, and the image will be
pulled from `example.com/foo/image`, then the resulting pull will effectively
point to `example.com/image`.

By default, container runtimes use TLS when retrieving images from a registry.
If the registry is not set up with TLS, then the container runtime will fail to
pull images from the registry. If you set `insecure = true` for a registry or a
mirror, you overwrite the `insecure` flag for that specific entry. This means
that the container runtime will attempt to use unencrypted HTTP to pull the
image. It also allows you to pull from a registry with self-signed certificates.

If you set `unqualified-search = true` for a registry, then it is possible
to omit the registry hostname when pulling images. This feature does not work
together with a specified `prefix`.

If `blocked = true`, then pulling images from that registry is not allowed.

### EXAMPLE

```
[[registry]]
location = "example.com"
insecure = false
prefix = "example.com/foo"
unqualified-search = false
blocked = false
mirror = [
  { location = "example-mirror-0.local", insecure = false },
  { location = "example-mirror-1.local", insecure = true }
]
```
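
The `prefix` handling described above is a plain string rewrite: strip the matched prefix from the image reference and graft the endpoint's `location` in its place. A hedged sketch of that rule, mirroring the `example.com/foo` example (not the library's implementation):

```go
package main

import (
    "fmt"
    "strings"
)

// rewrite maps a pulled reference onto an endpoint, as described above.
func rewrite(ref, prefix, location string) (string, bool) {
    if !strings.HasPrefix(ref, prefix) {
        return "", false
    }
    return location + strings.TrimPrefix(ref, prefix), true
}

func main() {
    got, ok := rewrite("example.com/foo/image", "example.com/foo", "example.com")
    fmt.Println(got, ok) // example.com/image true

    // Mirrors are rewritten the same way, just with the mirror's location.
    got, ok = rewrite("example.com/foo/image", "example.com/foo", "example-mirror-0.local")
    fmt.Println(got, ok) // example-mirror-0.local/image true
}
```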

## VERSION 1
VERSION 1 can be used as an alternative to VERSION 2, but it is not capable of
using registry mirrors or a prefix.

The TOML format is used to build a simple list for registries under three
categories: `registries.search`, `registries.insecure`, and `registries.block`.
You can list multiple registries using a comma-separated list.

@@ -22,18 +76,13 @@ Search registries are used when the caller of a container runtime does not fully
container image that they want to execute. These registries are prepended onto the front
of the specified container image until the named image is found at a registry.

Insecure Registries. By default container runtimes use TLS when retrieving images
from a registry. If the registry is not set up with TLS, then the container runtime
will fail to pull images from the registry. If you add the registry to the list of
insecure registries, then the container runtime will attempt to use standard web protocols to
pull the image. It also allows you to pull from a registry with self-signed certificates.
Note insecure registries can be used for any registry, not just the registries listed
under search.

Block Registries. The registries in this category are not pulled from when
retrieving images.
The fields `registries.insecure` and `registries.block` work like the
`insecure` and `blocked` fields from VERSION 2.

# EXAMPLE
### EXAMPLE
The following example configuration defines two searchable registries, one
insecure registry, and two blocked registries.

@@ -49,6 +98,8 @@ registries = ['registry.untrusted.com', 'registry.unsafe.com']
```

# HISTORY
Mar 2019, Added additional configuration format by Sascha Grunert <sgrunert@suse.com>

Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg <vrothberg@suse.com>

Jun 2018, Updated by Tom Sweeney <tsweeney@redhat.com>

@@ -1,10 +1,10 @@
% atomic-signature(5) Atomic signature format
% container-signature(5) Container signature format
% Miloslav Trmač
% March 2017

# Atomic signature format
# Container signature format

This document describes the format of “atomic” container signatures,
This document describes the format of container signatures,
as implemented by the `github.com/containers/image/signature` package.

Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package
@@ -21,7 +21,7 @@ an automated build system as a result of an automated build,
a company IT department approving the image for production) under a specified _identity_
(e.g. an OS base image / specific application, with a specific version).

An atomic container signature consists of a cryptographic signature which identifies
A container signature consists of a cryptographic signature which identifies
and authenticates who signed the image, and carries as a signed payload a JSON document.
The JSON document identifies the image being signed, claims a specific identity of the
image and, if applicable, contains other information about the image.
@@ -34,7 +34,7 @@ associating more than one signature with an image.

## The cryptographic signature

As distributed, the atomic container signature is a blob which contains a cryptographic signature
As distributed, the container signature is a blob which contains a cryptographic signature
in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the
JSON document and a signature of the JSON document; it is not a “detached signature” with
independent blobs containing the JSON document and a cryptographic signature).
@@ -46,7 +46,7 @@ that this is not necessary and the configured expected public key provides anoth
of the expected cryptographic signature format. Such metadata may be added in the future for
newly added cryptographic signature formats, if necessary.)

Consumers of atomic container signatures SHOULD verify the cryptographic signature
Consumers of container signatures SHOULD verify the cryptographic signature
against one or more trusted public keys
(e.g. defined in a [policy.json signature verification policy file](policy.json.md))
before parsing or processing the JSON payload in _any_ way,
@@ -89,7 +89,7 @@ and if the semantics of the invalid document, as created by such an implementati
The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`,
each a JSON object.

The `critical` object MUST contain a `type` member identifying the document as an atomic container signature
The `critical` object MUST contain a `type` member identifying the document as a container signature
(as defined [below](#criticaltype))
and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value.

@@ -210,7 +210,7 @@ Consumers still SHOULD reject any signature where a member of an `optional` obje

If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature.

The contents of this string is not defined in detail; however each implementation creating atomic container signatures:
The contents of this string is not defined in detail; however each implementation creating container signatures:

- SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number)
- SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number)
@@ -221,7 +221,7 @@ The contents of this string is not defined in detail; however each implementatio
(e.g. the version of the implementation SHOULD NOT be only a git hash, because they don’t have an easily defined ordering;
the string should contain a version number, or at least a date of the commit).

Consumers of atomic container signatures MAY recognize specific values or sets of values of `optional.creator`
Consumers of container signatures MAY recognize specific values or sets of values of `optional.creator`
(perhaps augmented with `optional.timestamp`),
and MAY change their processing of the signature based on these values
(usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively),
109 vendor/github.com/containers/image/docs/containers-transports.5.md generated vendored Normal file
@@ -0,0 +1,109 @@
% CONTAINERS-TRANSPORTS(5) Containers Transports Man Page
% Valentin Rothberg
% April 2019

## NAME

containers-transports - description of supported transports for copying and storing container images

## DESCRIPTION

Tools which use the containers/image library, including skopeo(1), buildah(1), and podman(1), all share a common syntax for referring to container images in various locations.
The general form of the syntax is _transport:details_, where the details are dependent on the specified transport; the transports are documented below.

### **containers-storage:** [storage-specifier]{image-id|docker-reference[@image-id]}

An image located in a local containers storage.
The format of _docker-reference_ is described in detail in the **docker** transport.

The _storage-specifier_ allows for referencing storage locations on the file system and has the format `[[driver@]root[+run-root][:options]]`, where the optional `driver` refers to the storage driver (e.g., overlay or btrfs) and where `root` is an absolute path to the storage's root directory.
The optional `run-root` can be used to specify the run directory of the storage where all temporary writable content is stored.
The optional `options` are a comma-separated list of driver-specific options.
Please refer to containers-storage.conf(5) for further information on the drivers and supported options.

### **dir:**_path_

An existing local directory _path_ storing the manifest, layer tarballs, and signatures as individual files.
This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

### **docker://**_docker-reference_

An image in a registry implementing the "Docker Registry HTTP API V2".
By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using podman-login(1).
If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using docker-login(1).
The containers-registries.conf(5) file further allows for configuring various settings of a registry.

Note that a _docker-reference_ has the following format: `name[:tag|@digest]`.
While the docker transport does not support both a tag and a digest at the same time, some formats like containers-storage do.
Digests can also be used in an image destination as long as the manifest matches the provided digest.
The digest of images can be explored with skopeo-inspect(1).
If `name` does not contain a slash, it is treated as `docker.io/library/name`.
Otherwise, the component before the first slash is checked to see whether it is recognized as a `hostname[:port]` (i.e., it contains either a . or a :, or the component is exactly localhost).
If the first component of name is not recognized as a `hostname[:port]`, `name` is treated as `docker.io/name`.
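
These short-name rules are applied by reference parsing; a minimal Go sketch against the reference package as vendored in this tree (import path is an assumption tied to this vendoring):

```go
package main

import (
    "fmt"

    "github.com/containers/image/docker/reference"
)

func main() {
    for _, s := range []string{"alpine:latest", "quay.io/libpod/alpine:latest", "localhost/alpine"} {
        named, err := reference.ParseNormalizedNamed(s)
        if err != nil {
            panic(err)
        }
        // "alpine:latest" normalizes to "docker.io/library/alpine:latest".
        fmt.Printf("%-30s -> %s\n", s, named.String())
    }
}
```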

### **docker-archive:**_path[:docker-reference]_

An image stored in a docker-save(1) formatted file.
_docker-reference_ is only used when creating such a file, and it must not contain a digest.
It is further possible to copy data from stdin by specifying `docker-archive:/dev/stdin`, but note that the file used must be seekable.

### **docker-daemon:**_docker-reference|algo:digest_

An image stored in the docker daemon's internal storage.
The image must be specified as a _docker-reference_ or in an alternative _algo:digest_ format when being used as an image source.
The _algo:digest_ refers to the image ID reported by docker-inspect(1).

### **oci:**_path[:tag]_

An image compliant with the "Open Container Image Layout Specification" at _path_.
Using a _tag_ is optional and allows for storing multiple images at the same _path_.

### **oci-archive:**_path[:tag]_

An image compliant with the "Open Container Image Layout Specification" stored as a tar(1) archive at _path_.

### **ostree:**_docker-reference[@/absolute/repo/path]_

An image in the local ostree(1) repository.
_/absolute/repo/path_ defaults to _/ostree/repo_.

## Examples

The following examples demonstrate how some of the containers transports can be used.
The examples use skopeo-copy(1) for copying container images.

**Copying an image from one registry to another**:
```
$ skopeo copy docker://docker.io/library/alpine:latest docker://localhost:5000/alpine:latest
```

**Copying an image from a running Docker daemon to a directory in the OCI layout**:
```
$ mkdir alpine-oci
$ skopeo copy docker-daemon:alpine:latest oci:alpine-oci
$ tree alpine-oci
alpine-oci/
├── blobs
│   └── sha256
│       ├── 83ef92b73cf4595aa7fe214ec6747228283d585f373d8f6bc08d66bebab531b7
│       ├── 9a6259e911dcd0a53535a25a9760ad8f2eded3528e0ad5604c4488624795cecc
│       └── ff8df268d29ccbe81cdf0a173076dcfbbea4bb2b6df1dd26766a73cb7b4ae6f7
├── index.json
└── oci-layout

2 directories, 5 files
```

**Copying an image from a registry to the local storage**:
```
$ skopeo copy docker://docker.io/library/alpine:latest containers-storage:alpine:latest
```
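
The same transport strings shown in these examples can also be parsed programmatically; a minimal sketch using the library's alltransports package (import path as vendored in this tree; parsing some transports may still require their backing storage to be configured):

```go
package main

import (
    "fmt"

    "github.com/containers/image/transports/alltransports"
)

func main() {
    for _, s := range []string{
        "docker://docker.io/library/alpine:latest",
        "oci:alpine-oci",
        "containers-storage:alpine:latest",
    } {
        ref, err := alltransports.ParseImageName(s)
        if err != nil {
            fmt.Println(s, "->", err)
            continue
        }
        fmt.Println(s, "-> transport:", ref.Transport().Name())
    }
}
```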

## SEE ALSO

docker-login(1), docker-save(1), ostree(1), podman-login(1), skopeo-copy(1), skopeo-inspect(1), tar(1), containers-registries.conf(5), containers-storage.conf(5)

## AUTHORS

Miloslav Trmač <mitr@redhat.com>
Valentin Rothberg <rothberg@redhat.com>
7 vendor/github.com/containers/image/image/docker_schema2.go generated vendored
@@ -11,6 +11,7 @@ import (

    "github.com/containers/image/docker/reference"
    "github.com/containers/image/manifest"
    "github.com/containers/image/pkg/blobinfocache/none"
    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -95,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
    if m.src == nil {
        return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
    }
    stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor))
    stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
    if err != nil {
        return nil, err
    }
@@ -249,7 +250,9 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
    if historyEntry.EmptyLayer {
        if !haveGzippedEmptyLayer {
            logrus.Debugf("Uploading empty layer during conversion to schema 1")
            info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, false)
            // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
            // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
            info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false)
            if err != nil {
                return nil, errors.Wrap(err, "Error uploading empty layer")
            }

3 vendor/github.com/containers/image/image/oci.go generated vendored
@@ -7,6 +7,7 @@ import (

    "github.com/containers/image/docker/reference"
    "github.com/containers/image/manifest"
    "github.com/containers/image/pkg/blobinfocache/none"
    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -60,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
    if m.src == nil {
        return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
    }
    stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config))
    stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
    if err != nil {
        return nil, err
    }

2 vendor/github.com/containers/image/manifest/docker_schema1.go generated vendored
@@ -275,7 +275,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
    raw := make(map[string]*json.RawMessage)
    err = json.Unmarshal(config, &raw)
    if err != nil {
        return nil, errors.Wrapf(err, "error re-decoding compat image config %#v: %v", s1)
        return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1)
    }
    // Drop some fields.
    delete(raw, "id")

34 vendor/github.com/containers/image/oci/archive/oci_dest.go generated vendored
@@ -25,7 +25,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociA
    unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys)
    if err != nil {
        if err := tempDirRef.deleteTempDir(); err != nil {
            return nil, errors.Wrapf(err, "error deleting temp directory", tempDirRef.tempDirectory)
            return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
        }
        return nil, err
    }
@@ -77,20 +77,32 @@ func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
    return d.unpackedDest.IgnoresEmbeddedDockerReference()
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
    return false
}

// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
    return d.unpackedDest.PutBlob(ctx, stream, inputInfo, isConfig)
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
    return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob
func (d *ociArchiveImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
    return d.unpackedDest.HasBlob(ctx, info)
}

func (d *ociArchiveImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
    return d.unpackedDest.ReapplyBlob(ctx, info)
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
    return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
}

// PutManifest writes manifest to the destination

15 vendor/github.com/containers/image/oci/archive/oci_src.go generated vendored
@@ -28,7 +28,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiv
    unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys)
    if err != nil {
        if err := tempDirRef.deleteTempDir(); err != nil {
            return nil, errors.Wrapf(err, "error deleting temp directory", tempDirRef.tempDirectory)
            return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
        }
        return nil, err
    }
@@ -76,9 +76,16 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest
    return s.unpackedSrc.GetManifest(ctx, instanceDigest)
}

// GetBlob returns a stream for the specified blob, and the blob's size.
func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
    return s.unpackedSrc.GetBlob(ctx, info)
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool {
    return false
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    return s.unpackedSrc.GetBlob(ctx, info, cache)
}

// GetSignatures returns the image's signatures. It may use a remote (= slow) service.

38 vendor/github.com/containers/image/oci/layout/oci_dest.go generated vendored
@@ -107,13 +107,20 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
    return false // N/A, DockerReference() returns nil.
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *ociImageDestination) HasThreadSafePutBlob() bool {
    return false
}

// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
    blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
    if err != nil {
        return types.BlobInfo{}, err
@@ -173,30 +180,29 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
    return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *ociImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
    if info.Digest == "" {
        return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
        return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
    }
    blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
    if err != nil {
        return false, -1, err
        return false, types.BlobInfo{}, err
    }
    finfo, err := os.Stat(blobPath)
    if err != nil && os.IsNotExist(err) {
        return false, -1, nil
        return false, types.BlobInfo{}, nil
    }
    if err != nil {
        return false, -1, err
        return false, types.BlobInfo{}, err
    }
    return true, finfo.Size(), nil
}

func (d *ociImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
    return info, nil
    return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}

// PutManifest writes manifest to the destination.

11 vendor/github.com/containers/image/oci/layout/oci_src.go generated vendored
@@ -92,8 +92,15 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
    return m, mimeType, nil
}

// GetBlob returns a stream for the specified blob, and the blob's size.
func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *ociImageSource) HasThreadSafeGetBlob() bool {
    return false
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    if len(info.URLs) != 0 {
        return s.getExternalBlob(ctx, info.URLs)
    }

42 vendor/github.com/containers/image/openshift/openshift.go generated vendored
@@ -96,7 +96,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
        req.Header.Set("Content-Type", "application/json")
    }

    logrus.Debugf("%s %s", method, url)
    logrus.Debugf("%s %s", method, url.String())
    res, err := c.httpClient.Do(req)
    if err != nil {
        return nil, err
@@ -211,12 +211,19 @@ func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *
    return s.docker.GetManifest(ctx, instanceDigest)
}

// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
    return false
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    if err := s.ensureImageIsResolved(ctx); err != nil {
        return nil, 0, err
    }
    return s.docker.GetBlob(ctx, info)
    return s.docker.GetBlob(ctx, info, cache)
}

// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
@@ -376,26 +383,31 @@ func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
    return d.docker.IgnoresEmbeddedDockerReference()
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
    return false
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
    return d.docker.PutBlob(ctx, stream, inputInfo, isConfig)
func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
    return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *openshiftImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
    return d.docker.HasBlob(ctx, info)
}

func (d *openshiftImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
    return d.docker.ReapplyBlob(ctx, info)
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
    return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
}

// PutManifest writes manifest to the destination.

45 vendor/github.com/containers/image/ostree/ostree_dest.go generated vendored
@@ -4,7 +4,6 @@ package ostree

import (
    "bytes"
    "compress/gzip"
    "context"
    "encoding/base64"
    "encoding/json"
@@ -24,6 +23,7 @@ import (
    "github.com/containers/image/manifest"
    "github.com/containers/image/types"
    "github.com/containers/storage/pkg/archive"
    "github.com/klauspost/pgzip"
    "github.com/opencontainers/go-digest"
    selinux "github.com/opencontainers/selinux/go-selinux"
    "github.com/ostreedev/ostree-go/pkg/otbuiltin"
@@ -132,7 +132,20 @@ func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
    return false // N/A, DockerReference() returns nil.
}

func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
    return false
}

// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
    tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
    if err != nil {
        return types.BlobInfo{}, err
@@ -241,7 +254,7 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin
}

func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
    mfz := gzip.NewWriter(output)
    mfz := pgzip.NewWriter(output)
    defer mfz.Close()
    metaPacker := storage.NewJSONPacker(mfz)
|
||||
|
||||
@@ -322,12 +335,18 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
|
||||
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
|
||||
}
|
||||
|
||||
func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
|
||||
|
||||
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
|
||||
// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
// May use and/or update cache.
|
||||
func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
|
||||
if d.repo == nil {
|
||||
repo, err := openRepo(d.ref.repo)
|
||||
if err != nil {
|
||||
return false, 0, err
|
||||
return false, types.BlobInfo{}, err
|
||||
}
|
||||
d.repo = repo
|
||||
}
|
||||
@@ -335,29 +354,25 @@ func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInf
|
||||
|
||||
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
|
||||
if err != nil || !found {
|
||||
return found, -1, err
|
||||
return found, types.BlobInfo{}, err
|
||||
}
|
||||
|
||||
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
|
||||
if err != nil || !found {
|
||||
return found, -1, err
|
||||
return found, types.BlobInfo{}, err
|
||||
}
|
||||
|
||||
found, data, err = readMetadata(d.repo, branch, "docker.size")
|
||||
if err != nil || !found {
|
||||
return found, -1, err
|
||||
return found, types.BlobInfo{}, err
|
||||
}
|
||||
|
||||
size, err := strconv.ParseInt(data, 10, 64)
|
||||
if err != nil {
|
||||
return false, -1, err
|
||||
return false, types.BlobInfo{}, err
|
||||
}
|
||||
|
||||
return true, size, nil
|
||||
}
|
||||
|
||||
func (d *ostreeImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
|
||||
return info, nil
|
||||
return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
|
||||
}
|
||||
|
||||
// PutManifest writes manifest to the destination.
|
||||
|
||||
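One recurring change in these hunks swaps compress/gzip for github.com/klauspost/pgzip. The sketch below (not part of the diff) shows why the swap is mechanical: pgzip mirrors the stdlib gzip API while compressing and decompressing in parallel blocks:

package main

import (
	"bytes"
	"io"
	"os"

	"github.com/klauspost/pgzip"
)

func main() {
	var buf bytes.Buffer
	w := pgzip.NewWriter(&buf) // same call shape as gzip.NewWriter
	if _, err := w.Write([]byte("layer data")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	r, err := pgzip.NewReader(&buf) // same call shape as gzip.NewReader
	if err != nil {
		panic(err)
	}
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}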
28 vendor/github.com/containers/image/ostree/ostree_src.go generated vendored
@@ -4,7 +4,6 @@ package ostree

import (
	"bytes"
	"compress/gzip"
	"context"
	"encoding/base64"
	"fmt"
@@ -17,7 +16,8 @@ import (
	"github.com/containers/image/manifest"
	"github.com/containers/image/types"
	"github.com/containers/storage/pkg/ioutils"
	"github.com/opencontainers/go-digest"
	"github.com/klauspost/pgzip"
	digest "github.com/opencontainers/go-digest"
	glib "github.com/ostreedev/ostree-go/pkg/glibobject"
	"github.com/pkg/errors"
	"github.com/vbatts/tar-split/tar/asm"
@@ -255,8 +255,15 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
	return getter.Get(path)
}

// GetBlob returns a stream for the specified blob, and the blob's size.
func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
	return false
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {

	blob := info.Digest.Hex()

@@ -302,28 +309,23 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (i
	}

	mf := bytes.NewReader(tarsplit)
	mfz, err := gzip.NewReader(mf)
	mfz, err := pgzip.NewReader(mf)
	if err != nil {
		return nil, 0, err
	}
	defer mfz.Close()
	metaUnpacker := storage.NewJSONUnpacker(mfz)

	getter, err := newOSTreePathFileGetter(s.repo, branch)
	if err != nil {
		mfz.Close()
		return nil, 0, err
	}

	ots := asm.NewOutputTarStream(getter, metaUnpacker)

	pipeReader, pipeWriter := io.Pipe()
	go func() {
		io.Copy(pipeWriter, ots)
		pipeWriter.Close()
	}()

	rc := ioutils.NewReadCloserWrapper(pipeReader, func() error {
	rc := ioutils.NewReadCloserWrapper(ots, func() error {
		getter.Close()
		mfz.Close()
		return ots.Close()
	})
	return rc, layerSize, nil
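The last hunk drops the io.Pipe plus copy goroutine and hands the tar stream straight to ioutils.NewReadCloserWrapper, folding the dependent Close calls into one place. A minimal sketch of that pattern; `wrapWithCleanup` is an illustrative name, not from the diff:

package example

import (
	"io"

	"github.com/containers/storage/pkg/ioutils"
)

// wrapWithCleanup returns a ReadCloser that reads from stream directly and,
// on Close, runs the extra cleanups before closing the stream itself.
func wrapWithCleanup(stream io.ReadCloser, cleanups ...func()) io.ReadCloser {
	return ioutils.NewReadCloserWrapper(stream, func() error {
		for _, c := range cleanups {
			c()
		}
		return stream.Close()
	})
}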
332 vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go generated vendored Normal file
@@ -0,0 +1,332 @@
// Package boltdb implements a BlobInfoCache backed by BoltDB.
package boltdb

import (
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
	"github.com/containers/image/types"
	bolt "github.com/etcd-io/bbolt"
	"github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
)

var (
	// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
	// we can simply start over with a different filename; update blobInfoCacheFilename.

	// FIXME: For CRI-O, does this need to hide information between different users?

	// uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
	uncompressedDigestBucket = []byte("uncompressedDigest")
	// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
	// (as a set of key=digest, value="" pairs)
	digestByUncompressedBucket = []byte("digestByUncompressed")
	// knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
	// a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
	knownLocationsBucket = []byte("knownLocations")
)

// Concurrency:
// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.

// pathLock contains a lock for a specific BoltDB database path.
type pathLock struct {
	refCount int64      // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
	mutex    sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
}

var (
	// pathLocks contains a lock for each currently open file.
	// This must be global so that independently created instances of boltDBCache exclude each other.
	// The map is protected by pathLocksMutex.
	// FIXME? Should this be based on device:inode numbers instead of paths?
	pathLocks      = map[string]*pathLock{}
	pathLocksMutex = sync.Mutex{}
)

// lockPath obtains the pathLock for path.
// The caller must call unlockPath eventually.
func lockPath(path string) {
	pl := func() *pathLock { // A scope for defer
		pathLocksMutex.Lock()
		defer pathLocksMutex.Unlock()
		pl, ok := pathLocks[path]
		if ok {
			pl.refCount++
		} else {
			pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
			pathLocks[path] = pl
		}
		return pl
	}()
	pl.mutex.Lock()
}

// unlockPath releases the pathLock for path.
func unlockPath(path string) {
	pathLocksMutex.Lock()
	defer pathLocksMutex.Unlock()
	pl, ok := pathLocks[path]
	if !ok {
		// Should this return an error instead? BlobInfoCache ultimately ignores errors…
		panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
	}
	pl.mutex.Unlock()
	pl.refCount--
	if pl.refCount == 0 {
		delete(pathLocks, path)
	}
}

// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
//
// Note that we don’t keep the database open across operations, because that would lock the file and block any other
// users; instead, we need to open/close it for every single write or lookup.
type cache struct {
	path string
}

// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
//
// Most users should call blobinfocache.DefaultCache instead.
func New(path string) types.BlobInfoCache {
	return &cache{path: path}
}

// view runs the specified fn within a read-only transaction on the database.
func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
	// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
	// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
	// a read lock, blocking any future writes.
	// Hence this preliminary check, which is RACY: Another process could remove the file
	// between the Lstat call and opening the database.
	if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
		return err
	}

	lockPath(bdc.path)
	defer unlockPath(bdc.path)
	db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		return err
	}
	defer func() {
		if err := db.Close(); retErr == nil && err != nil {
			retErr = err
		}
	}()

	return db.View(fn)
}

// update runs the specified fn within a read-write transaction on the database.
func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
	lockPath(bdc.path)
	defer unlockPath(bdc.path)
	db, err := bolt.Open(bdc.path, 0600, nil)
	if err != nil {
		return err
	}
	defer func() {
		if err := db.Close(); retErr == nil && err != nil {
			retErr = err
		}
	}()

	return db.Update(fn)
}

// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
	if b := tx.Bucket(uncompressedDigestBucket); b != nil {
		if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
			d, err := digest.Parse(string(uncompressedBytes))
			if err == nil {
				return d
			}
			// FIXME? Log err (but throttle the log volume on repeated accesses)?
		}
	}
	// Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
	// when we already record a (compressed, uncompressed) pair.
	if b := tx.Bucket(digestByUncompressedBucket); b != nil {
		if b = b.Bucket([]byte(anyDigest.String())); b != nil {
			c := b.Cursor()
			if k, _ := c.First(); k != nil { // The bucket is non-empty
				return anyDigest
			}
		}
	}
	return ""
}

// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
	var res digest.Digest
	if err := bdc.view(func(tx *bolt.Tx) error {
		res = bdc.uncompressedDigest(tx, anyDigest)
		return nil
	}); err != nil { // Including os.IsNotExist(err)
		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
	}
	return res
}

// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
// It’s allowed for anyDigest == uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
	_ = bdc.update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
		if err != nil {
			return err
		}
		key := []byte(anyDigest.String())
		if previousBytes := b.Get(key); previousBytes != nil {
			previous, err := digest.Parse(string(previousBytes))
			if err != nil {
				return err
			}
			if previous != uncompressed {
				logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
			}
		}
		if err := b.Put(key, []byte(uncompressed.String())); err != nil {
			return err
		}

		b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
		if err != nil {
			return err
		}
		b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
		if err != nil {
			return err
		}
		if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
			return err
		}
		return nil
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
	_ = bdc.update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
		if err != nil {
			return err
		}
		b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
		if err != nil {
			return err
		}
		b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
		if err != nil {
			return err
		}
		b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
		if err != nil {
			return err
		}
		value, err := time.Now().MarshalBinary()
		if err != nil {
			return err
		}
		if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
			return err
		}
		return nil
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
	b := scopeBucket.Bucket([]byte(digest.String()))
	if b == nil {
		return candidates
	}
	_ = b.ForEach(func(k, v []byte) error {
		t := time.Time{}
		if err := t.UnmarshalBinary(v); err != nil {
			return err
		}
		candidates = append(candidates, prioritize.CandidateWithTime{
			Candidate: types.BICReplacementCandidate{
				Digest:   digest,
				Location: types.BICLocationReference{Opaque: string(k)},
			},
			LastSeen: t,
		})
		return nil
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
	return candidates
}

// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport, scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
	res := []prioritize.CandidateWithTime{}
	var uncompressedDigestValue digest.Digest // = ""
	if err := bdc.view(func(tx *bolt.Tx) error {
		scopeBucket := tx.Bucket(knownLocationsBucket)
		if scopeBucket == nil {
			return nil
		}
		scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
		if scopeBucket == nil {
			return nil
		}
		scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
		if scopeBucket == nil {
			return nil
		}

		res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
		if canSubstitute {
			if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
				b := tx.Bucket(digestByUncompressedBucket)
				if b != nil {
					b = b.Bucket([]byte(uncompressedDigestValue.String()))
					if b != nil {
						if err := b.ForEach(func(k, _ []byte) error {
							d, err := digest.Parse(string(k))
							if err != nil {
								return err
							}
							if d != primaryDigest && d != uncompressedDigestValue {
								res = bdc.appendReplacementCandidates(res, scopeBucket, d)
							}
							return nil
						}); err != nil {
							return err
						}
					}
				}
				if uncompressedDigestValue != primaryDigest {
					res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
				}
			}
		}
		return nil
	}); err != nil { // Including os.IsNotExist(err)
		return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
	}

	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
}
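For orientation, a small usage sketch (not part of the vendored file): recording a locally verified digest pair through the BoltDB-backed cache and reading it back; the path and digests are illustrative:

package main

import (
	"fmt"

	"github.com/containers/image/pkg/blobinfocache/boltdb"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	cache := boltdb.New("/tmp/blob-info-cache-example.boltdb") // illustrative path
	compressed := digest.FromString("compressed-layer")
	uncompressed := digest.FromString("uncompressed-layer")
	// Per the WARNING above, only record locally verified pairs.
	cache.RecordDigestUncompressedPair(compressed, uncompressed)
	fmt.Println(cache.UncompressedDigest(compressed)) // prints the uncompressed digest
}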
75 vendor/github.com/containers/image/pkg/blobinfocache/default.go generated vendored Normal file
@@ -0,0 +1,75 @@
package blobinfocache

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"

	"github.com/containers/image/pkg/blobinfocache/boltdb"
	"github.com/containers/image/pkg/blobinfocache/memory"
	"github.com/containers/image/types"
	"github.com/sirupsen/logrus"
)

const (
	// blobInfoCacheFilename is the file name used for blob info caches.
	// If the format changes in an incompatible way, increase the version number.
	blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
	// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
	systemBlobInfoCacheDir = "/var/lib/containers/cache"
)

// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
	if sys != nil && sys.BlobInfoCacheDir != "" {
		return sys.BlobInfoCacheDir, nil
	}

	// FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
	// and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
	if euid == 0 {
		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
		}
		return systemBlobInfoCacheDir, nil
	}

	// This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
	dataDir := os.Getenv("XDG_DATA_HOME")
	if dataDir == "" {
		home := os.Getenv("HOME")
		if home == "" {
			return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
		}
		dataDir = filepath.Join(home, ".local", "share")
	}
	return filepath.Join(dataDir, "containers", "cache"), nil
}

func getRootlessUID() int {
	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
	if uidEnv != "" {
		u, _ := strconv.Atoi(uidEnv)
		return u
	}
	return os.Geteuid()
}

// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
	dir, err := blobInfoCacheDir(sys, getRootlessUID())
	if err != nil {
		logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
		return memory.New()
	}
	path := filepath.Join(dir, blobInfoCacheFilename)
	if err := os.MkdirAll(dir, 0700); err != nil {
		logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
		return memory.New()
	}

	logrus.Debugf("Using blob info cache at %s", path)
	return boltdb.New(path)
}
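Callers that want the persistent cache somewhere specific do not need to reimplement this resolution; per the code above, SystemContext.BlobInfoCacheDir short-circuits it. An illustrative override (the directory is invented):

package main

import (
	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
)

func main() {
	// DefaultCache will place blob-info-cache-v1.boltdb inside this directory.
	sys := &types.SystemContext{BlobInfoCacheDir: "/srv/imagecache"}
	_ = blobinfocache.DefaultCache(sys)
}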
110 vendor/github.com/containers/image/pkg/blobinfocache/internal/prioritize/prioritize.go generated vendored Normal file
@@ -0,0 +1,110 @@
// Package prioritize provides utilities for prioritizing locations in
// types.BlobInfoCache.CandidateLocations.
package prioritize

import (
	"sort"
	"time"

	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
)

// replacementAttempts is the number of blob replacement candidates returned by DestructivelyPrioritizeReplacementCandidates,
// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
// This is a heuristic/guess, and could well use a different value.
const replacementAttempts = 5

// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
type CandidateWithTime struct {
	Candidate types.BICReplacementCandidate // The replacement candidate
	LastSeen  time.Time                     // Time the candidate was last known to exist (either read or written)
}

// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
// along with the specially-treated digest values for the implementation of sort.Interface.Less
type candidateSortState struct {
	cs                 []CandidateWithTime // The entries to sort
	primaryDigest      digest.Digest       // The digest the user actually asked for
	uncompressedDigest digest.Digest       // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
}

func (css *candidateSortState) Len() int {
	return len(css.cs)
}

func (css *candidateSortState) Less(i, j int) bool {
	xi := css.cs[i]
	xj := css.cs[j]

	// primaryDigest entries come first, more recent first.
	// uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
	// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)

	// First, deal with the primaryDigest/uncompressedDigest cases:
	if xi.Candidate.Digest != xj.Candidate.Digest {
		// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
		if xi.Candidate.Digest == css.primaryDigest {
			return true
		}
		if xj.Candidate.Digest == css.primaryDigest {
			return false
		}
		if css.uncompressedDigest != "" {
			if xi.Candidate.Digest == css.uncompressedDigest {
				return false
			}
			if xj.Candidate.Digest == css.uncompressedDigest {
				return true
			}
		}
	} else { // xi.Candidate.Digest == xj.Candidate.Digest
		// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
		if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
			return xi.LastSeen.After(xj.LastSeen)
		}
	}

	// Neither of the digests are primaryDigest/uncompressedDigest:
	if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
		return xi.LastSeen.After(xj.LastSeen)
	}
	// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
	return xi.Candidate.Digest < xj.Candidate.Digest
}

func (css *candidateSortState) Swap(i, j int) {
	css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
}

// destructivelyPrioritizeReplacementCandidatesWithMax is DestructivelyPrioritizeReplacementCandidates with a parameter
// limiting the number of returned entries, split out only to make testing simpler.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
	// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
	// compare equal.
	sort.Sort(&candidateSortState{
		cs:                 cs,
		primaryDigest:      primaryDigest,
		uncompressedDigest: uncompressedDigest,
	})

	resLength := len(cs)
	if resLength > maxCandidates {
		resLength = maxCandidates
	}
	res := make([]types.BICReplacementCandidate, resLength)
	for i := range res {
		res[i] = cs[i].Candidate
	}
	return res
}

// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
//
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
	return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
}
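The Less implementation above encodes a three-tier ordering: entries for the primary digest first, the uncompressed digest (when distinct) last, everything else by recency with a digest tie-break. A self-contained restatement with local stand-in types (not the vendored code):

package main

import (
	"fmt"
	"sort"
	"time"
)

type candidate struct {
	digest   string
	lastSeen time.Time
}

// prioritizeCandidates reorders cs using the same three-tier rule as the
// comparator above: primary first, uncompressed (if distinct) last,
// otherwise most recent first with the digest as a deterministic tie-break.
func prioritizeCandidates(cs []candidate, primary, uncompressed string) {
	rank := func(d string) int {
		switch {
		case d == primary:
			return 0
		case uncompressed != "" && d == uncompressed && uncompressed != primary:
			return 2
		default:
			return 1
		}
	}
	sort.Slice(cs, func(i, j int) bool {
		ri, rj := rank(cs[i].digest), rank(cs[j].digest)
		if ri != rj {
			return ri < rj
		}
		if !cs[i].lastSeen.Equal(cs[j].lastSeen) {
			return cs[i].lastSeen.After(cs[j].lastSeen)
		}
		return cs[i].digest < cs[j].digest
	})
}

func main() {
	now := time.Now()
	cs := []candidate{
		{"sha256:uncompressed", now},
		{"sha256:other", now.Add(-time.Hour)},
		{"sha256:primary", now.Add(-2 * time.Hour)},
	}
	prioritizeCandidates(cs, "sha256:primary", "sha256:uncompressed")
	for _, c := range cs {
		fmt.Println(c.digest) // primary, other, uncompressed
	}
}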
145 vendor/github.com/containers/image/pkg/blobinfocache/memory/memory.go generated vendored Normal file
@@ -0,0 +1,145 @@
// Package memory implements an in-memory BlobInfoCache.
package memory

import (
	"sync"
	"time"

	"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
	"github.com/containers/image/types"
	digest "github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
)

// locationKey only exists to make lookup in knownLocations easier.
type locationKey struct {
	transport  string
	scope      types.BICTransportScope
	blobDigest digest.Digest
}

// cache implements an in-memory-only BlobInfoCache
type cache struct {
	mutex sync.Mutex
	// The following fields can only be accessed with mutex held.
	uncompressedDigests   map[digest.Digest]digest.Digest
	digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{}             // stores a set of digests for each uncompressed digest
	knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
}

// New returns a BlobInfoCache implementation which is in-memory only.
//
// This is primarily intended for tests, but also used as a fallback
// if blobinfocache.DefaultCache can’t determine, or set up, the
// location for a persistent cache. Most users should use
// blobinfocache.DefaultCache instead of calling this directly.
// Manual users of types.{ImageSource,ImageDestination} might also use
// this instead of a persistent cache.
func New() types.BlobInfoCache {
	return &cache{
		uncompressedDigests:   map[digest.Digest]digest.Digest{},
		digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
		knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
	}
}

// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	return mem.uncompressedDigestLocked(anyDigest)
}

// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
	if d, ok := mem.uncompressedDigests[anyDigest]; ok {
		return d
	}
	// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
	// when we already record a (compressed, uncompressed) pair.
	if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
		return anyDigest
	}
	return ""
}

// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
// It’s allowed for anyDigest == uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
		logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
	}
	mem.uncompressedDigests[anyDigest] = uncompressed

	anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
	if !ok {
		anyDigestSet = map[digest.Digest]struct{}{}
		mem.digestsByUncompressed[uncompressed] = anyDigestSet
	}
	anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
	locationScope, ok := mem.knownLocations[key]
	if !ok {
		locationScope = map[types.BICLocationReference]time.Time{}
		mem.knownLocations[key] = locationScope
	}
	locationScope[location] = time.Now() // Possibly overwriting an older entry.
}

// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
	for l, t := range locations {
		candidates = append(candidates, prioritize.CandidateWithTime{
			Candidate: types.BICReplacementCandidate{
				Digest:   digest,
				Location: l,
			},
			LastSeen: t,
		})
	}
	return candidates
}

// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport, scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	res := []prioritize.CandidateWithTime{}
	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
	var uncompressedDigest digest.Digest // = ""
	if canSubstitute {
		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
			for d := range otherDigests {
				if d != primaryDigest && d != uncompressedDigest {
					res = mem.appendReplacementCandidates(res, transport, scope, d)
				}
			}
			if uncompressedDigest != primaryDigest {
				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
			}
		}
	}
	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
}
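A usage sketch for the in-memory variant (illustrative scope and location values; docker.Transport stands in for any ImageTransport): nothing is persisted, which is exactly what tests want:

package main

import (
	"fmt"

	"github.com/containers/image/docker"
	"github.com/containers/image/pkg/blobinfocache/memory"
	"github.com/containers/image/types"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	cache := memory.New() // no files touched; state vanishes with the process
	d := digest.FromString("layer")
	scope := types.BICTransportScope{Opaque: "docker.io/library/busybox"} // illustrative
	loc := types.BICLocationReference{Opaque: "library/busybox"}          // illustrative
	cache.RecordKnownLocation(docker.Transport, scope, d, loc)
	for _, c := range cache.CandidateLocations(docker.Transport, scope, d, false) {
		fmt.Println(c.Digest, c.Location.Opaque)
	}
}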
49 vendor/github.com/containers/image/pkg/blobinfocache/none/none.go generated vendored Normal file
@@ -0,0 +1,49 @@
// Package none implements a dummy BlobInfoCache which records no data.
package none

import (
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
)

// noCache implements a dummy BlobInfoCache which records no data.
type noCache struct {
}

// NoCache implements BlobInfoCache by not recording any data.
//
// This exists primarily for implementations of configGetter for
// Manifest.Inspect, because configs only have one representation.
// Any use of BlobInfoCache with blobs should usually use at least a
// short-lived cache, ideally blobinfocache.DefaultCache.
var NoCache types.BlobInfoCache = noCache{}

// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
	return ""
}

// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
// It’s allowed for anyDigest == uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
}

// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport, scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
	return nil
}
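An illustrative call site: none.NoCache satisfies a required BlobInfoCache parameter when recording and reuse are explicitly unwanted (the GetBlob signature is the one shown in the ostree diff above; `fetchBlob` is a hypothetical helper):

package example

import (
	"context"
	"io"

	"github.com/containers/image/pkg/blobinfocache/none"
	"github.com/containers/image/types"
)

// fetchBlob reads a blob without recording any locations or offering substitutions.
func fetchBlob(ctx context.Context, src types.ImageSource, info types.BlobInfo) (io.ReadCloser, int64, error) {
	return src.GetBlob(ctx, info, none.NoCache)
}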
4 vendor/github.com/containers/image/pkg/compression/compression.go generated vendored
@@ -3,10 +3,10 @@ package compression
import (
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"io"
	"io/ioutil"

	"github.com/klauspost/pgzip"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/ulikunitz/xz"
@@ -18,7 +18,7 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)

// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
	return gzip.NewReader(r)
	return pgzip.NewReader(r)
}

// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
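DecompressorFunc (declared above) is the package's uniform decompression hook, so gzip and bzip2 streams can be handled through the same shape of call. A minimal sketch using the now pgzip-backed GzipDecompressor; `decompressGzip` and `compressedReader` are stand-in names:

package example

import (
	"io"
	"os"

	"github.com/containers/image/pkg/compression"
)

// decompressGzip decompresses a gzip stream via the package's DecompressorFunc hook.
func decompressGzip(compressedReader io.Reader) error {
	var dec compression.DecompressorFunc = compression.GzipDecompressor
	rc, err := dec(compressedReader)
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(os.Stdout, rc)
	return err
}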
15 vendor/github.com/containers/image/pkg/docker/config/config.go generated vendored
@@ -85,21 +85,6 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
	return "", "", nil
}

// GetUserLoggedIn returns the username logged in to registry from either
// auth.json or XDG_RUNTIME_DIR
// Used to tell the user if someone is logged in to the registry when logging in
func GetUserLoggedIn(sys *types.SystemContext, registry string) (string, error) {
	path, err := getPathToAuth(sys)
	if err != nil {
		return "", err
	}
	username, _, _ := findAuthentication(registry, path, false)
	if username != "" {
		return username, nil
	}
	return "", nil
}

// RemoveAuthentication deletes the credentials stored in auth.json
func RemoveAuthentication(sys *types.SystemContext, registry string) error {
	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
417 vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go generated vendored Normal file
@@ -0,0 +1,417 @@
package sysregistriesv2

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/BurntSushi/toml"
	"github.com/containers/image/types"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"

	"github.com/containers/image/docker/reference"
)

// systemRegistriesConfPath is the path to the system-wide registry
// configuration file and is used to add/subtract potential registries for
// obtaining images. You can override this at build time with
// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path'
var systemRegistriesConfPath = builtinRegistriesConfPath

// builtinRegistriesConfPath is the path to the registry configuration file.
// DO NOT change this, instead see systemRegistriesConfPath above.
const builtinRegistriesConfPath = "/etc/containers/registries.conf"

// Endpoint describes a remote location of a registry.
type Endpoint struct {
	// The endpoint's remote location.
	Location string `toml:"location"`
	// If true, certs verification will be skipped and HTTP (non-TLS)
	// connections will be allowed.
	Insecure bool `toml:"insecure"`
}

// RewriteReference replaces the provided `prefix` in `ref` with the
// endpoint's `location` and creates a new named reference from it.
// The function errors if the newly created reference is not parsable.
func (e *Endpoint) RewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
	refString := ref.String()
	if !refMatchesPrefix(refString, prefix) {
		return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
	}

	newNamedRef := strings.Replace(refString, prefix, e.Location, 1)
	newParsedRef, err := reference.ParseNamed(newNamedRef)
	if err != nil {
		return nil, errors.Wrapf(err, "error rewriting reference")
	}
	logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String())
	return newParsedRef, nil
}

// Registry represents a registry.
type Registry struct {
	// A registry is an Endpoint too
	Endpoint
	// The registry's mirrors.
	Mirrors []Endpoint `toml:"mirror"`
	// If true, pulling from the registry will be blocked.
	Blocked bool `toml:"blocked"`
	// If true, the registry can be used when pulling an unqualified image.
	Search bool `toml:"unqualified-search"`
	// Prefix is used for matching images, and to translate one namespace to
	// another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
	// and we pull from "example.com/bar/myimage:latest", the image will
	// effectively be pulled from "example.com/foo/bar/myimage:latest".
	// If no Prefix is specified, it defaults to the specified location.
	Prefix string `toml:"prefix"`
}

// V1TOMLregistries is for backwards compatibility to sysregistries v1
type V1TOMLregistries struct {
	Registries []string `toml:"registries"`
}

// V1TOMLConfig is for backwards compatibility to sysregistries v1
type V1TOMLConfig struct {
	Search   V1TOMLregistries `toml:"search"`
	Insecure V1TOMLregistries `toml:"insecure"`
	Block    V1TOMLregistries `toml:"block"`
}

// tomlConfig is the data type used to unmarshal the toml config.
type tomlConfig struct {
	Registries []Registry `toml:"registry"`
	// backwards compatibility to sysregistries v1
	V1TOMLConfig `toml:"registries"`
}
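The TOML tags above fix the on-disk v2 schema of registries.conf. A sketch of what such a file looks like and how it decodes; the struct definitions are local mirrors (tomlConfig itself is unexported) and the config values are invented for illustration:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Local mirrors of the vendored structs above.
type Endpoint struct {
	Location string `toml:"location"`
	Insecure bool   `toml:"insecure"`
}

type Registry struct {
	Endpoint
	Mirrors []Endpoint `toml:"mirror"`
	Blocked bool       `toml:"blocked"`
	Search  bool       `toml:"unqualified-search"`
	Prefix  string     `toml:"prefix"`
}

type Config struct {
	Registries []Registry `toml:"registry"`
}

// sample is an invented registries.conf fragment matching the schema above.
const sample = `
[[registry]]
location = "example.com/foo/bar"
prefix = "example.com/bar"
unqualified-search = true

[[registry.mirror]]
location = "mirror.example.net/bar"
`

func main() {
	var cfg Config
	if _, err := toml.Decode(sample, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.Registries[0])
}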

// InvalidRegistries represents an invalid registry configuration. An example
// is when "registry.com" is defined multiple times in the configuration but
// with conflicting security settings.
type InvalidRegistries struct {
	s string
}

// Error returns the error string.
func (e *InvalidRegistries) Error() string {
	return e.s
}

// parseLocation parses the input string, performs some sanity checks and returns
// the sanitized input string. An error is returned if the input string is
// empty or if it contains an "http{s,}://" prefix.
func parseLocation(input string) (string, error) {
	trimmed := strings.TrimRight(input, "/")

	if trimmed == "" {
		return "", &InvalidRegistries{s: "invalid location: cannot be empty"}
	}

	if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
		msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input)
		return "", &InvalidRegistries{s: msg}
	}

	return trimmed, nil
}

// getV1Registries transforms v1 registries in the config into an array of v2
// registries of type Registry.
func getV1Registries(config *tomlConfig) ([]Registry, error) {
	regMap := make(map[string]*Registry)
	// We must preserve the order of config.V1Registries.Search.Registries at least. The order of the
	// other registries is not really important, but make it deterministic (the same for the same config file)
	// to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.
	registryOrder := []string{}

	getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object
		var err error
		location, err = parseLocation(location)
		if err != nil {
			return nil, err
		}
		reg, exists := regMap[location]
		if !exists {
			reg = &Registry{
				Endpoint: Endpoint{Location: location},
				Mirrors:  []Endpoint{},
				Prefix:   location,
			}
			regMap[location] = reg
			registryOrder = append(registryOrder, location)
		}
		return reg, nil
	}

	// Note: config.V1Registries.Search needs to be processed first to ensure registryOrder is populated in the right order
	// if one of the search registries is also in one of the other lists.
	for _, search := range config.V1TOMLConfig.Search.Registries {
		reg, err := getRegistry(search)
		if err != nil {
			return nil, err
		}
		reg.Search = true
	}
	for _, blocked := range config.V1TOMLConfig.Block.Registries {
		reg, err := getRegistry(blocked)
		if err != nil {
			return nil, err
		}
		reg.Blocked = true
	}
	for _, insecure := range config.V1TOMLConfig.Insecure.Registries {
		reg, err := getRegistry(insecure)
		if err != nil {
			return nil, err
		}
		reg.Insecure = true
	}

	registries := []Registry{}
	for _, location := range registryOrder {
		reg := regMap[location]
		registries = append(registries, *reg)
	}
	return registries, nil
}

// postProcessRegistries checks the consistency of all registries (e.g., set
// the Prefix to Location if not set) and applies conflict checks. It returns an
// array of cleaned registries and error in case of conflicts.
func postProcessRegistries(regs []Registry) ([]Registry, error) {
	var registries []Registry
	regMap := make(map[string][]Registry)

	for _, reg := range regs {
		var err error

		// make sure Location and Prefix are valid
		reg.Location, err = parseLocation(reg.Location)
		if err != nil {
			return nil, err
		}

		if reg.Prefix == "" {
			reg.Prefix = reg.Location
		} else {
			reg.Prefix, err = parseLocation(reg.Prefix)
			if err != nil {
				return nil, err
			}
		}

		// make sure mirrors are valid
		for _, mir := range reg.Mirrors {
			mir.Location, err = parseLocation(mir.Location)
			if err != nil {
				return nil, err
			}
		}
		registries = append(registries, reg)
		regMap[reg.Location] = append(regMap[reg.Location], reg)
	}

	// Given a registry can be mentioned multiple times (e.g., to have
	// multiple prefixes backed by different mirrors), we need to make sure
	// there are no conflicts among them.
	//
	// Note: we need to iterate over the registries array to ensure a
	// deterministic behavior which is not guaranteed by maps.
	for _, reg := range registries {
		others, _ := regMap[reg.Location]
		for _, other := range others {
			if reg.Insecure != other.Insecure {
				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)

				return nil, &InvalidRegistries{s: msg}
			}
			if reg.Blocked != other.Blocked {
				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
				return nil, &InvalidRegistries{s: msg}
			}
		}
	}

	return registries, nil
}

// getConfigPath returns the system-registries config path if specified.
// Otherwise, systemRegistriesConfPath is returned.
func getConfigPath(ctx *types.SystemContext) string {
	confPath := systemRegistriesConfPath
	if ctx != nil {
		if ctx.SystemRegistriesConfPath != "" {
			confPath = ctx.SystemRegistriesConfPath
		} else if ctx.RootForImplicitAbsolutePaths != "" {
			confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
		}
	}
	return confPath
}

// configMutex is used to synchronize concurrent accesses to configCache.
var configMutex = sync.Mutex{}

// configCache caches already loaded configs with config paths as keys and is
// used to avoid redundantly parsing configs. Concurrent accesses to the cache
// are synchronized via configMutex.
var configCache = make(map[string][]Registry)

// InvalidateCache invalidates the registry cache. This function is meant to be
// used for long-running processes that need to reload potential changes made to
// the cached registry config files.
func InvalidateCache() {
	configMutex.Lock()
	defer configMutex.Unlock()
	configCache = make(map[string][]Registry)
}

// GetRegistries loads and returns the registries specified in the config.
// Note the parsed content of registry config files is cached. For reloading,
// use `InvalidateCache` and re-call `GetRegistries`.
func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
	configPath := getConfigPath(ctx)

	configMutex.Lock()
	defer configMutex.Unlock()
	// if the config has already been loaded, return the cached registries
	if registries, inCache := configCache[configPath]; inCache {
		return registries, nil
	}

	// load the config
	config, err := loadRegistryConf(configPath)
	if err != nil {
		// Return an empty []Registry if we use the default config,
		// which implies that the config path of the SystemContext
		// isn't set. Note: if ctx.SystemRegistriesConfPath points to
		// the default config, we will still return an error.
		if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
			return []Registry{}, nil
		}
		return nil, err
	}

	registries := config.Registries

	// backwards compatibility for v1 configs
	v1Registries, err := getV1Registries(config)
	if err != nil {
		return nil, err
	}
	if len(v1Registries) > 0 {
		if len(registries) > 0 {
			return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
		}
		registries = v1Registries
	}

	registries, err = postProcessRegistries(registries)
	if err != nil {
		return nil, err
	}

	// populate the cache
	configCache[configPath] = registries

	return registries, err
}

// FindUnqualifiedSearchRegistries returns all registries that are configured
// for unqualified image search (i.e., with Registry.Search == true).
func FindUnqualifiedSearchRegistries(ctx *types.SystemContext) ([]Registry, error) {
	registries, err := GetRegistries(ctx)
	if err != nil {
		return nil, err
	}

	unqualified := []Registry{}
	for _, reg := range registries {
		if reg.Search {
			unqualified = append(unqualified, reg)
		}
	}
	return unqualified, nil
}

// refMatchesPrefix returns true iff ref,
// which is a registry, repository namespace, repository or image reference (as formatted by
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
// — note that this requires the name to start with an explicit hostname!),
// matches a Registry.Prefix value.
// (This is split from the caller primarily to make testing easier.)
func refMatchesPrefix(ref, prefix string) bool {
	switch {
	case len(ref) < len(prefix):
		return false
	case len(ref) == len(prefix):
		return ref == prefix
	case len(ref) > len(prefix):
		if !strings.HasPrefix(ref, prefix) {
			return false
		}
		c := ref[len(prefix)]
		// This allows "example.com:5000" to match "example.com",
		// which is unintended; that will get fixed eventually, DON'T RELY
		// ON THE CURRENT BEHAVIOR.
		return c == ':' || c == '/' || c == '@'
	default:
		panic("Internal error: impossible comparison outcome")
	}
}
|
||||
// FindRegistry returns the Registry with the longest prefix for ref,
|
||||
// which is a registry, repository namespace repository or image reference (as formatted by
|
||||
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
|
||||
// — note that this requires the name to start with an explicit hostname!).
|
||||
// If no Registry prefixes the image, nil is returned.
|
||||
func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
|
||||
registries, err := GetRegistries(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reg := Registry{}
|
||||
prefixLen := 0
|
||||
for _, r := range registries {
|
||||
if refMatchesPrefix(ref, r.Prefix) {
|
||||
length := len(r.Prefix)
|
||||
if length > prefixLen {
|
||||
reg = r
|
||||
prefixLen = length
|
||||
}
|
||||
}
|
||||
}
|
||||
if prefixLen != 0 {
|
||||
return ®, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Reads the global registry file from the filesystem. Returns a byte array.
|
||||
func readRegistryConf(configPath string) ([]byte, error) {
|
||||
configBytes, err := ioutil.ReadFile(configPath)
|
||||
return configBytes, err
|
||||
}
|
||||
|
||||
// Used in unittests to parse custom configs without a types.SystemContext.
|
||||
var readConf = readRegistryConf
|
||||
|
||||
// Loads the registry configuration file from the filesystem and then unmarshals
|
||||
// it. Returns the unmarshalled object.
|
||||
func loadRegistryConf(configPath string) (*tomlConfig, error) {
|
||||
config := &tomlConfig{}
|
||||
|
||||
configBytes, err := readConf(configPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = toml.Unmarshal(configBytes, &config)
|
||||
return config, err
|
||||
}
|
||||
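Taken together, GetRegistries, FindUnqualifiedSearchRegistries and FindRegistry form the lookup surface of this package. A minimal caller sketch, using only the signatures shown in this diff; the config path and image name are hypothetical examples, not values from this repository:

package main

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	// Point the lookup at a non-default config file; with a nil SystemContext
	// (or an empty SystemRegistriesConfPath) the built-in default path is used.
	ctx := &types.SystemContext{SystemRegistriesConfPath: "/tmp/registries.conf"}

	// FindRegistry picks the entry with the longest matching Prefix.
	reg, err := sysregistriesv2.FindRegistry(ctx, "example.com/foo/myimage:latest")
	if err != nil {
		panic(err)
	}
	if reg != nil {
		fmt.Println(reg.Location, reg.Insecure, reg.Blocked)
	}
}

Note that results are cached per config path, so a long-running process that edits the file must call InvalidateCache before the next GetRegistries/FindRegistry call.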
vendor/github.com/containers/image/registries.conf (generated, vendored, new file): 65 lines
@@ -0,0 +1,65 @@
# For more information on this configuration file, see containers-registries.conf(5).
#
# There are multiple versions of the configuration syntax available, where the
# second iteration is backwards compatible to the first one. Mixing up both
# formats will result in a runtime error.
#
# The initial configuration format looks like this:
#
# Registries to search for images that are not fully-qualified.
# i.e. foobar.com/my_image:latest vs my_image:latest
[registries.search]
registries = []

# Registries that do not use TLS when pulling images or use self-signed
# certificates.
[registries.insecure]
registries = []

# Blocked registries block the `docker daemon` from pulling from the blocked registry. If you specify
# "*", then the docker daemon will only be allowed to pull from registries listed above in the search
# registries. Blocked registries are deprecated because other container runtimes and tools will not use them.
# It is recommended that you use the trust policy file /etc/containers/policy.json to control which
# registries you want to allow users to pull and push from. policy.json gives greater flexibility, and
# supports all container runtimes and tools including the docker daemon, cri-o, buildah ...
# The atomic CLI `atomic trust` can be used to easily configure the policy.json file.
[registries.block]
registries = []

# The second version of the configuration format allows specifying registry
# mirrors:
#
# [[registry]]
# # The main location of the registry
# location = "example.com"
#
# # If true, certs verification will be skipped and HTTP (non-TLS) connections
# # will be allowed.
# insecure = false
#
# # Prefix is used for matching images, and to translate one namespace to
# # another. If `prefix = "example.com/foo"`, `location = "example.com"` and
# # we pull from "example.com/foo/myimage:latest", the image will effectively be
# # pulled from "example.com/myimage:latest". If no Prefix is specified,
# # it defaults to the specified `location`. When a prefix is used, a pull
# # without specifying the prefix is no longer possible.
# prefix = "example.com/foo"
#
# # If true, the registry can be used when pulling an unqualified image. If a
# # prefix is specified, unqualified pull is no longer possible.
# unqualified-search = false
#
# # If true, pulling from the registry will be blocked.
# blocked = false
#
# # All available mirrors of the registry. The mirrors will be evaluated in
# # order during an image pull. Furthermore, it is possible to specify the
# # `insecure` flag per registry mirror, too.
# mirror = [
#   { location = "example-mirror-0.local", insecure = false },
#   { location = "example-mirror-1.local", insecure = true },
#   # It is also possible to specify an additional path within the `location`.
#   # A pull of `example.com/foo/image:latest` will then result in
#   # `example-mirror-2.local/path/image:latest`.
#   { location = "example-mirror-2.local/path" },
# ]
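The commented v2 example above covers every knob at once. As a compact illustration, a hypothetical uncommented entry that remaps a prefix onto one mirror could look like this; the hostnames are placeholders, not part of the vendored file:

[[registry]]
location = "registry.example.com"
prefix = "registry.example.com/team"
insecure = false
blocked = false
mirror = [
  { location = "mirror.example.net/team-cache", insecure = false },
]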
vendor/github.com/containers/image/signature/policy_config.go (generated, vendored): 2 changes
@@ -30,7 +30,7 @@ import (
// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
var systemDefaultPolicyPath = builtinDefaultPolicyPath

// builtinDefaultPolicyPath is the policy pat used for DefaultPolicy().
// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
// DO NOT change this, instead see systemDefaultPolicyPath above.
const builtinDefaultPolicyPath = "/etc/containers/policy.json"

vendor/github.com/containers/image/storage/storage_image.go (generated, vendored): 215 changes
@@ -11,11 +11,14 @@ import (
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/image"
	"github.com/containers/image/internal/tmpdir"
	"github.com/containers/image/manifest"
	"github.com/containers/image/pkg/blobinfocache/none"
	"github.com/containers/image/types"
	"github.com/containers/storage"
	"github.com/containers/storage/pkg/archive"
@@ -46,6 +49,7 @@ type storageImageSource struct {
	image          *storage.Image
	layerPosition  map[digest.Digest]int // Where we are in reading a blob's layers
	cachedManifest []byte                // A cached copy of the manifest, if already known, or nil
	getBlobMutex   sync.Mutex            // Mutex to sync state for parallel GetBlob executions
	SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
}

@@ -55,6 +59,7 @@ type storageImageDestination struct {
	nextTempFileID int32  // A counter that we use for computing filenames to assign to blobs
	manifest       []byte // Manifest contents, temporary
	signatures     []byte // Signature contents, temporary
	putBlobMutex   sync.Mutex // Mutex to sync state for parallel PutBlob executions
	blobDiffIDs    map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
	fileSizes      map[digest.Digest]int64         // Mapping from layer blobsums to their sizes
	filenames      map[digest.Digest]string        // Mapping from layer blobsums to names of files we used to hold them
@@ -66,6 +71,13 @@ type storageImageCloser struct {
	size int64
}

// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
func manifestBigDataKey(digest digest.Digest) string {
	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
}
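A small sketch of the resulting key shapes, assuming (as in the vendored containers/storage) that ImageDigestManifestBigDataNamePrefix is "manifest" and that ImageDigestBigDataKey is the legacy shared key; the digest below is a placeholder:

package keys

import (
	"fmt"

	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

func printKeys() {
	d := digest.FromString("example manifest") // placeholder digest for illustration
	// Digest-specific key, as built by manifestBigDataKey above.
	fmt.Println(storage.ImageDigestManifestBigDataNamePrefix + "-" + d.String())
	// Legacy shared key that older readers still consult.
	fmt.Println(storage.ImageDigestBigDataKey)
}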
// newImageSource sets up an image for reading.
func newImageSource(imageRef storageReference) (*storageImageSource, error) {
	// First, locate the image.
@@ -90,17 +102,24 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) {
}

// Reference returns the image reference that we used to find this image.
func (s storageImageSource) Reference() types.ImageReference {
func (s *storageImageSource) Reference() types.ImageReference {
	return s.imageRef
}

// Close cleans up any resources we tied up while reading the image.
func (s storageImageSource) Close() error {
func (s *storageImageSource) Close() error {
	return nil
}

// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *storageImageSource) HasThreadSafeGetBlob() bool {
	return true
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
	if info.Digest == image.GzippedEmptyLayerDigest {
		return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
	}
@@ -134,8 +153,10 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC
	// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
	// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
	// just go ahead and use the first one every time.
	s.getBlobMutex.Lock()
	i := s.layerPosition[info.Digest]
	s.layerPosition[info.Digest] = i + 1
	s.getBlobMutex.Unlock()
	if len(layers) > 0 {
		layer = layers[i%len(layers)]
	}
@@ -164,12 +185,29 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
		return nil, "", ErrNoManifestLists
	}
	if len(s.cachedManifest) == 0 {
		// We stored the manifest as an item named after storage.ImageDigestBigDataKey.
		cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
		if err != nil {
			return nil, "", err
		// The manifest is stored as a big data item.
		// Prefer the manifest corresponding to the user-specified digest, if available.
		if s.imageRef.named != nil {
			if digested, ok := s.imageRef.named.(reference.Digested); ok {
				key := manifestBigDataKey(digested.Digest())
				blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
				if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
					return nil, "", err
				}
				if err == nil {
					s.cachedManifest = blob
				}
			}
		}
		// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
		// Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
		if len(s.cachedManifest) == 0 {
			cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
			if err != nil {
				return nil, "", err
			}
			s.cachedManifest = cachedBlob
		}
		s.cachedManifest = cachedBlob
	}
	return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
}
@@ -297,7 +335,7 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e

// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (s storageImageDestination) Reference() types.ImageReference {
func (s *storageImageDestination) Reference() types.ImageReference {
	return s.imageRef
}

@@ -306,7 +344,7 @@ func (s *storageImageDestination) Close() error {
	return os.RemoveAll(s.directory)
}

func (s storageImageDestination) DesiredLayerCompression() types.LayerCompression {
func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
	// We ultimately have to decompress layers to populate trees on disk,
	// so callers shouldn't bother compressing them before handing them to
	// us, if they're not already compressed.
@@ -317,9 +355,22 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
	return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
}

// PutBlob stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (s *storageImageDestination) HasThreadSafePutBlob() bool {
	return true
}

// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
	// Stores a layer or data blob in our temporary directory, checking that any information
	// in the blobinfo matches the incoming data.
	errorBlobInfo := types.BlobInfo{
		Digest: "",
		Size:   -1,
@@ -359,9 +410,11 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
		return errorBlobInfo, ErrBlobSizeMismatch
	}
	// Record information about the blob.
	s.putBlobMutex.Lock()
	s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
	s.fileSizes[hasher.Digest()] = counter.Count
	s.filenames[hasher.Digest()] = filename
	s.putBlobMutex.Unlock()
	blobDigest := blobinfo.Digest
	if blobDigest.Validate() != nil {
		blobDigest = hasher.Digest()
@@ -370,6 +423,8 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
	if blobSize < 0 {
		blobSize = counter.Count
	}
	// This is safe because we have just computed both values ourselves.
	cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
	return types.BlobInfo{
		Digest: blobDigest,
		Size:   blobSize,
@@ -377,59 +432,85 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
	}, nil
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be
// reapplied using ReapplyBlob.
//
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) HasBlob(ctx context.Context, blobinfo types.BlobInfo) (bool, int64, error) {
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
	// lock the entire method as it executes fairly quickly
	s.putBlobMutex.Lock()
	defer s.putBlobMutex.Unlock()
	if blobinfo.Digest == "" {
		return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
	}
	if err := blobinfo.Digest.Validate(); err != nil {
		return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
		return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
	}

	// Check if we've already cached it in a file.
	if size, ok := s.fileSizes[blobinfo.Digest]; ok {
		return true, size, nil
		return true, types.BlobInfo{
			Digest:    blobinfo.Digest,
			Size:      size,
			MediaType: blobinfo.MediaType,
		}, nil
	}

	// Check if we have a wasn't-compressed layer in storage that's based on that blob.
	layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
		return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
	}
	if len(layers) > 0 {
		// Save this for completeness.
		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
		return true, layers[0].UncompressedSize, nil
		return true, types.BlobInfo{
			Digest:    blobinfo.Digest,
			Size:      layers[0].UncompressedSize,
			MediaType: blobinfo.MediaType,
		}, nil
	}

	// Check if we have a was-compressed layer in storage that's based on that blob.
	layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
	if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
		return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
		return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
	}
	if len(layers) > 0 {
		// Record the uncompressed value so that we can use it to calculate layer IDs.
		s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
		return true, layers[0].CompressedSize, nil
		return true, types.BlobInfo{
			Digest:    blobinfo.Digest,
			Size:      layers[0].CompressedSize,
			MediaType: blobinfo.MediaType,
		}, nil
	}
	// Nope, we don't have it.
	return false, -1, nil
}

// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the
// same one when it walks the list in the manifest.
func (s *storageImageDestination) ReapplyBlob(ctx context.Context, blobinfo types.BlobInfo) (types.BlobInfo, error) {
	present, size, err := s.HasBlob(ctx, blobinfo)
	if !present {
		return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo)
	// Does the blob correspond to a known DiffID which we already have available?
	// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
	// uncompressed layer, and that can happen only if canSubstitute.
	if canSubstitute {
		if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
			layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
			if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
				return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest)
			}
			if len(layers) > 0 {
				s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
				return true, types.BlobInfo{
					Digest:    uncompressedDigest,
					Size:      layers[0].UncompressedSize,
					MediaType: blobinfo.MediaType,
				}, nil
			}
		}
	}
	if err != nil {
		return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo)
	}
	blobinfo.Size = size
	return blobinfo, nil

	// Nope, we don't have it.
	return false, types.BlobInfo{}, nil
}
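The comment block above defines the contract that replaces the old HasBlob/ReapplyBlob pair: try cheap reuse first, fall back to a full transfer only on a miss. A sketch of how a caller might drive it, using only the GetBlob/PutBlob/TryReusingBlob signatures shown in this diff; the helper itself is illustrative, not the actual c/image copy loop:

package copyexample

import (
	"context"

	"github.com/containers/image/types"
)

// copyOneBlob is an illustrative helper, not part of c/image: it tries cheap
// reuse first and falls back to a full transfer through PutBlob.
func copyOneBlob(ctx context.Context, src types.ImageSource, dest types.ImageDestination,
	srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
	reused, info, err := dest.TryReusingBlob(ctx, srcInfo, cache, true) // canSubstitute
	if err != nil {
		return types.BlobInfo{}, err
	}
	if reused {
		return info, nil // info may name a substituted (e.g. uncompressed) blob
	}
	stream, _, err := src.GetBlob(ctx, srcInfo, cache)
	if err != nil {
		return types.BlobInfo{}, err
	}
	defer stream.Close()
	return dest.PutBlob(ctx, stream, srcInfo, cache, false) // isConfig=false
}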
// computeID computes a recommended image ID based on information we have so far. If
@@ -514,8 +595,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
		if !haveDiffID {
			// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
			// or to even check if we had it.
			// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
			// that relies on using a blob digest that has never been seen by the store had better call
			// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
			// so far we are going to accommodate that (if we should be doing that at all).
			logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
			has, _, err := s.HasBlob(ctx, blob.BlobInfo)
			has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
			if err != nil {
				return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
			}
@@ -600,6 +685,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
		}
		lastLayer = layer.ID
	}

	// If one of those blobs was a configuration blob, then we can try to dig out the date when the image
	// was originally created, in case we're just copying it. If not, no harm done.
	options := &storage.ImageOptions{}
@@ -607,9 +693,6 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
		logrus.Debugf("setting image creation date to %s", inspect.Created)
		options.CreationDate = *inspect.Created
	}
	if manifestDigest, err := manifest.Digest(s.manifest); err == nil {
		options.Digest = manifestDigest
	}
	// Create the image record, pointing to the most-recently added layer.
	intendedID := s.imageRef.id
	if intendedID == "" {
@@ -649,7 +732,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
		if err != nil {
			return errors.Wrapf(err, "error copying non-layer blob %q to image", blob)
		}
		if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil {
		if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
			if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
				logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
			}
@@ -675,9 +758,21 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
		}
		logrus.Debugf("set names of image %q to %v", img.ID, names)
	}
	// Save the manifest. Use storage.ImageDigestBigDataKey as the item's
	// name, so that its digest can be used to locate the image in the Store.
	if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil {
	// Save the manifest. Allow looking it up by digest by using the key convention defined by the Store.
	// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
	// and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
	manifestDigest, err := manifest.Digest(s.manifest)
	if err != nil {
		return errors.Wrapf(err, "error computing manifest digest")
	}
	if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil {
		if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
			logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
		}
		logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
		return err
	}
	if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil {
		if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
			logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
		}
@@ -686,7 +781,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
	}
	// Save the signatures, if we have any.
	if len(s.signatures) > 0 {
		if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil {
		if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
			if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
				logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
			}
@@ -728,9 +823,21 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
}

// PutManifest writes the manifest to the destination.
func (s *storageImageDestination) PutManifest(ctx context.Context, manifest []byte) error {
	s.manifest = make([]byte, len(manifest))
	copy(s.manifest, manifest)
func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error {
	if s.imageRef.named != nil {
		if digested, ok := s.imageRef.named.(reference.Digested); ok {
			matches, err := manifest.MatchesDigest(manifestBlob, digested.Digest())
			if err != nil {
				return err
			}
			if !matches {
				return fmt.Errorf("Manifest does not match expected digest %s", digested.Digest())
			}
		}
	}

	s.manifest = make([]byte, len(manifestBlob))
	copy(s.manifest, manifestBlob)
	return nil
}

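The new PutManifest enforces digest-pinned destination references. The same guard, extracted into a stand-alone illustrative helper (the function name is hypothetical; manifest.MatchesDigest is the c/image call used in the diff above):

package manifestcheck

import (
	"fmt"

	"github.com/containers/image/manifest"
	digest "github.com/opencontainers/go-digest"
)

// verifyPinnedManifest refuses a manifest blob that does not hash to the
// digest the user pinned in the destination reference.
func verifyPinnedManifest(blob []byte, pinned digest.Digest) error {
	matches, err := manifest.MatchesDigest(blob, pinned)
	if err != nil {
		return err
	}
	if !matches {
		return fmt.Errorf("manifest does not match expected digest %s", pinned)
	}
	return nil
}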
vendor/github.com/containers/image/storage/storage_reference.go (generated, vendored): 22 changes
@@ -55,7 +55,7 @@ func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
// one present with the same name or ID, and return the image.
func (s *storageReference) resolveImage() (*storage.Image, error) {
	var loadedImage *storage.Image
	if s.id == "" {
	if s.id == "" && s.named != nil {
		// Look for an image that has the expanded reference name as an explicit Name value.
		image, err := s.transport.store.Image(s.named.String())
		if image != nil && err == nil {
@@ -69,7 +69,7 @@ func (s *storageReference) resolveImage() (*storage.Image, error) {
			// though possibly with a different tag or digest, as a Name value, so
			// that the canonical reference can be implicitly resolved to the image.
			images, err := s.transport.store.ImagesByDigest(digested.Digest())
			if images != nil && err == nil {
			if err == nil && len(images) > 0 {
				for _, image := range images {
					if imageMatchesRepo(image, s.named) {
						loadedImage = image
@@ -97,6 +97,24 @@ func (s *storageReference) resolveImage() (*storage.Image, error) {
			return nil, ErrNoSuchImage
		}
	}
	// Default to having the image digest that we hand back match the most recently
	// added manifest...
	if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok {
		loadedImage.Digest = digest
	}
	// ... unless the named reference says otherwise, and it matches one of the digests
	// in the image. For those cases, set the Digest field to that value, for the
	// sake of older consumers that don't know there's a whole list in there now.
	if s.named != nil {
		if digested, ok := s.named.(reference.Digested); ok {
			for _, digest := range loadedImage.Digests {
				if digest == digested.Digest() {
					loadedImage.Digest = digest
					break
				}
			}
		}
	}
	return loadedImage, nil
}

vendor/github.com/containers/image/storage/storage_transport.go (generated, vendored): 12 changes
@@ -107,7 +107,7 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
	if ref == "" {
		return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference")
		return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref)
	}
	if ref[0] == '[' {
		// Ignore the store specifier.
@@ -180,7 +180,10 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
	// Return the transport's previously-set store. If we don't have one
	// of those, initialize one now.
	if s.store == nil {
		options := storage.DefaultStoreOptions
		options, err := storage.DefaultStoreOptionsAutoDetectUID()
		if err != nil {
			return nil, err
		}
		options.UIDMap = s.defaultUIDMap
		options.GIDMap = s.defaultGIDMap
		store, err := storage.GetStore(options)
@@ -284,11 +287,6 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageRefe
		}
	}
	if sref, ok := ref.(*storageReference); ok {
		if sref.id != "" {
			if img, err := store.Image(sref.id); err == nil {
				return img, nil
			}
		}
		tmpRef := *sref
		if img, err := tmpRef.resolveImage(); err == nil {
			return img, nil

vendor/github.com/containers/image/tarball/tarball_src.go (generated, vendored): 15 changes
@@ -2,7 +2,6 @@ package tarball

import (
	"bytes"
	"compress/gzip"
	"context"
	"encoding/json"
	"fmt"
@@ -14,7 +13,7 @@ import (
	"time"

	"github.com/containers/image/types"

	"github.com/klauspost/pgzip"
	digest "github.com/opencontainers/go-digest"
	imgspecs "github.com/opencontainers/image-spec/specs-go"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -77,7 +76,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System

	// Set up to digest the file after we maybe decompress it.
	diffIDdigester := digest.Canonical.Digester()
	uncompressed, err := gzip.NewReader(reader)
	uncompressed, err := pgzip.NewReader(reader)
	if err == nil {
		// It is compressed, so the diffID is the digest of the uncompressed version
		reader = io.TeeReader(uncompressed, diffIDdigester.Hash())
@@ -207,7 +206,15 @@ func (is *tarballImageSource) Close() error {
	return nil
}

func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo) (io.ReadCloser, int64, error) {
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (is *tarballImageSource) HasThreadSafeGetBlob() bool {
	return false
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
	// We should only be asked about things in the manifest. Maybe the configuration blob.
	if blobinfo.Digest == is.configID {
		return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil

vendor/github.com/containers/image/transports/alltransports/alltransports.go (generated, vendored): 12 changes
@@ -22,6 +22,7 @@ import (

// ParseImageName converts a URL-like image name to a types.ImageReference.
func ParseImageName(imgName string) (types.ImageReference, error) {
	// Keep this in sync with TransportFromImageName!
	parts := strings.SplitN(imgName, ":", 2)
	if len(parts) != 2 {
		return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
@@ -32,3 +33,14 @@ func ParseImageName(imgName string) (types.ImageReference, error) {
	}
	return transport.ParseReference(parts[1])
}

// TransportFromImageName converts a URL-like name to a types.ImageTransport or nil when
// the transport is unknown or when the input is invalid.
func TransportFromImageName(imageName string) types.ImageTransport {
	// Keep this in sync with ParseImageName!
	parts := strings.SplitN(imageName, ":", 2)
	if len(parts) == 2 {
		return transports.Get(parts[0])
	}
	return nil
}
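Both helpers split the name at the first colon, so they stay in sync by construction. A usage sketch, assuming the blanket transport registration that this package's imports perform; the image name is only an example:

package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	// TransportFromImageName only inspects the prefix before the first colon...
	if t := alltransports.TransportFromImageName("docker://quay.io/skopeo/stable:latest"); t != nil {
		fmt.Println("transport:", t.Name()) // "docker"
	}
	// ...while ParseImageName also parses the remainder into an ImageReference.
	ref, err := alltransports.ParseImageName("docker://quay.io/skopeo/stable:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport())
}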
vendor/github.com/containers/image/types/types.go (generated, vendored): 133 changes
@@ -100,6 +100,82 @@ type BlobInfo struct {
	MediaType string
}

// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
//
// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
// at least by not failing hard when encountering unknown data.
type BICTransportScope struct {
	Opaque string
}

// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
// can look it up using BlobInfoCache.CandidateLocations.
//
// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
// at least by not failing hard when encountering unknown data.
type BICLocationReference struct {
	Opaque string
}

// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
type BICReplacementCandidate struct {
	Digest   digest.Digest
	Location BICLocationReference
}

// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
//
// It records two kinds of data:
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
//   One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
//   This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
//   or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
//
//   It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
//   to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
//
//   This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
//   compress/decompress blobs for their own purposes.
//
// - Known blob locations, managed by individual transports:
//   The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
//   recording transport-specific information that allows the transport to reuse the blob in the future;
//   then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
//
//   Each transport defines its own “scopes” within which blob reuse is possible (e.g. in the docker/distribution case, blobs
//   can be directly reused within a registry, or mounted across registries within a registry server.)
//
// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
// users of the cache should just fall back to copying the blobs the usual way.
type BlobInfoCache interface {
	// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
	// May return anyDigest if it is known to be uncompressed.
	// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
	UncompressedDigest(anyDigest digest.Digest) digest.Digest
	// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
	// It’s allowed for anyDigest == uncompressed.
	// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
	// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
	// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
	RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)

	// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
	// and can be reused given the opaque location data.
	RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
	// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
	// within the specified (transport, scope) (if they still exist, which is not guaranteed).
	//
	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
	// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
	// uncompressed digest.
	CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
}
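A brief sketch of the two safe interaction patterns the comments above describe; the helper names are hypothetical and only the BlobInfoCache methods are real:

package cacheexample

import (
	"github.com/containers/image/types"
	digest "github.com/opencontainers/go-digest"
)

// recordVerifiedPair: after locally decompressing a blob and hashing both
// forms, a caller may record the pair for later substitution lookups.
func recordVerifiedPair(cache types.BlobInfoCache, compressed, uncompressed digest.Digest) {
	// Safe only because both digests were computed locally (see the WARNING above).
	cache.RecordDigestUncompressedPair(compressed, uncompressed)
}

// knownUncompressed: UncompressedDigest returns d itself when d is already
// known to be uncompressed, and "" when nothing is known.
func knownUncompressed(cache types.BlobInfoCache, d digest.Digest) bool {
	return cache.UncompressedDigest(d) == d
}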
// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
// This is primarily useful for copying images around; for examining their properties, Image (below)
// is usually more useful.
@@ -120,7 +196,10 @@ type ImageSource interface {
	GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
	// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
	// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
	GetBlob(context.Context, BlobInfo) (io.ReadCloser, int64, error)
	// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
	GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
	// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
	HasThreadSafeGetBlob() bool
	// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
	// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
@@ -148,8 +227,7 @@ const (
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
//
// There is a specific required order for some of the calls:
// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest
// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
//
@@ -183,17 +261,21 @@ type ImageDestination interface {
	// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
	// inputInfo.Size is the expected length of stream, if known.
	// inputInfo.MediaType describes the blob format, if known.
	// May update cache.
	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
	// to any other readers for download using the supplied digest.
	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
	PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, isConfig bool) (BlobInfo, error)
	// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
	// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
	// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
	// it returns a non-nil error only on an unexpected failure.
	HasBlob(ctx context.Context, info BlobInfo) (bool, int64, error)
	// ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree.
	ReapplyBlob(ctx context.Context, info BlobInfo) (BlobInfo, error)
	PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
	// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
	HasThreadSafePutBlob() bool
	// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
	// info.Digest must not be empty.
	// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
	// May use and/or update cache.
	TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
	// PutManifest writes manifest to the destination.
	// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
	// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
@@ -319,11 +401,36 @@ type ImageInspectInfo struct {
}

// DockerAuthConfig contains authorization information for connecting to a registry.
// The value of Username and Password can be empty for accessing the registry anonymously.
type DockerAuthConfig struct {
	Username string
	Password string
}

// OptionalBool is a boolean with an additional undefined value, which is meant
// to be used in the context of user input to distinguish between a
// user-specified value and a default value.
type OptionalBool byte

const (
	// OptionalBoolUndefined indicates that the OptionalBool hasn't been written.
	OptionalBoolUndefined OptionalBool = iota
	// OptionalBoolTrue represents the boolean true.
	OptionalBoolTrue
	// OptionalBoolFalse represents the boolean false.
	OptionalBoolFalse
)

// NewOptionalBool converts the input bool into either OptionalBoolTrue or
// OptionalBoolFalse. The function is meant to avoid boilerplate code of users.
func NewOptionalBool(b bool) OptionalBool {
	o := OptionalBoolFalse
	if b {
		o = OptionalBoolTrue
	}
	return o
}
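A short usage sketch of the tri-state type; the point is that a zero-valued SystemContext leaves the field undefined (so per-registry configuration can still decide) rather than false:

package main

import (
	"fmt"

	"github.com/containers/image/types"
)

func main() {
	sys := &types.SystemContext{}
	// The zero value is OptionalBoolUndefined, not false.
	fmt.Println(sys.DockerInsecureSkipTLSVerify == types.OptionalBoolUndefined) // true

	// An explicit user choice overrides the default.
	sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(true)
	fmt.Println(sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue) // true
}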
// SystemContext allows parameterizing access to implicitly-accessed resources,
// like configuration files in /etc and users' login state in their home directory.
// Various components can share the same field only if their semantics is exactly
@@ -351,6 +458,8 @@ type SystemContext struct {
	ArchitectureChoice string
	// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
	OSChoice string
	// If not "", overrides the system's default directory containing a blob info cache.
	BlobInfoCacheDir string

	// Additional tags when creating or copying a docker-archive.
	DockerArchiveAdditionalTags []reference.NamedTagged
@@ -376,7 +485,7 @@ type SystemContext struct {
	// Ignored if DockerCertPath is non-empty.
	DockerPerHostCertDirPath string
	// Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
	DockerInsecureSkipTLSVerify bool
	DockerInsecureSkipTLSVerify OptionalBool
	// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
	DockerAuthConfig *DockerAuthConfig
	// if not "", a User-Agent header is added to each request when contacting a registry.

vendor/github.com/containers/image/vendor.conf (generated, vendored): 18 changes
@@ -1,7 +1,7 @@
github.com/containers/image

github.com/sirupsen/logrus v1.0.0
github.com/containers/storage master
github.com/containers/storage v1.12.2
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
@@ -13,10 +13,9 @@ github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c
github.com/gorilla/mux 94e7d24fd285520f3d12ae998f7fdd6b5393d453
github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
@@ -26,20 +25,27 @@ github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vbatts/tar-split v0.10.2
golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
gopkg.in/cheggaaa/pb.v1 d7e6ca3010b6f084d8056847f55d7f572f180678
gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
github.com/xeipuuv/gojsonschema master
github.com/xeipuuv/gojsonreference master
github.com/xeipuuv/gojsonpointer master
github.com/tchap/go-patricia v2.2.6
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
github.com/opencontainers/selinux 077c8b6d1c18456fb7c792bc0de52295a0d1900e
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
github.com/pquerna/ffjson master
github.com/syndtr/gocapability master
github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4
github.com/etcd-io/bbolt v1.3.2
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1

vendor/github.com/containers/image/version/version.go (generated, vendored): 4 changes
@@ -4,9 +4,9 @@ import "fmt"

const (
	// VersionMajor is for API-incompatible changes
	VersionMajor = 0
	VersionMajor = 1
	// VersionMinor is for functionality in a backwards-compatible manner
	VersionMinor = 1
	VersionMinor = 7
	// VersionPatch is for backwards-compatible bug fixes
	VersionPatch = 0

vendor/github.com/containers/storage/README.md (generated, vendored): 2 changes
@@ -2,7 +2,7 @@
layers, container images, and containers. A `containers-storage` CLI wrapper
is also included for manual and scripting use.

To build the CLI wrapper, use 'make build-binary'.
To build the CLI wrapper, use 'make binary'.

Operations which use VMs expect to launch them using 'vagrant', defaulting to
using its 'libvirt' provider. The boxes used are also available for the

vendor/github.com/containers/storage/containers.go (generated, vendored; 39 changed lines)
@@ -71,7 +71,7 @@ type Container struct {
 type ContainerStore interface {
 	FileBasedStore
 	MetadataStore
-	BigDataStore
+	ContainerBigDataStore
 	FlaggableStore
 
 	// Create creates a container that has a specified ID (or generates a
@@ -133,6 +133,27 @@ func copyContainer(c *Container) *Container {
 	}
 }
 
+func (c *Container) MountLabel() string {
+	if label, ok := c.Flags["MountLabel"].(string); ok {
+		return label
+	}
+	return ""
+}
+
+func (c *Container) ProcessLabel() string {
+	if label, ok := c.Flags["ProcessLabel"].(string); ok {
+		return label
+	}
+	return ""
+}
+
+func (c *Container) MountOpts() []string {
+	if mountOpts, ok := c.Flags["MountOpts"].([]string); ok {
+		return mountOpts
+	}
+	return nil
+}
+
 func (r *containerStore) Containers() ([]Container, error) {
 	containers := make([]Container, len(r.containers))
 	for i := range r.containers {
@@ -279,6 +300,9 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
 	if _, idInUse := r.byid[id]; idInUse {
 		return nil, ErrDuplicateID
 	}
+	if options.MountOpts != nil {
+		options.Flags["MountOpts"] = append([]string{}, options.MountOpts...)
+	}
 	names = dedupeNames(names)
 	for _, name := range names {
 		if _, nameInUse := r.byname[name]; nameInUse {
@@ -297,7 +321,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
 		BigDataSizes:   make(map[string]int64),
 		BigDataDigests: make(map[string]digest.Digest),
 		Created:        time.Now().UTC(),
-		Flags:          make(map[string]interface{}),
+		Flags:          copyStringInterfaceMap(options.Flags),
 		UIDMap:         copyIDMap(options.UIDMap),
 		GIDMap:         copyIDMap(options.GIDMap),
 	}
@@ -432,7 +456,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
 		return size, nil
 	}
 	if data, err := r.BigData(id, key); err == nil && data != nil {
-		if r.SetBigData(id, key, data) == nil {
+		if err = r.SetBigData(id, key, data); err == nil {
 			c, ok := r.lookup(id)
 			if !ok {
 				return -1, ErrContainerUnknown
@@ -440,6 +464,8 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
 			if size, ok := c.BigDataSizes[key]; ok {
 				return size, nil
 			}
+		} else {
+			return -1, err
 		}
 	}
 	return -1, ErrSizeUnknown
@@ -460,7 +486,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
 		return d, nil
 	}
 	if data, err := r.BigData(id, key); err == nil && data != nil {
-		if r.SetBigData(id, key, data) == nil {
+		if err = r.SetBigData(id, key, data); err == nil {
 			c, ok := r.lookup(id)
 			if !ok {
 				return "", ErrContainerUnknown
@@ -468,6 +494,8 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
 			if d, ok := c.BigDataDigests[key]; ok {
 				return d, nil
 			}
+		} else {
+			return "", err
 		}
 	}
 	return "", ErrDigestUnknown
@@ -544,6 +572,9 @@ func (r *containerStore) Lock() {
 	r.lockfile.Lock()
 }
 
+func (r *containerStore) RLock() {
+	r.lockfile.RLock()
+}
 func (r *containerStore) Unlock() {
 	r.lockfile.Unlock()
 }
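The three accessors added here share one pattern: Create stores per-container settings in the schemaless Flags map, and readers decode them later with a checked type assertion, falling back to a zero value when the flag is missing or has an unexpected type. A self-contained sketch of that pattern follows; the Container type below only mirrors the Flags field of the vendored storage.Container, it is not the real type:

package main

import "fmt"

// Container mirrors just the Flags field of storage.Container.
type Container struct {
	Flags map[string]interface{}
}

// MountOpts decodes the "MountOpts" flag, or returns nil when it is
// unset or holds a value of the wrong type.
func (c *Container) MountOpts() []string {
	if opts, ok := c.Flags["MountOpts"].([]string); ok {
		return opts
	}
	return nil
}

func main() {
	c := &Container{Flags: map[string]interface{}{
		"MountOpts": []string{"nodev", "nosuid"},
	}}
	fmt.Println(c.MountOpts()) // [nodev nosuid]

	empty := &Container{Flags: map[string]interface{}{}}
	fmt.Println(empty.MountOpts() == nil) // true
}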
vendor/github.com/containers/storage/containers_ffjson.go (generated, vendored; 1 changed line)
@@ -1,6 +1,5 @@
 // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
 // source: containers.go
-//
 
 package storage
 
vendor/github.com/containers/storage/drivers/aufs/aufs.go (generated, vendored; 31 changed lines)
@@ -253,6 +253,11 @@ func (a *Driver) AdditionalImageStores() []string {
 	return nil
 }
 
+// CreateFromTemplate creates a layer with the same contents and parent as another layer.
+func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
+	return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
+}
+
 // CreateReadWrite creates a layer that is writable for use as a container
 // file system.
 func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
@@ -405,7 +410,7 @@ func atomicRemove(source string) error {
 	case os.IsExist(err):
 		// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
 		if _, e := os.Stat(source); !os.IsNotExist(e) {
-			return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up")
+			return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target)
 		}
 	default:
 		return errors.Wrapf(err, "error preparing atomic delete")
@@ -416,7 +421,7 @@ func atomicRemove(source string) error {
 
 // Get returns the rootfs path for the id.
 // This will mount the dir at its given path
-func (a *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
+func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
 	a.locker.Lock(id)
 	defer a.locker.Unlock(id)
 	parents, err := a.getParentLayerPaths(id)
@@ -441,7 +446,7 @@ func (a *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (s
 	// If a dir does not have a parent ( no layers )do not try to mount
 	// just return the diff path to the data
 	if len(parents) > 0 {
-		if err := a.mount(id, m, mountLabel, parents); err != nil {
+		if err := a.mount(id, m, parents, options); err != nil {
 			return "", err
 		}
 	}
@@ -585,7 +590,7 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
 	return layers, nil
 }
 
-func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
+func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error {
 	a.Lock()
 	defer a.Unlock()
 
@@ -596,7 +601,7 @@ func (a *Driver) mount(id string, target string, mountLabel string, layers []str
 
 	rw := a.getDiffPath(id)
 
-	if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
+	if err := a.aufsMount(layers, rw, target, options); err != nil {
 		return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
 	}
 	return nil
@@ -643,7 +648,7 @@ func (a *Driver) Cleanup() error {
 	return mountpk.Unmount(a.root)
 }
 
-func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
+func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) {
 	defer func() {
 		if err != nil {
 			Unmount(target)
@@ -657,7 +662,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
 	if useDirperm() {
 		offset += len(",dirperm1")
 	}
-	b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
+	b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel
 	bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
 
 	index := 0
@@ -670,21 +675,25 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
 	}
 
 	opts := "dio,xino=/dev/shm/aufs.xino"
-	if a.mountOptions != "" {
-		opts += fmt.Sprintf(",%s", a.mountOptions)
+	mountOptions := a.mountOptions
+	if len(options.Options) > 0 {
+		mountOptions = strings.Join(options.Options, ",")
+	}
+	if mountOptions != "" {
+		opts += fmt.Sprintf(",%s", mountOptions)
 	}
 
 	if useDirperm() {
 		opts += ",dirperm1"
 	}
-	data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
+	data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel)
 	if err = mount("none", target, "aufs", 0, data); err != nil {
 		return
 	}
 
 	for ; index < len(ro); index++ {
 		layer := fmt.Sprintf(":%s=ro+wh", ro[index])
-		data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
+		data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel)
 		if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
 			return
 		}
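The thread running through these hunks is an API consolidation: Get, mount, and aufsMount now take a single graphdriver.MountOpts value instead of a bare mountLabel plus the unused uidMaps/gidMaps slices, and per-mount Options, when present, replace the driver-wide default option string. A runnable sketch of just that precedence rule (the MountOpts mirror and buildMountData helper are illustrative, not the vendored API):

package main

import (
	"fmt"
	"strings"
)

// MountOpts mirrors the fields of graphdriver.MountOpts that aufs consumes.
type MountOpts struct {
	MountLabel string
	Options    []string
}

// buildMountData reproduces the option precedence from the diff: per-mount
// Options, when given, win over the driver-wide default option string.
func buildMountData(driverDefault string, o MountOpts) string {
	opts := "dio,xino=/dev/shm/aufs.xino"
	mountOptions := driverDefault
	if len(o.Options) > 0 {
		mountOptions = strings.Join(o.Options, ",")
	}
	if mountOptions != "" {
		opts += "," + mountOptions
	}
	return opts
}

func main() {
	// Driver default only:
	fmt.Println(buildMountData("noplink", MountOpts{}))
	// Per-mount options override the default:
	fmt.Println(buildMountData("noplink", MountOpts{Options: []string{"ro"}}))
}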
vendor/github.com/containers/storage/drivers/btrfs/btrfs.go (generated, vendored; 10 changed lines)
@@ -490,6 +490,11 @@ func (d *Driver) quotasDirID(id string) string {
 	return path.Join(d.quotasDir(), id)
 }
 
+// CreateFromTemplate creates a layer with the same contents and parent as another layer.
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
+	return d.Create(id, template, opts)
+}
+
 // CreateReadWrite creates a layer that is writable for use as a container
 // file system.
 func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
@@ -634,12 +639,15 @@ func (d *Driver) Remove(id string) error {
 }
 
 // Get the requested filesystem id.
-func (d *Driver) Get(id, mountLabel string, uidMaps, gidMaps []idtools.IDMap) (string, error) {
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
 	dir := d.subvolumesDirID(id)
 	st, err := os.Stat(dir)
 	if err != nil {
 		return "", err
 	}
+	if len(options.Options) > 0 {
+		return "", fmt.Errorf("btrfs driver does not support mount options")
+	}
 
 	if !st.IsDir() {
 		return "", fmt.Errorf("%s: not a directory", dir)
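Unlike aufs, which splices caller-supplied options into the mount data it assembles, btrfs performs no mount at Get time (the subvolume already exists on disk), so the new guard rejects any per-mount options rather than silently ignoring them. A small self-contained sketch of that fail-fast behavior (the MountOpts type, the get helper, and the path layout are all assumed for illustration):

package main

import (
	"fmt"
	"path/filepath"
)

// MountOpts mirrors the relevant fields of graphdriver.MountOpts.
type MountOpts struct {
	MountLabel string
	Options    []string
}

// get mimics the guard the btrfs driver's Get now performs: any
// caller-supplied mount options are rejected outright.
func get(id string, options MountOpts) (string, error) {
	if len(options.Options) > 0 {
		return "", fmt.Errorf("btrfs driver does not support mount options")
	}
	// Path layout assumed for this sketch.
	return filepath.Join("/var/lib/containers/storage/btrfs/subvolumes", id), nil
}

func main() {
	if _, err := get("abc123", MountOpts{Options: []string{"ro"}}); err != nil {
		fmt.Println("expected error:", err)
	}
}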
vendor/github.com/containers/storage/drivers/chown.go (generated, vendored; 5 changed lines)
@@ -114,7 +114,10 @@ func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater {
 // same "container" IDs.
 func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
 	driver := n.ProtoDriver
-	layerFs, err := driver.Get(id, mountLabel, nil, nil)
+	options := MountOpts{
+		MountLabel: mountLabel,
+	}
+	layerFs, err := driver.Get(id, options)
 	if err != nil {
 		return err
 	}
vendor/github.com/containers/storage/drivers/chown_unix.go (generated, vendored; 22 changed lines)
@@ -8,6 +8,7 @@ import (
 	"syscall"
 
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/system"
 )
 
 func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
@@ -45,10 +46,31 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.
 		uid, gid = mappedPair.UID, mappedPair.GID
 	}
 	if uid != int(st.Uid) || gid != int(st.Gid) {
+		stat, err := os.Lstat(path)
+		if err != nil {
+			return fmt.Errorf("%s: lstat(%q): %v", os.Args[0], path, err)
+		}
+		cap, err := system.Lgetxattr(path, "security.capability")
+		if err != nil && err != system.ErrNotSupportedPlatform {
+			return fmt.Errorf("%s: Lgetxattr(%q): %v", os.Args[0], path, err)
+		}
+
 		// Make the change.
 		if err := syscall.Lchown(path, uid, gid); err != nil {
 			return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err)
 		}
+		// Restore the SUID and SGID bits if they were originally set.
+		if (stat.Mode()&os.ModeSymlink == 0) && stat.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
+			if err := os.Chmod(path, stat.Mode()); err != nil {
+				return fmt.Errorf("%s: chmod(%q): %v", os.Args[0], path, err)
+			}
+		}
+		if cap != nil {
+			if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil {
+				return fmt.Errorf("%s: Lsetxattr(%q): %v", os.Args[0], path, err)
+			}
+		}
+
 	}
 	}
 	return nil
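The motivation for the new block is that chown(2) clears the setuid and setgid bits on regular files, and an ownership change can likewise drop file capabilities stored in the security.capability extended attribute; the diff snapshots both before the chown and restores them afterwards. Below is a standalone, Linux-only sketch of the same idea that talks to golang.org/x/sys/unix directly instead of the vendored pkg/system helpers:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// chownPreserve changes ownership of path while keeping the SUID/SGID
// bits and any security.capability xattr intact, mirroring the logic
// added to platformLChown above.
func chownPreserve(path string, uid, gid int) error {
	st, err := os.Lstat(path)
	if err != nil {
		return err
	}
	// Snapshot the capability xattr before the chown can clear it.
	// A 256-byte buffer is ample for security.capability.
	buf := make([]byte, 256)
	n, xerr := unix.Lgetxattr(path, "security.capability", buf)
	haveCap := xerr == nil
	if err := os.Lchown(path, uid, gid); err != nil {
		return err
	}
	// Restore SUID/SGID on non-symlinks if they were originally set.
	if st.Mode()&os.ModeSymlink == 0 && st.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {
		if err := os.Chmod(path, st.Mode()); err != nil {
			return err
		}
	}
	if haveCap {
		if err := unix.Lsetxattr(path, "security.capability", buf[:n], 0); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// Example: re-own a scratch file to the current user (a no-op
	// ownership change, but it exercises the preserve logic).
	f, err := os.CreateTemp("", "chown-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()
	fmt.Println(chownPreserve(f.Name(), os.Getuid(), os.Getgid()))
}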
Some files were not shown because too many files have changed in this diff.